core/stdarch/crates/core_arch/src/aarch64/neon/generated.rs

// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
//
// ```
// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
// ```
#![allow(improper_ctypes)]

#[cfg(test)]
use stdarch_test::assert_instr;

use super::*;

#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg(not(target_arch = "arm"))]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg(not(target_arch = "arm"))]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    unsafe { ___crc32d(crc, data) }
}
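// Usage sketch (illustrative, not produced by the generator): folding eight
// message bytes into a running CRC32-C accumulator. Assumes the `crc` target
// feature has already been verified (e.g. via runtime feature detection); the
// byte string and initial value are hypothetical.
//
//     let mut crc: u32 = 0xFFFF_FFFF;
//     crc = __crc32cd(crc, u64::from_le_bytes(*b"12345678"));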
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int8x8_t = vabd_s8(d, e);
        let f: uint8x8_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: int16x4_t = vabd_s16(d, e);
        let f: uint16x4_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: int32x2_t = vabd_s32(d, e);
        let f: uint32x2_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint8x8_t = vabd_u8(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: uint16x4_t = vabd_u16(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: uint32x2_t = vabd_u32(d, e);
        simd_add(a, simd_cast(f))
    }
}
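// Semantics sketch (illustrative): each vabal_high_* intrinsic takes the high
// halves of `b` and `c`, forms the lanewise absolute difference, widens it,
// and accumulates into `a`, i.e. r[i] = a[i] + |b_hi[i] - c_hi[i]|.
//
//     // given b, c: int8x16_t (hypothetical inputs)
//     let acc = vdupq_n_s16(0);
//     let r = vabal_high_s8(acc, b, c); // widened |b_hi - c_hi| per lane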
#[doc = "Floating-point absolute difference between the arguments"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Floating-point absolute difference between the arguments"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
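// Scalar example (illustrative): the `d`/`s`/`h` forms operate on a single
// element by splatting it into a vector and extracting lane 0.
//
//     let d = vabdd_f64(2.5, -1.0); // |2.5 - (-1.0)| == 3.5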
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
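// Semantics sketch (illustrative): vabdl_high_* forms the lanewise absolute
// difference of the high halves of `a` and `b`, then widens each lane, e.g.
// for vabdl_high_u8: r[i] = |a[8 + i] - b[8 + i]| as u16.
//
//     // given a, b: uint8x16_t (hypothetical inputs)
//     let r: uint16x8_t = vabdl_high_u8(a, b);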
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.v1i64"
        )]
        fn _vabs_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vabs_s64(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    unsafe { _vabsd_s64(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.v2i64"
        )]
        fn _vabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vabsq_s64(a) }
}
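// Note on the "(wrapping)" in the docs above (illustrative example): ABS on
// the most negative value wraps in two's complement rather than saturating.
//
//     let r = vabsd_s64(i64::MIN); // r == i64::MIN, not i64::MAX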
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_add(b)
}
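// Wrapping-add example (illustrative): the scalar vaddd_* forms are plain
// two's-complement additions, which is why they assemble to no instruction
// at all (`nop` in the assertions above).
//
//     assert_eq!(vaddd_u64(u64::MAX, 1), 0);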
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    unsafe { _vaddlvq_u8(a) as u16 }
}
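// Semantics sketch (illustrative): the long-add reductions widen each lane
// before summing, so the full sum always fits in the wider result type.
//
//     let v = vdup_n_u8(255);
//     assert_eq!(vaddlv_u8(v), 8 * 255); // 2040, no wrap in u16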
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vaddvq_f64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i32.v2i32"
        )]
        fn _vaddv_s32(a: int32x2_t) -> i32;
    }
    unsafe { _vaddv_s32(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i32.v8i8"
        )]
        fn _vaddv_s8(a: int8x8_t) -> i8;
    }
    unsafe { _vaddv_s8(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i32.v16i8"
        )]
        fn _vaddvq_s8(a: int8x16_t) -> i8;
    }
    unsafe { _vaddvq_s8(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i32.v4i16"
        )]
        fn _vaddv_s16(a: int16x4_t) -> i16;
    }
    unsafe { _vaddv_s16(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i32.v8i16"
        )]
        fn _vaddvq_s16(a: int16x8_t) -> i16;
    }
    unsafe { _vaddvq_s16(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i32.v4i32"
        )]
        fn _vaddvq_s32(a: int32x4_t) -> i32;
    }
    unsafe { _vaddvq_s32(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i32.v2i32"
        )]
        fn _vaddv_u32(a: uint32x2_t) -> u32;
    }
    unsafe { _vaddv_u32(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i32.v8i8"
        )]
        fn _vaddv_u8(a: uint8x8_t) -> u8;
    }
    unsafe { _vaddv_u8(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i32.v16i8"
        )]
        fn _vaddvq_u8(a: uint8x16_t) -> u8;
    }
    unsafe { _vaddvq_u8(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i32.v4i16"
        )]
        fn _vaddv_u16(a: uint16x4_t) -> u16;
    }
    unsafe { _vaddv_u16(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i32.v8i16"
        )]
        fn _vaddvq_u16(a: uint16x8_t) -> u16;
    }
    unsafe { _vaddvq_u16(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i32.v4i32"
        )]
        fn _vaddvq_u32(a: uint32x4_t) -> u32;
    }
    unsafe { _vaddvq_u32(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddv.i64.v2i64"
        )]
        fn _vaddvq_s64(a: int64x2_t) -> i64;
    }
    unsafe { _vaddvq_s64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddv.i64.v2i64"
        )]
        fn _vaddvq_u64(a: uint64x2_t) -> u64;
    }
    unsafe { _vaddvq_u64(a) }
}
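// By contrast with the long-add forms above, the vaddv_* reductions sum in
// the element width, so the integer variants can wrap (illustrative):
//
//     let v = vdup_n_u8(32);
//     assert_eq!(vaddv_u8(v), 0); // 8 * 32 = 256 wraps to 0 in u8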
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vamax_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f32"
        )]
        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vamaxq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f64"
        )]
        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vamaxq_f64(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vaminq_f64(a, b) }
}
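// Semantics sketch (an assumption based on the FAMAX/FAMIN instructions these
// lower to): the lanewise maximum/minimum of absolute values,
// r[i] = max(|a[i]|, |b[i]|) for vamax and r[i] = min(|a[i]|, |b[i]|) for vamin.
//
//     // hypothetical lanes: a = {-4.0, 1.0}, b = {3.0, -2.0}
//     // vamax_f32(a, b) -> {4.0, 2.0}; vamin_f32(a, b) -> {3.0, 1.0}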
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vbcaxq_u64(a, b, c) }
}
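// Semantics sketch (an assumption based on the BCAX instruction these lower
// to): a lanewise bitwise "bit clear and XOR", r = a ^ (b & !c).
//
//     // given a, b, c: uint8x16_t (hypothetical inputs)
//     let r = vbcaxq_u8(a, b, c); // lanewise a ^ (b & !c)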
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcaddq_rot90_f64(a, b) }
}
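// Semantics sketch (per the FCADD instruction; adjacent lanes form (re, im)
// pairs): the rot90 forms add `b` rotated 90 degrees counterclockwise in the
// complex plane, re' = re - b.im and im' = im + b.re (i.e. a + i*b); the
// rot270 forms compute re' = re + b.im and im' = im - b.re (i.e. a - i*b).
//
//     // given a, b: float32x2_t, each holding one complex number
//     let r = vcadd_rot90_f32(a, b); // r == a + i*b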
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    unsafe { _vcagth_f16(a, b) as u16 }
}
1372#[doc = "Floating-point absolute compare less than or equal"]
1373#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
1374#[inline]
1375#[target_feature(enable = "neon")]
1376#[cfg_attr(test, assert_instr(facge))]
1377#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1378pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1379    vcage_f64(b, a)
1380}
1381#[doc = "Floating-point absolute compare less than or equal"]
1382#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
1383#[inline]
1384#[target_feature(enable = "neon")]
1385#[cfg_attr(test, assert_instr(facge))]
1386#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1387pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1388    vcageq_f64(b, a)
1389}
1390#[doc = "Floating-point absolute compare less than or equal"]
1391#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
1392#[inline]
1393#[target_feature(enable = "neon")]
1394#[cfg_attr(test, assert_instr(facge))]
1395#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1396pub fn vcaled_f64(a: f64, b: f64) -> u64 {
1397    vcaged_f64(b, a)
1398}
1399#[doc = "Floating-point absolute compare less than or equal"]
1400#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
1401#[inline]
1402#[target_feature(enable = "neon")]
1403#[cfg_attr(test, assert_instr(facge))]
1404#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1405pub fn vcales_f32(a: f32, b: f32) -> u32 {
1406    vcages_f32(b, a)
1407}
1408#[doc = "Floating-point absolute compare less than or equal"]
1409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
1410#[inline]
1411#[cfg_attr(test, assert_instr(facge))]
1412#[target_feature(enable = "neon,fp16")]
1413#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1414pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
1415    vcageh_f16(b, a)
1416}
1417#[doc = "Floating-point absolute compare less than"]
1418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
1419#[inline]
1420#[target_feature(enable = "neon")]
1421#[cfg_attr(test, assert_instr(facgt))]
1422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1423pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1424    vcagt_f64(b, a)
1425}
1426#[doc = "Floating-point absolute compare less than"]
1427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
1428#[inline]
1429#[target_feature(enable = "neon")]
1430#[cfg_attr(test, assert_instr(facgt))]
1431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1432pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1433    vcagtq_f64(b, a)
1434}
1435#[doc = "Floating-point absolute compare less than"]
1436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
1437#[inline]
1438#[target_feature(enable = "neon")]
1439#[cfg_attr(test, assert_instr(facgt))]
1440#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1441pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
1442    vcagtd_f64(b, a)
1443}
1444#[doc = "Floating-point absolute compare less than"]
1445#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
1446#[inline]
1447#[target_feature(enable = "neon")]
1448#[cfg_attr(test, assert_instr(facgt))]
1449#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1450pub fn vcalts_f32(a: f32, b: f32) -> u32 {
1451    vcagts_f32(b, a)
1452}
1453#[doc = "Floating-point absolute compare less than"]
1454#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
1455#[inline]
1456#[cfg_attr(test, assert_instr(facgt))]
1457#[target_feature(enable = "neon,fp16")]
1458#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1459pub fn vcalth_f16(a: f16, b: f16) -> u16 {
1460    vcagth_f16(b, a)
1461}
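// Illustrative sketch (not emitted by stdarch-gen-arm): the vcale*/vcalt*
// intrinsics above have no dedicated instruction; as their bodies show, they
// reuse facge/facgt with the operands swapped, since |a| <= |b| holds exactly
// when |b| >= |a|.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcale_examples {
    use super::*;
    #[test]
    fn acle_is_acge_with_swapped_operands() {
        let a = vdup_n_f64(1.0);
        let b = vdup_n_f64(-2.0);
        // Both calls reduce to the same facge comparison.
        assert_eq!(
            vget_lane_u64::<0>(vcale_f64(a, b)),
            vget_lane_u64::<0>(vcage_f64(b, a))
        );
    }
}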
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
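// Illustrative sketch (not emitted by stdarch-gen-arm): the scalar compare
// forms return a mask rather than a bool: all ones when the comparison holds,
// zero when it does not.
#[cfg(all(test, target_arch = "aarch64"))]
mod vceq_scalar_examples {
    use super::*;
    #[test]
    fn scalar_compares_return_masks() {
        assert_eq!(vceqd_f64(1.5, 1.5), u64::MAX);
        assert_eq!(vceqd_f64(1.5, 2.5), 0);
        assert_eq!(vceqs_f32(3.0, 3.0), u32::MAX);
        assert_eq!(vceqd_s64(-7, -7), u64::MAX);
    }
}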
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
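// Illustrative sketch (not emitted by stdarch-gen-arm): the floating-point
// zero compares use IEEE 754 equality, under which -0.0 == +0.0, so a
// negative zero still sets the mask.
#[cfg(all(test, target_arch = "aarch64"))]
mod vceqz_examples {
    use super::*;
    #[test]
    fn negative_zero_compares_equal_to_zero() {
        assert_eq!(vceqzd_f64(-0.0), u64::MAX);
        assert_eq!(vceqzs_f32(-0.0), u32::MAX);
        assert_eq!(vceqzd_f64(f64::MIN_POSITIVE), 0);
    }
}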
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
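// Illustrative sketch (not emitted by stdarch-gen-arm): the floating-point
// forms are ordered IEEE 754 comparisons, so any comparison involving NaN
// fails and yields a zero mask.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcge_nan_examples {
    use super::*;
    #[test]
    fn nan_never_compares_greater_or_equal() {
        assert_eq!(vcges_f32(f32::NAN, 0.0), 0);
        assert_eq!(vcges_f32(0.0, f32::NAN), 0);
        assert_eq!(vcges_f32(0.0, 0.0), u32::MAX);
    }
}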
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
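// Illustrative sketch (not emitted by stdarch-gen-arm): for signed integers,
// "greater than or equal to zero" amounts to a per-lane sign-bit test.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcgez_examples {
    use super::*;
    #[test]
    fn gez_is_a_sign_test() {
        assert_eq!(vcgezd_s64(0), u64::MAX);
        assert_eq!(vcgezd_s64(i64::MAX), u64::MAX);
        assert_eq!(vcgezd_s64(-1), 0);
    }
}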
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgtzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
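// Illustrative sketch (not emitted by stdarch-gen-arm): unlike the vcgez*
// compares above, the vcgtz* compares are strict, so a zero lane produces a
// zero mask.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcgtz_examples {
    use super::*;
    #[test]
    fn gtz_is_strict() {
        assert_eq!(vcgtzd_s64(1), u64::MAX);
        assert_eq!(vcgtzd_s64(0), 0);
        assert_eq!(vcgtzd_f64(0.0), 0);
    }
}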
2394#[doc = "Floating-point compare less than or equal"]
2395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
2396#[inline]
2397#[target_feature(enable = "neon")]
2398#[cfg_attr(test, assert_instr(fcmge))]
2399#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2400pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
2401    unsafe { simd_le(a, b) }
2402}
2403#[doc = "Floating-point compare less than or equal"]
2404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
2405#[inline]
2406#[target_feature(enable = "neon")]
2407#[cfg_attr(test, assert_instr(fcmge))]
2408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2409pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
2410    unsafe { simd_le(a, b) }
2411}
2412#[doc = "Compare signed less than or equal"]
2413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
2414#[inline]
2415#[target_feature(enable = "neon")]
2416#[cfg_attr(test, assert_instr(cmge))]
2417#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2418pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
2419    unsafe { simd_le(a, b) }
2420}
2421#[doc = "Compare signed less than or equal"]
2422#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
2423#[inline]
2424#[target_feature(enable = "neon")]
2425#[cfg_attr(test, assert_instr(cmge))]
2426#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2427pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
2428    unsafe { simd_le(a, b) }
2429}
2430#[doc = "Compare unsigned less than or equal"]
2431#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
2432#[inline]
2433#[target_feature(enable = "neon")]
2434#[cfg_attr(test, assert_instr(cmhs))]
2435#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2436pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
2437    unsafe { simd_le(a, b) }
2438}
2439#[doc = "Compare unsigned less than or equal"]
2440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
2441#[inline]
2442#[target_feature(enable = "neon")]
2443#[cfg_attr(test, assert_instr(cmhs))]
2444#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2445pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
2446    unsafe { simd_le(a, b) }
2447}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
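// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`): the
// scalar forms return `0` or an all-ones integer rather than a vector mask.
// A minimal sketch, assuming an AArch64 target:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     assert_eq!(vcled_s64(1, 2), u64::MAX);
//     assert_eq!(vcled_s64(3, 2), 0);
// }
// ```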
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
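// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`): the
// `vclez*` family is the two-operand comparison specialised to a zero
// right-hand side, which is why each body materialises a zero vector and
// reuses `simd_le`. A minimal sketch, assuming an AArch64 target:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     let v = vdupq_n_s32(-5);
//     let mask = vclezq_s32(v);
//     assert_eq!(vgetq_lane_u32::<0>(mask), u32::MAX);
// }
// ```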
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    unsafe { transmute(vclez_s64(transmute(a))) }
}
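// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`): as
// with the other scalar forms, the `<= 0` comparisons collapse to `0` or all
// ones. A minimal sketch, assuming an AArch64 target:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     assert_eq!(vclezd_s64(0), u64::MAX);
//     assert_eq!(vclezd_s64(1), 0);
// }
// ```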
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vclezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
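// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`):
// strict `<` follows the same mask convention. The asserted instructions are
// the reversed `>`/`>=` forms (`cmgt`, `cmhi`, `fcmgt`) because AArch64 has no
// two-register "less than" compare; the compiler emits the "greater than"
// form with the operands swapped. A minimal sketch, assuming an AArch64
// target:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     let a = vdup_n_u64(1);
//     let b = vdup_n_u64(2);
//     assert_eq!(vget_lane_u64::<0>(vclt_u64(a, b)), u64::MAX);
// }
// ```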
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
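// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`): the
// floating-point comparisons are IEEE 754 ordered comparisons, so any
// comparison involving a NaN yields an all-zeros result. A minimal sketch,
// assuming an AArch64 target:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     assert_eq!(vcltd_f64(1.0, 2.0), u64::MAX);
//     assert_eq!(vcltd_f64(f64::NAN, 2.0), 0); // NaN compares false
// }
// ```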
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
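// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`):
// `vcltz*` compares each lane against a zero vector, exactly like `vclez*`
// above but with a strict inequality. A minimal sketch, assuming an AArch64
// target:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     let v = vdupq_n_s16(-1);
//     let mask = vcltzq_s16(v);
//     assert_eq!(vgetq_lane_u16::<0>(mask), u16::MAX);
// }
// ```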
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_s64(a: i64) -> u64 {
    unsafe { transmute(vcltz_s64(transmute(a))) }
}
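// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`): the
// `asr` assertion above is not a typo. For a scalar signed integer, `a < 0`
// is equivalent to broadcasting the sign bit, i.e. `(a >> 63) as u64` with an
// arithmetic shift, so the compiler lowers this intrinsic to a single shift
// rather than a compare. A minimal sketch, assuming an AArch64 target:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     assert_eq!(vcltzd_s64(-7), u64::MAX); // same as ((-7i64 >> 63) as u64)
//     assert_eq!(vcltzd_s64(7), 0);
// }
// ```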
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcltzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_f64(a, b, c) }
}
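// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`):
// following Arm's published `FCMLA` pseudocode (see the links above), each
// even/odd lane pair is one complex number (even = real, odd = imaginary).
// With rotation 0 the accumulator gains the partial products formed from the
// real part of `b`: `a.re += b.re * c.re` and `a.im += b.re * c.im` per pair.
// A minimal sketch, assuming a nightly toolchain (`stdarch_neon_fcma`) and a
// CPU with FEAT_FCMA:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     let acc = vdup_n_f32(0.0);
//     let b = vld1_f32([2.0f32, 3.0].as_ptr()); // 2 + 3i
//     let c = vld1_f32([5.0f32, 7.0].as_ptr()); // 5 + 7i
//     let r = vcmla_f32(acc, b, c); // (2 * 5, 2 * 7) = (10.0, 14.0)
//     assert_eq!(vget_lane_f32::<0>(r), 10.0);
//     assert_eq!(vget_lane_f32::<1>(r), 14.0);
// }
// ```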
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
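// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`): in
// the `_lane` variants the shuffle `[2 * LANE, 2 * LANE + 1, ...]` broadcasts
// the complex pair at index `LANE` of `c` across every pair before the plain
// `vcmla` call, which is why a `float32x2_t` source only admits `LANE == 0`
// while the `f16` variants allow wider ranges. A minimal sketch, under the
// same FEAT_FCMA assumptions as above:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     let acc = vdup_n_f32(0.0);
//     let b = vld1_f32([2.0f32, 3.0].as_ptr());
//     let c = vld1_f32([5.0f32, 7.0].as_ptr());
//     // With a two-lane `c` there is a single complex pair, so LANE must be
//     // 0 and the call is equivalent to `vcmla_f32(acc, b, c)`.
//     let r = vcmla_lane_f32::<0>(acc, b, c);
//     assert_eq!(vget_lane_f32::<0>(r), 10.0);
// }
// ```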
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
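// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`):
// rotation 180 negates the rotation-0 products, i.e. `a.re -= b.re * c.re`
// and `a.im -= b.re * c.im` per pair; it is one half of a fused complex
// multiply-subtract (see the rotation-270 note further down). A minimal
// sketch, under the same FEAT_FCMA assumptions:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     let acc = vdup_n_f32(0.0);
//     let b = vld1_f32([2.0f32, 3.0].as_ptr()); // 2 + 3i
//     let c = vld1_f32([5.0f32, 7.0].as_ptr()); // 5 + 7i
//     let r = vcmla_rot180_f32(acc, b, c); // (-2 * 5, -2 * 7)
//     assert_eq!(vget_lane_f32::<0>(r), -10.0);
//     assert_eq!(vget_lane_f32::<1>(r), -14.0);
// }
// ```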
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
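// Editorial note (illustrative sketch, not emitted by `stdarch-gen-arm`):
// rotation 270 contributes the products formed from the imaginary part of
// `b`, with a sign flip on the imaginary result: `a.re += b.im * c.im` and
// `a.im -= b.im * c.re` per pair. Combined with rotation 180 this yields a
// full complex multiply-subtract, `a - b * c`. A minimal sketch, under the
// same FEAT_FCMA assumptions:
//
// ```
// use core::arch::aarch64::*;
//
// unsafe {
//     let acc = vdup_n_f32(0.0);
//     let b = vld1_f32([2.0f32, 3.0].as_ptr()); // 2 + 3i
//     let c = vld1_f32([5.0f32, 7.0].as_ptr()); // 5 + 7i
//     let r = vcmla_rot270_f32(acc, b, c); // (3 * 7, -(3 * 5))
//     assert_eq!(vget_lane_f32::<0>(r), 21.0);
//     assert_eq!(vget_lane_f32::<1>(r), -15.0);
// }
// ```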
3570#[doc = "Floating-point complex multiply accumulate"]
3571#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
3572#[inline]
3573#[target_feature(enable = "neon,fcma")]
3574#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3575#[rustc_legacy_const_generics(3)]
3576#[target_feature(enable = "neon,fp16")]
3577#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3578pub fn vcmla_rot270_lane_f16<const LANE: i32>(
3579    a: float16x4_t,
3580    b: float16x4_t,
3581    c: float16x4_t,
3582) -> float16x4_t {
3583    static_assert_uimm_bits!(LANE, 1);
3584    unsafe {
3585        let c: float16x4_t = simd_shuffle!(
3586            c,
3587            c,
3588            [
3589                2 * LANE as u32,
3590                2 * LANE as u32 + 1,
3591                2 * LANE as u32,
3592                2 * LANE as u32 + 1
3593            ]
3594        );
3595        vcmla_rot270_f16(a, b, c)
3596    }
3597}
3598#[doc = "Floating-point complex multiply accumulate"]
3599#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
3600#[inline]
3601#[target_feature(enable = "neon,fcma")]
3602#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3603#[rustc_legacy_const_generics(3)]
3604#[target_feature(enable = "neon,fp16")]
3605#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3606pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
3607    a: float16x8_t,
3608    b: float16x8_t,
3609    c: float16x4_t,
3610) -> float16x8_t {
3611    static_assert_uimm_bits!(LANE, 1);
3612    unsafe {
3613        let c: float16x8_t = simd_shuffle!(
3614            c,
3615            c,
3616            [
3617                2 * LANE as u32,
3618                2 * LANE as u32 + 1,
3619                2 * LANE as u32,
3620                2 * LANE as u32 + 1,
3621                2 * LANE as u32,
3622                2 * LANE as u32 + 1,
3623                2 * LANE as u32,
3624                2 * LANE as u32 + 1
3625            ]
3626        );
3627        vcmlaq_rot270_f16(a, b, c)
3628    }
3629}
3630#[doc = "Floating-point complex multiply accumulate"]
3631#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
3632#[inline]
3633#[target_feature(enable = "neon,fcma")]
3634#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3635#[rustc_legacy_const_generics(3)]
3636#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3637pub fn vcmla_rot270_lane_f32<const LANE: i32>(
3638    a: float32x2_t,
3639    b: float32x2_t,
3640    c: float32x2_t,
3641) -> float32x2_t {
3642    static_assert!(LANE == 0);
3643    unsafe {
3644        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3645        vcmla_rot270_f32(a, b, c)
3646    }
3647}
3648#[doc = "Floating-point complex multiply accumulate"]
3649#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
3650#[inline]
3651#[target_feature(enable = "neon,fcma")]
3652#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3653#[rustc_legacy_const_generics(3)]
3654#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3655pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
3656    a: float32x4_t,
3657    b: float32x4_t,
3658    c: float32x2_t,
3659) -> float32x4_t {
3660    static_assert!(LANE == 0);
3661    unsafe {
3662        let c: float32x4_t = simd_shuffle!(
3663            c,
3664            c,
3665            [
3666                2 * LANE as u32,
3667                2 * LANE as u32 + 1,
3668                2 * LANE as u32,
3669                2 * LANE as u32 + 1
3670            ]
3671        );
3672        vcmlaq_rot270_f32(a, b, c)
3673    }
3674}
3675#[doc = "Floating-point complex multiply accumulate"]
3676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
3677#[inline]
3678#[target_feature(enable = "neon,fcma")]
3679#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3680#[rustc_legacy_const_generics(3)]
3681#[target_feature(enable = "neon,fp16")]
3682#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3683pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
3684    a: float16x4_t,
3685    b: float16x4_t,
3686    c: float16x8_t,
3687) -> float16x4_t {
3688    static_assert_uimm_bits!(LANE, 2);
3689    unsafe {
3690        let c: float16x4_t = simd_shuffle!(
3691            c,
3692            c,
3693            [
3694                2 * LANE as u32,
3695                2 * LANE as u32 + 1,
3696                2 * LANE as u32,
3697                2 * LANE as u32 + 1
3698            ]
3699        );
3700        vcmla_rot270_f16(a, b, c)
3701    }
3702}
3703#[doc = "Floating-point complex multiply accumulate"]
3704#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
3705#[inline]
3706#[target_feature(enable = "neon,fcma")]
3707#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3708#[rustc_legacy_const_generics(3)]
3709#[target_feature(enable = "neon,fp16")]
3710#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3711pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
3712    a: float16x8_t,
3713    b: float16x8_t,
3714    c: float16x8_t,
3715) -> float16x8_t {
3716    static_assert_uimm_bits!(LANE, 2);
3717    unsafe {
3718        let c: float16x8_t = simd_shuffle!(
3719            c,
3720            c,
3721            [
3722                2 * LANE as u32,
3723                2 * LANE as u32 + 1,
3724                2 * LANE as u32,
3725                2 * LANE as u32 + 1,
3726                2 * LANE as u32,
3727                2 * LANE as u32 + 1,
3728                2 * LANE as u32,
3729                2 * LANE as u32 + 1
3730            ]
3731        );
3732        vcmlaq_rot270_f16(a, b, c)
3733    }
3734}
3735#[doc = "Floating-point complex multiply accumulate"]
3736#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
3737#[inline]
3738#[target_feature(enable = "neon,fcma")]
3739#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3740#[rustc_legacy_const_generics(3)]
3741#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3742pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
3743    a: float32x2_t,
3744    b: float32x2_t,
3745    c: float32x4_t,
3746) -> float32x2_t {
3747    static_assert_uimm_bits!(LANE, 1);
3748    unsafe {
3749        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3750        vcmla_rot270_f32(a, b, c)
3751    }
3752}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
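// A single `vcmla*` call computes only half of the partial products of a
// complex multiply; chaining rotations completes it. Minimal sketch (an
// illustration, not generated code: it assumes a nightly toolchain with
// `stdarch_neon_fcma` enabled, a CPU with FCMA support, and `acc`, `b`, `c`
// as `float32x4_t` values holding interleaved re/im pairs):
//
//     let acc = vcmlaq_f32(acc, b, c);        // rotation 0 terms
//     let acc = vcmlaq_rot90_f32(acc, b, c);  // rotation 90 terms
//
// which together accumulate `b * c` onto each complex element of `acc`.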
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
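// Note that only the quadword form exists for `f64`: a `float64x1_t` cannot
// hold an interleaved re/im pair, so there is no doubleword `vcmla_rot90_f64`.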
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
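// Here `c` is a `float32x2_t`, i.e. a single re/im pair, so the only valid
// lane index is 0 and the assertion pins `LANE == 0` instead of checking a
// bit width.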
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
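// The `vcopy_*` family replaces lane `LANE1` of `a` with lane `LANE2` of `b`.
// `simd_shuffle!` indexes into the concatenation of its two operands, so for
// two-element vectors `b`'s lanes sit at indices 2 and 3. For example,
// `vcopy_lane_f32::<1, 0>(a, b)` selects the `LANE1 == 1` arm and yields
// indices `[0, 2]`, i.e. the vector `[a[0], b[0]]`.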
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
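// In the `_laneq_` variants the source `b` is wider than `a`, and
// `simd_shuffle!` requires operands of equal length, so `a` is first widened
// by a self-shuffle (indices past `a`'s length select from the second copy of
// `a`); the outer match then only ever picks lanes from the low half of that
// widened value.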
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
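// For the one-lane sources here and in the `s64`/`u64`/`p64` variants below,
// `LANE2` can only be 0, and `simd_shuffle!(b, b, [0, 1])` widens `b` to two
// lanes (index 1 selects element 0 of the second operand copy) so the usual
// concatenate-and-shuffle insertion still applies.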
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    let b: int8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            _ => unreachable_unchecked(),
        }
    }
}
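// Although the sixteen-way match looks heavyweight, `LANE1` and `LANE2` are
// const generics, so every arm but one is discarded during monomorphization;
// in practice the function is expected to lower to a single `mov` (INS)
// instruction, which is what the `assert_instr(mov, ...)` test attribute
// checks.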
4974#[doc = "Insert vector element from another vector element"]
4975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
4976#[inline]
4977#[target_feature(enable = "neon")]
4978#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
4979#[rustc_legacy_const_generics(1, 3)]
4980#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4981pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
4982    a: int16x8_t,
4983    b: int16x4_t,
4984) -> int16x8_t {
4985    static_assert_uimm_bits!(LANE1, 3);
4986    static_assert_uimm_bits!(LANE2, 2);
4987    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
4988    unsafe {
4989        match LANE1 & 0b111 {
4990            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
4991            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
4992            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
4993            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
4994            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
4995            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
4996            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
4997            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
4998            _ => unreachable_unchecked(),
4999        }
5000    }
5001}
5002#[doc = "Insert vector element from another vector element"]
5003#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
5004#[inline]
5005#[target_feature(enable = "neon")]
5006#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5007#[rustc_legacy_const_generics(1, 3)]
5008#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5009pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
5010    a: int32x4_t,
5011    b: int32x2_t,
5012) -> int32x4_t {
5013    static_assert_uimm_bits!(LANE1, 2);
5014    static_assert_uimm_bits!(LANE2, 1);
5015    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
5016    unsafe {
5017        match LANE1 & 0b11 {
5018            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5019            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5020            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5021            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5022            _ => unreachable_unchecked(),
5023        }
5024    }
5025}
5026#[doc = "Insert vector element from another vector element"]
5027#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
5028#[inline]
5029#[target_feature(enable = "neon")]
5030#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5031#[rustc_legacy_const_generics(1, 3)]
5032#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5033pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
5034    a: uint8x16_t,
5035    b: uint8x8_t,
5036) -> uint8x16_t {
5037    static_assert_uimm_bits!(LANE1, 4);
5038    static_assert_uimm_bits!(LANE2, 3);
5039    let b: uint8x16_t =
5040        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5041    unsafe {
5042        match LANE1 & 0b1111 {
5043            0 => simd_shuffle!(
5044                a,
5045                b,
5046                [
5047                    16 + LANE2 as u32,
5048                    1,
5049                    2,
5050                    3,
5051                    4,
5052                    5,
5053                    6,
5054                    7,
5055                    8,
5056                    9,
5057                    10,
5058                    11,
5059                    12,
5060                    13,
5061                    14,
5062                    15
5063                ]
5064            ),
5065            1 => simd_shuffle!(
5066                a,
5067                b,
5068                [
5069                    0,
5070                    16 + LANE2 as u32,
5071                    2,
5072                    3,
5073                    4,
5074                    5,
5075                    6,
5076                    7,
5077                    8,
5078                    9,
5079                    10,
5080                    11,
5081                    12,
5082                    13,
5083                    14,
5084                    15
5085                ]
5086            ),
5087            2 => simd_shuffle!(
5088                a,
5089                b,
5090                [
5091                    0,
5092                    1,
5093                    16 + LANE2 as u32,
5094                    3,
5095                    4,
5096                    5,
5097                    6,
5098                    7,
5099                    8,
5100                    9,
5101                    10,
5102                    11,
5103                    12,
5104                    13,
5105                    14,
5106                    15
5107                ]
5108            ),
5109            3 => simd_shuffle!(
5110                a,
5111                b,
5112                [
5113                    0,
5114                    1,
5115                    2,
5116                    16 + LANE2 as u32,
5117                    4,
5118                    5,
5119                    6,
5120                    7,
5121                    8,
5122                    9,
5123                    10,
5124                    11,
5125                    12,
5126                    13,
5127                    14,
5128                    15
5129                ]
5130            ),
5131            4 => simd_shuffle!(
5132                a,
5133                b,
5134                [
5135                    0,
5136                    1,
5137                    2,
5138                    3,
5139                    16 + LANE2 as u32,
5140                    5,
5141                    6,
5142                    7,
5143                    8,
5144                    9,
5145                    10,
5146                    11,
5147                    12,
5148                    13,
5149                    14,
5150                    15
5151                ]
5152            ),
5153            5 => simd_shuffle!(
5154                a,
5155                b,
5156                [
5157                    0,
5158                    1,
5159                    2,
5160                    3,
5161                    4,
5162                    16 + LANE2 as u32,
5163                    6,
5164                    7,
5165                    8,
5166                    9,
5167                    10,
5168                    11,
5169                    12,
5170                    13,
5171                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x4_t,
) -> uint16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
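// NOTE (editorial): a minimal usage sketch, not part of the generated file. The
// name `_vcopyq_lane_u16_example` is hypothetical, and it assumes the safe NEON
// helpers `vdupq_n_u16`, `vdup_n_u16` and `vgetq_lane_u16` defined elsewhere in
// this crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vcopyq_lane_u16_example() {
    let a: uint16x8_t = vdupq_n_u16(1);
    let b: uint16x4_t = vdup_n_u16(9);
    // Replace lane 2 of the 128-bit `a` with lane 1 of the 64-bit `b`;
    // every other lane of `a` is carried through unchanged.
    let r = vcopyq_lane_u16::<2, 1>(a, b);
    assert_eq!(vgetq_lane_u16::<2>(r), 9);
    assert_eq!(vgetq_lane_u16::<0>(r), 1);
}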
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x2_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x8_t,
) -> poly8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    let b: poly8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
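// NOTE (editorial): the polynomial variants are bit-for-bit identical to the
// unsigned ones; only the element type differs. A hypothetical sketch
// (`_vcopyq_lane_p8_example`), assuming the safe `vdupq_n_p8`, `vdup_n_p8` and
// `vgetq_lane_p8` helpers defined elsewhere in this crate:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vcopyq_lane_p8_example() {
    let a: poly8x16_t = vdupq_n_p8(0xAA);
    let b: poly8x8_t = vdup_n_p8(0x55);
    // LANE1 may address all 16 destination lanes; LANE2 only the 8 source lanes.
    let r = vcopyq_lane_p8::<15, 3>(a, b);
    assert_eq!(vgetq_lane_p8::<15>(r), 0x55);
    assert_eq!(vgetq_lane_p8::<0>(r), 0xAA);
}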
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x4_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
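// NOTE (editorial): in the `laneq` form both operands are 128-bit, so LANE2
// ranges over all four source lanes. A hypothetical sketch
// (`_vcopyq_laneq_f32_example`), assuming the safe `vdupq_n_f32` and
// `vgetq_lane_f32` helpers defined elsewhere in this crate:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vcopyq_laneq_f32_example() {
    let a: float32x4_t = vdupq_n_f32(1.0);
    let b: float32x4_t = vdupq_n_f32(2.5);
    // Copy lane 0 of `b` into lane 3 of `a`.
    let r = vcopyq_laneq_f32::<3, 0>(a, b);
    assert_eq!(vgetq_lane_f32::<3>(r), 2.5);
    assert_eq!(vgetq_lane_f32::<0>(r), 1.0);
}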
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
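// NOTE (editorial): with two 64-bit lanes both immediates are single bits, so
// the only valid lane indices are 0 and 1. A hypothetical sketch
// (`_vcopyq_laneq_f64_example`), assuming safe `vdupq_n_f64`/`vgetq_lane_f64`:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vcopyq_laneq_f64_example() {
    let a: float64x2_t = vdupq_n_f64(1.0);
    let b: float64x2_t = vdupq_n_f64(-4.0);
    // Copy lane 0 of `b` into lane 1 of `a`; lane 0 of `a` is kept.
    let r = vcopyq_laneq_f64::<1, 0>(a, b);
    assert_eq!(vgetq_lane_f64::<0>(r), 1.0);
    assert_eq!(vgetq_lane_f64::<1>(r), -4.0);
}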
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
    a: int8x16_t,
    b: int8x16_t,
) -> int8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
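// NOTE (editorial): both immediates are 4-bit for the 16-lane form; an
// out-of-range value such as `vcopyq_laneq_s8::<16, 0>` is rejected at compile
// time by `static_assert_uimm_bits!`. A hypothetical sketch
// (`_vcopyq_laneq_s8_example`), assuming safe `vdupq_n_s8`/`vgetq_lane_s8`:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vcopyq_laneq_s8_example() {
    let a: int8x16_t = vdupq_n_s8(0);
    let b: int8x16_t = vdupq_n_s8(7);
    // Copy lane 15 of `b` into lane 9 of `a`.
    let r = vcopyq_laneq_s8::<9, 15>(a, b);
    assert_eq!(vgetq_lane_s8::<9>(r), 7);
    assert_eq!(vgetq_lane_s8::<8>(r), 0);
}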
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x8_t,
) -> int16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x4_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x2_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x16_t,
    b: uint8x16_t,
) -> uint8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x8_t,
) -> uint16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x16_t,
) -> poly8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
7178#[doc = "Insert vector element from another vector element"]
7179#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
7180#[inline]
7181#[target_feature(enable = "neon")]
7182#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
7183#[rustc_legacy_const_generics(1, 3)]
7184#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7185pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
7186    a: poly64x2_t,
7187    b: poly64x2_t,
7188) -> poly64x2_t {
7189    static_assert_uimm_bits!(LANE1, 1);
7190    static_assert_uimm_bits!(LANE2, 1);
7191    unsafe {
7192        match LANE1 & 0b1 {
7193            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
7194            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
7195            _ => unreachable_unchecked(),
7196        }
7197    }
7198}
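// Usage sketch (hedged, for exposition only): `vcopyq_laneq_p16::<LANE1, LANE2>`
// returns `a` with lane LANE1 replaced by lane LANE2 of `b`. The example
// function below is hypothetical and assumes an AArch64 target with NEON.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcopyq_laneq_p16() {
    let a: poly16x8_t = vdupq_n_p16(0x1111); // every lane 0x1111
    let b: poly16x8_t = vdupq_n_p16(0xffff); // every lane 0xffff
    let c = vcopyq_laneq_p16::<2, 5>(a, b); // copy lane 5 of b into lane 2 of a
    assert_eq!(vgetq_lane_p16::<2>(c), 0xffff);
    assert_eq!(vgetq_lane_p16::<0>(c), 0x1111);
}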
#[doc = "Create a float64x1_t vector from a 64-bit bit pattern"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcreate_f64(a: u64) -> float64x1_t {
    unsafe { transmute(a) }
}
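// Usage sketch (hedged): `vcreate_f64` is a bit-pattern reinterpretation
// (a transmute), not a numeric conversion, so pass the IEEE-754 bits.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcreate_f64() {
    let v: float64x1_t = vcreate_f64(1.0f64.to_bits()); // 0x3ff0_0000_0000_0000
    assert_eq!(vget_lane_f64::<0>(v), 1.0);
}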
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
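// Usage sketch (hedged): narrowing to f32 and widening back is lossless for
// values exactly representable in f32; other values are rounded by FCVTN.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_f32_f64_roundtrip() {
    let wide: float64x2_t = vdupq_n_f64(1.5); // exactly representable in f32
    let narrow: float32x2_t = vcvt_f32_f64(wide);
    let back: float64x2_t = vcvt_f64_f32(narrow);
    assert_eq!(vgetq_lane_f64::<0>(back), 1.5);
}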
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
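// Usage sketch (hedged): SCVTF converts each signed integer lane exactly
// whenever the value fits in the f64 mantissa.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_f64_s64() {
    let x: int64x1_t = vdup_n_s64(-7);
    let f: float64x1_t = vcvt_f64_s64(x);
    assert_eq!(vget_lane_f64::<0>(f), -7.0);
}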
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe {
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        simd_cast(b)
    }
}
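// Usage sketch (hedged): the `_high_` narrowing form keeps `a` as the low
// half of the result and packs the narrowed `b` into the high half.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_high_f32_f64() {
    let lo: float32x2_t = vdup_n_f32(1.0);
    let hi: float64x2_t = vdupq_n_f64(2.0);
    let packed: float32x4_t = vcvt_high_f32_f64(lo, hi);
    assert_eq!(vgetq_lane_f32::<0>(packed), 1.0);
    assert_eq!(vgetq_lane_f32::<3>(packed), 2.0);
}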
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
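// Usage sketch (hedged): N is the number of fractional bits, so the
// fixed-point input is scaled by 2^-N during the conversion.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_n_f64_s64() {
    let q1: int64x1_t = vdup_n_s64(3); // 3 / 2^1 = 1.5
    let f: float64x1_t = vcvt_n_f64_s64::<1>(q1);
    assert_eq!(vget_lane_f64::<0>(f), 1.5);
}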
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
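// Usage sketch (hedged): the float-to-fixed direction multiplies by 2^N and
// then truncates toward zero.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_n_s64_f64() {
    let f: float64x1_t = vdup_n_f64(1.75);
    let q2: int64x1_t = vcvt_n_s64_f64::<2>(f); // 1.75 * 2^2 = 7
    assert_eq!(vget_lane_s64::<0>(q2), 7);
}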
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtq_u64_f64(a) }
}
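// Usage sketch (hedged): these conversions truncate toward zero and
// saturate, so negative inputs clamp to 0 in the unsigned variant.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_s64_u64_f64() {
    let f: float64x1_t = vdup_n_f64(-2.9);
    assert_eq!(vget_lane_s64::<0>(vcvt_s64_f64(f)), -2); // truncated
    assert_eq!(vget_lane_u64::<0>(vcvt_u64_f64(f)), 0); // saturated
}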
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtaq_u64_f64(a) }
}
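// Usage sketch (hedged): FCVTAS rounds to nearest with ties away from zero,
// unlike the truncating vcvt_s64_f64 above.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvta_s64_f64() {
    assert_eq!(vget_lane_s64::<0>(vcvta_s64_f64(vdup_n_f64(2.5))), 3);
    assert_eq!(vget_lane_s64::<0>(vcvta_s64_f64(vdup_n_f64(-2.5))), -3);
}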
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtah_u64_f16(a) }
}
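// Usage sketch (hedged): the 16-bit result is produced by the 32-bit
// conversion above plus a narrowing cast; ties still round away from zero.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16")]
fn example_vcvtah_s16_f16() {
    let half: f16 = vcvth_n_f16_s16::<1>(1); // 1 / 2^1 = 0.5
    assert_eq!(vcvtah_s16_f16(half), 1); // tie rounds away from zero
}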
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtad_u64_f64(a) }
}
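// Usage sketch (hedged): the s/d-suffixed forms are plain scalar conversions
// with the same ties-away rounding as the vector forms.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvtas_s32_f32() {
    assert_eq!(vcvtas_s32_f32(0.5), 1); // tie away from zero
    assert_eq!(vcvtas_s32_f32(-0.5), -1);
    assert_eq!(vcvtad_u64_f64(3.5), 4);
}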
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    a as f16
}
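// Usage sketch (hedged): the h-suffixed converts are scalar int <-> f16
// round trips; small integers are exactly representable in f16.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16")]
fn example_vcvth_f16_s16() {
    let h: f16 = vcvth_f16_s16(-3); // -3.0 as f16
    assert_eq!(vcvth_s16_f16(h), -3);
}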
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_f16_s32::<N>(a as i32) as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_f16_u32::<N>(a as u32) as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    unsafe { _vcvth_n_u64_f16(a, N) }
}
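// Usage sketch (hedged): the same 2^N fixed-point scaling as the vector
// `_n_` conversions, applied to f16 scalars.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16")]
fn example_vcvth_n_s16_f16() {
    let h: f16 = vcvth_f16_s16(3); // 3.0
    assert_eq!(vcvth_n_s16_f16::<1>(h), 6); // 3.0 * 2^1 = 6
}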
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    a as u64
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtmq_u64_f64(a) }
}
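// Usage sketch (hedged): FCVTMS floors toward minus infinity, so -0.5
// becomes -1 rather than truncating to 0.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvtm_s64_f64() {
    assert_eq!(vget_lane_s64::<0>(vcvtm_s64_f64(vdup_n_f64(-0.5))), -1);
    assert_eq!(vget_lane_s64::<0>(vcvtm_s64_f64(vdup_n_f64(2.9))), 2);
}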
8353#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
8354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
8355#[inline]
8356#[cfg_attr(test, assert_instr(fcvtms))]
8357#[target_feature(enable = "neon,fp16")]
8358#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8359pub fn vcvtmh_s16_f16(a: f16) -> i16 {
8360    vcvtmh_s32_f16(a) as i16
8361}
8362#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
8363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
8364#[inline]
8365#[cfg_attr(test, assert_instr(fcvtms))]
8366#[target_feature(enable = "neon,fp16")]
8367#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8368pub fn vcvtmh_s32_f16(a: f16) -> i32 {
8369    unsafe extern "unadjusted" {
8370        #[cfg_attr(
8371            any(target_arch = "aarch64", target_arch = "arm64ec"),
8372            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
8373        )]
8374        fn _vcvtmh_s32_f16(a: f16) -> i32;
8375    }
8376    unsafe { _vcvtmh_s32_f16(a) }
8377}
8378#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8379#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
8380#[inline]
8381#[cfg_attr(test, assert_instr(fcvtms))]
8382#[target_feature(enable = "neon,fp16")]
8383#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8384pub fn vcvtmh_s64_f16(a: f16) -> i64 {
8385    unsafe extern "unadjusted" {
8386        #[cfg_attr(
8387            any(target_arch = "aarch64", target_arch = "arm64ec"),
8388            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
8389        )]
8390        fn _vcvtmh_s64_f16(a: f16) -> i64;
8391    }
8392    unsafe { _vcvtmh_s64_f16(a) }
8393}
8394#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
8396#[inline]
8397#[cfg_attr(test, assert_instr(fcvtmu))]
8398#[target_feature(enable = "neon,fp16")]
8399#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8400pub fn vcvtmh_u16_f16(a: f16) -> u16 {
8401    vcvtmh_u32_f16(a) as u16
8402}
8403#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
8405#[inline]
8406#[cfg_attr(test, assert_instr(fcvtmu))]
8407#[target_feature(enable = "neon,fp16")]
8408#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8409pub fn vcvtmh_u32_f16(a: f16) -> u32 {
8410    unsafe extern "unadjusted" {
8411        #[cfg_attr(
8412            any(target_arch = "aarch64", target_arch = "arm64ec"),
8413            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
8414        )]
8415        fn _vcvtmh_u32_f16(a: f16) -> u32;
8416    }
8417    unsafe { _vcvtmh_u32_f16(a) }
8418}
8419#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8420#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
8421#[inline]
8422#[cfg_attr(test, assert_instr(fcvtmu))]
8423#[target_feature(enable = "neon,fp16")]
8424#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8425pub fn vcvtmh_u64_f16(a: f16) -> u64 {
8426    unsafe extern "unadjusted" {
8427        #[cfg_attr(
8428            any(target_arch = "aarch64", target_arch = "arm64ec"),
8429            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
8430        )]
8431        fn _vcvtmh_u64_f16(a: f16) -> u64;
8432    }
8433    unsafe { _vcvtmh_u64_f16(a) }
8434}
8435#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
8437#[inline]
8438#[target_feature(enable = "neon")]
8439#[cfg_attr(test, assert_instr(fcvtms))]
8440#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8441pub fn vcvtms_s32_f32(a: f32) -> i32 {
8442    unsafe extern "unadjusted" {
8443        #[cfg_attr(
8444            any(target_arch = "aarch64", target_arch = "arm64ec"),
8445            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
8446        )]
8447        fn _vcvtms_s32_f32(a: f32) -> i32;
8448    }
8449    unsafe { _vcvtms_s32_f32(a) }
8450}
8451#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8452#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
8453#[inline]
8454#[target_feature(enable = "neon")]
8455#[cfg_attr(test, assert_instr(fcvtms))]
8456#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8457pub fn vcvtmd_s64_f64(a: f64) -> i64 {
8458    unsafe extern "unadjusted" {
8459        #[cfg_attr(
8460            any(target_arch = "aarch64", target_arch = "arm64ec"),
8461            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
8462        )]
8463        fn _vcvtmd_s64_f64(a: f64) -> i64;
8464    }
8465    unsafe { _vcvtmd_s64_f64(a) }
8466}
8467#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8468#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
8469#[inline]
8470#[target_feature(enable = "neon")]
8471#[cfg_attr(test, assert_instr(fcvtmu))]
8472#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8473pub fn vcvtms_u32_f32(a: f32) -> u32 {
8474    unsafe extern "unadjusted" {
8475        #[cfg_attr(
8476            any(target_arch = "aarch64", target_arch = "arm64ec"),
8477            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
8478        )]
8479        fn _vcvtms_u32_f32(a: f32) -> u32;
8480    }
8481    unsafe { _vcvtms_u32_f32(a) }
8482}
8483#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8484#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
8485#[inline]
8486#[target_feature(enable = "neon")]
8487#[cfg_attr(test, assert_instr(fcvtmu))]
8488#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8489pub fn vcvtmd_u64_f64(a: f64) -> u64 {
8490    unsafe extern "unadjusted" {
8491        #[cfg_attr(
8492            any(target_arch = "aarch64", target_arch = "arm64ec"),
8493            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
8494        )]
8495        fn _vcvtmd_u64_f64(a: f64) -> u64;
8496    }
8497    unsafe { _vcvtmd_u64_f64(a) }
8498}
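// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness used elsewhere in this repository, and the
// module and test names are ours. Unlike a truncating `as` cast, FCVTMS rounds
// toward minus infinity, so -1.5 converts to -2 rather than -1.
#[cfg(test)]
mod vcvtm_scalar_example {
    use crate::core_arch::arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn rounds_toward_minus_infinity() {
        assert_eq!(vcvtms_s32_f32(-1.5), -2);
        assert_eq!(vcvtms_s32_f32(1.9), 1);
        assert_eq!(vcvtmd_s64_f64(-0.1), -1);
    }
}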
8499#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
8501#[inline]
8502#[cfg_attr(test, assert_instr(fcvtns))]
8503#[target_feature(enable = "neon,fp16")]
8504#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8505pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
8506    unsafe extern "unadjusted" {
8507        #[cfg_attr(
8508            any(target_arch = "aarch64", target_arch = "arm64ec"),
8509            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
8510        )]
8511        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
8512    }
8513    unsafe { _vcvtn_s16_f16(a) }
8514}
8515#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8516#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
8517#[inline]
8518#[cfg_attr(test, assert_instr(fcvtns))]
8519#[target_feature(enable = "neon,fp16")]
8520#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8521pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
8522    unsafe extern "unadjusted" {
8523        #[cfg_attr(
8524            any(target_arch = "aarch64", target_arch = "arm64ec"),
8525            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
8526        )]
8527        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
8528    }
8529    unsafe { _vcvtnq_s16_f16(a) }
8530}
8531#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8532#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
8533#[inline]
8534#[target_feature(enable = "neon")]
8535#[cfg_attr(test, assert_instr(fcvtns))]
8536#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8537pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
8538    unsafe extern "unadjusted" {
8539        #[cfg_attr(
8540            any(target_arch = "aarch64", target_arch = "arm64ec"),
8541            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
8542        )]
8543        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
8544    }
8545    unsafe { _vcvtn_s32_f32(a) }
8546}
8547#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8548#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
8549#[inline]
8550#[target_feature(enable = "neon")]
8551#[cfg_attr(test, assert_instr(fcvtns))]
8552#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8553pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
8554    unsafe extern "unadjusted" {
8555        #[cfg_attr(
8556            any(target_arch = "aarch64", target_arch = "arm64ec"),
8557            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
8558        )]
8559        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
8560    }
8561    unsafe { _vcvtnq_s32_f32(a) }
8562}
8563#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8564#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
8565#[inline]
8566#[target_feature(enable = "neon")]
8567#[cfg_attr(test, assert_instr(fcvtns))]
8568#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8569pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
8570    unsafe extern "unadjusted" {
8571        #[cfg_attr(
8572            any(target_arch = "aarch64", target_arch = "arm64ec"),
8573            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
8574        )]
8575        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
8576    }
8577    unsafe { _vcvtn_s64_f64(a) }
8578}
8579#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8580#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
8581#[inline]
8582#[target_feature(enable = "neon")]
8583#[cfg_attr(test, assert_instr(fcvtns))]
8584#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8585pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
8586    unsafe extern "unadjusted" {
8587        #[cfg_attr(
8588            any(target_arch = "aarch64", target_arch = "arm64ec"),
8589            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
8590        )]
8591        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
8592    }
8593    unsafe { _vcvtnq_s64_f64(a) }
8594}
8595#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
8597#[inline]
8598#[cfg_attr(test, assert_instr(fcvtnu))]
8599#[target_feature(enable = "neon,fp16")]
8600#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8601pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
8602    unsafe extern "unadjusted" {
8603        #[cfg_attr(
8604            any(target_arch = "aarch64", target_arch = "arm64ec"),
8605            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
8606        )]
8607        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
8608    }
8609    unsafe { _vcvtn_u16_f16(a) }
8610}
8611#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8612#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
8613#[inline]
8614#[cfg_attr(test, assert_instr(fcvtnu))]
8615#[target_feature(enable = "neon,fp16")]
8616#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8617pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
8618    unsafe extern "unadjusted" {
8619        #[cfg_attr(
8620            any(target_arch = "aarch64", target_arch = "arm64ec"),
8621            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
8622        )]
8623        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
8624    }
8625    unsafe { _vcvtnq_u16_f16(a) }
8626}
8627#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8628#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
8629#[inline]
8630#[target_feature(enable = "neon")]
8631#[cfg_attr(test, assert_instr(fcvtnu))]
8632#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8633pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
8634    unsafe extern "unadjusted" {
8635        #[cfg_attr(
8636            any(target_arch = "aarch64", target_arch = "arm64ec"),
8637            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
8638        )]
8639        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
8640    }
8641    unsafe { _vcvtn_u32_f32(a) }
8642}
8643#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8644#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
8645#[inline]
8646#[target_feature(enable = "neon")]
8647#[cfg_attr(test, assert_instr(fcvtnu))]
8648#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8649pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
8650    unsafe extern "unadjusted" {
8651        #[cfg_attr(
8652            any(target_arch = "aarch64", target_arch = "arm64ec"),
8653            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
8654        )]
8655        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
8656    }
8657    unsafe { _vcvtnq_u32_f32(a) }
8658}
8659#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8660#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
8661#[inline]
8662#[target_feature(enable = "neon")]
8663#[cfg_attr(test, assert_instr(fcvtnu))]
8664#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8665pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
8666    unsafe extern "unadjusted" {
8667        #[cfg_attr(
8668            any(target_arch = "aarch64", target_arch = "arm64ec"),
8669            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
8670        )]
8671        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
8672    }
8673    unsafe { _vcvtn_u64_f64(a) }
8674}
8675#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
8677#[inline]
8678#[target_feature(enable = "neon")]
8679#[cfg_attr(test, assert_instr(fcvtnu))]
8680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8681pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
8682    unsafe extern "unadjusted" {
8683        #[cfg_attr(
8684            any(target_arch = "aarch64", target_arch = "arm64ec"),
8685            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
8686        )]
8687        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
8688    }
8689    unsafe { _vcvtnq_u64_f64(a) }
8690}
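// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and little-endian lane order, and the
// names are ours. Both inputs sit exactly halfway between two integers, and
// FCVTNS resolves each tie to the even neighbour.
#[cfg(test)]
mod vcvtn_vector_example {
    use crate::core_arch::arch::aarch64::*;
    use core::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn ties_resolve_to_even_per_lane() {
        let a: float32x2_t = transmute([0.5f32, 1.5]);
        let r: [i32; 2] = transmute(vcvtn_s32_f32(a));
        assert_eq!(r, [0, 2]);
    }
}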
8691#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8692#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
8693#[inline]
8694#[cfg_attr(test, assert_instr(fcvtns))]
8695#[target_feature(enable = "neon,fp16")]
8696#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8697pub fn vcvtnh_s16_f16(a: f16) -> i16 {
8698    vcvtnh_s32_f16(a) as i16
8699}
8700#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8701#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
8702#[inline]
8703#[cfg_attr(test, assert_instr(fcvtns))]
8704#[target_feature(enable = "neon,fp16")]
8705#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8706pub fn vcvtnh_s32_f16(a: f16) -> i32 {
8707    unsafe extern "unadjusted" {
8708        #[cfg_attr(
8709            any(target_arch = "aarch64", target_arch = "arm64ec"),
8710            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
8711        )]
8712        fn _vcvtnh_s32_f16(a: f16) -> i32;
8713    }
8714    unsafe { _vcvtnh_s32_f16(a) }
8715}
8716#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8717#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
8718#[inline]
8719#[cfg_attr(test, assert_instr(fcvtns))]
8720#[target_feature(enable = "neon,fp16")]
8721#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8722pub fn vcvtnh_s64_f16(a: f16) -> i64 {
8723    unsafe extern "unadjusted" {
8724        #[cfg_attr(
8725            any(target_arch = "aarch64", target_arch = "arm64ec"),
8726            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
8727        )]
8728        fn _vcvtnh_s64_f16(a: f16) -> i64;
8729    }
8730    unsafe { _vcvtnh_s64_f16(a) }
8731}
8732#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8733#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
8734#[inline]
8735#[cfg_attr(test, assert_instr(fcvtnu))]
8736#[target_feature(enable = "neon,fp16")]
8737#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8738pub fn vcvtnh_u16_f16(a: f16) -> u16 {
8739    vcvtnh_u32_f16(a) as u16
8740}
8741#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8742#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
8743#[inline]
8744#[cfg_attr(test, assert_instr(fcvtnu))]
8745#[target_feature(enable = "neon,fp16")]
8746#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8747pub fn vcvtnh_u32_f16(a: f16) -> u32 {
8748    unsafe extern "unadjusted" {
8749        #[cfg_attr(
8750            any(target_arch = "aarch64", target_arch = "arm64ec"),
8751            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
8752        )]
8753        fn _vcvtnh_u32_f16(a: f16) -> u32;
8754    }
8755    unsafe { _vcvtnh_u32_f16(a) }
8756}
8757#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8758#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
8759#[inline]
8760#[cfg_attr(test, assert_instr(fcvtnu))]
8761#[target_feature(enable = "neon,fp16")]
8762#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8763pub fn vcvtnh_u64_f16(a: f16) -> u64 {
8764    unsafe extern "unadjusted" {
8765        #[cfg_attr(
8766            any(target_arch = "aarch64", target_arch = "arm64ec"),
8767            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
8768        )]
8769        fn _vcvtnh_u64_f16(a: f16) -> u64;
8770    }
8771    unsafe { _vcvtnh_u64_f16(a) }
8772}
8773#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8774#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
8775#[inline]
8776#[target_feature(enable = "neon")]
8777#[cfg_attr(test, assert_instr(fcvtns))]
8778#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8779pub fn vcvtns_s32_f32(a: f32) -> i32 {
8780    unsafe extern "unadjusted" {
8781        #[cfg_attr(
8782            any(target_arch = "aarch64", target_arch = "arm64ec"),
8783            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
8784        )]
8785        fn _vcvtns_s32_f32(a: f32) -> i32;
8786    }
8787    unsafe { _vcvtns_s32_f32(a) }
8788}
8789#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8790#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
8791#[inline]
8792#[target_feature(enable = "neon")]
8793#[cfg_attr(test, assert_instr(fcvtns))]
8794#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8795pub fn vcvtnd_s64_f64(a: f64) -> i64 {
8796    unsafe extern "unadjusted" {
8797        #[cfg_attr(
8798            any(target_arch = "aarch64", target_arch = "arm64ec"),
8799            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
8800        )]
8801        fn _vcvtnd_s64_f64(a: f64) -> i64;
8802    }
8803    unsafe { _vcvtnd_s64_f64(a) }
8804}
8805#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
8807#[inline]
8808#[target_feature(enable = "neon")]
8809#[cfg_attr(test, assert_instr(fcvtnu))]
8810#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8811pub fn vcvtns_u32_f32(a: f32) -> u32 {
8812    unsafe extern "unadjusted" {
8813        #[cfg_attr(
8814            any(target_arch = "aarch64", target_arch = "arm64ec"),
8815            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
8816        )]
8817        fn _vcvtns_u32_f32(a: f32) -> u32;
8818    }
8819    unsafe { _vcvtns_u32_f32(a) }
8820}
8821#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8822#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
8823#[inline]
8824#[target_feature(enable = "neon")]
8825#[cfg_attr(test, assert_instr(fcvtnu))]
8826#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8827pub fn vcvtnd_u64_f64(a: f64) -> u64 {
8828    unsafe extern "unadjusted" {
8829        #[cfg_attr(
8830            any(target_arch = "aarch64", target_arch = "arm64ec"),
8831            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
8832        )]
8833        fn _vcvtnd_u64_f64(a: f64) -> u64;
8834    }
8835    unsafe { _vcvtnd_u64_f64(a) }
8836}
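// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and the names are ours. Round-to-nearest
// with ties-to-even means 2.5 and 3.5 both land on the even integer.
#[cfg(test)]
mod vcvtn_scalar_example {
    use crate::core_arch::arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn ties_resolve_to_even() {
        assert_eq!(vcvtns_s32_f32(2.5), 2);
        assert_eq!(vcvtns_s32_f32(3.5), 4);
        assert_eq!(vcvtnd_s64_f64(-0.5), 0);
    }
}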
8837#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8838#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
8839#[inline]
8840#[cfg_attr(test, assert_instr(fcvtps))]
8841#[target_feature(enable = "neon,fp16")]
8842#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8843pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
8844    unsafe extern "unadjusted" {
8845        #[cfg_attr(
8846            any(target_arch = "aarch64", target_arch = "arm64ec"),
8847            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
8848        )]
8849        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
8850    }
8851    unsafe { _vcvtp_s16_f16(a) }
8852}
8853#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
8855#[inline]
8856#[cfg_attr(test, assert_instr(fcvtps))]
8857#[target_feature(enable = "neon,fp16")]
8858#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8859pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
8860    unsafe extern "unadjusted" {
8861        #[cfg_attr(
8862            any(target_arch = "aarch64", target_arch = "arm64ec"),
8863            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
8864        )]
8865        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
8866    }
8867    unsafe { _vcvtpq_s16_f16(a) }
8868}
8869#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8870#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
8871#[inline]
8872#[target_feature(enable = "neon")]
8873#[cfg_attr(test, assert_instr(fcvtps))]
8874#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8875pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
8876    unsafe extern "unadjusted" {
8877        #[cfg_attr(
8878            any(target_arch = "aarch64", target_arch = "arm64ec"),
8879            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
8880        )]
8881        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
8882    }
8883    unsafe { _vcvtp_s32_f32(a) }
8884}
8885#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8886#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
8887#[inline]
8888#[target_feature(enable = "neon")]
8889#[cfg_attr(test, assert_instr(fcvtps))]
8890#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8891pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
8892    unsafe extern "unadjusted" {
8893        #[cfg_attr(
8894            any(target_arch = "aarch64", target_arch = "arm64ec"),
8895            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
8896        )]
8897        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
8898    }
8899    unsafe { _vcvtpq_s32_f32(a) }
8900}
8901#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8902#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
8903#[inline]
8904#[target_feature(enable = "neon")]
8905#[cfg_attr(test, assert_instr(fcvtps))]
8906#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8907pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
8908    unsafe extern "unadjusted" {
8909        #[cfg_attr(
8910            any(target_arch = "aarch64", target_arch = "arm64ec"),
8911            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
8912        )]
8913        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
8914    }
8915    unsafe { _vcvtp_s64_f64(a) }
8916}
8917#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8918#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
8919#[inline]
8920#[target_feature(enable = "neon")]
8921#[cfg_attr(test, assert_instr(fcvtps))]
8922#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8923pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
8924    unsafe extern "unadjusted" {
8925        #[cfg_attr(
8926            any(target_arch = "aarch64", target_arch = "arm64ec"),
8927            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
8928        )]
8929        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
8930    }
8931    unsafe { _vcvtpq_s64_f64(a) }
8932}
8933#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8934#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
8935#[inline]
8936#[cfg_attr(test, assert_instr(fcvtpu))]
8937#[target_feature(enable = "neon,fp16")]
8938#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8939pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
8940    unsafe extern "unadjusted" {
8941        #[cfg_attr(
8942            any(target_arch = "aarch64", target_arch = "arm64ec"),
8943            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
8944        )]
8945        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
8946    }
8947    unsafe { _vcvtp_u16_f16(a) }
8948}
8949#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
8951#[inline]
8952#[cfg_attr(test, assert_instr(fcvtpu))]
8953#[target_feature(enable = "neon,fp16")]
8954#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8955pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
8956    unsafe extern "unadjusted" {
8957        #[cfg_attr(
8958            any(target_arch = "aarch64", target_arch = "arm64ec"),
8959            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
8960        )]
8961        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
8962    }
8963    unsafe { _vcvtpq_u16_f16(a) }
8964}
8965#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8966#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
8967#[inline]
8968#[target_feature(enable = "neon")]
8969#[cfg_attr(test, assert_instr(fcvtpu))]
8970#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8971pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
8972    unsafe extern "unadjusted" {
8973        #[cfg_attr(
8974            any(target_arch = "aarch64", target_arch = "arm64ec"),
8975            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
8976        )]
8977        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
8978    }
8979    unsafe { _vcvtp_u32_f32(a) }
8980}
8981#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
8983#[inline]
8984#[target_feature(enable = "neon")]
8985#[cfg_attr(test, assert_instr(fcvtpu))]
8986#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8987pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
8988    unsafe extern "unadjusted" {
8989        #[cfg_attr(
8990            any(target_arch = "aarch64", target_arch = "arm64ec"),
8991            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
8992        )]
8993        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
8994    }
8995    unsafe { _vcvtpq_u32_f32(a) }
8996}
8997#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8998#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
8999#[inline]
9000#[target_feature(enable = "neon")]
9001#[cfg_attr(test, assert_instr(fcvtpu))]
9002#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9003pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
9004    unsafe extern "unadjusted" {
9005        #[cfg_attr(
9006            any(target_arch = "aarch64", target_arch = "arm64ec"),
9007            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
9008        )]
9009        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
9010    }
9011    unsafe { _vcvtp_u64_f64(a) }
9012}
9013#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
9015#[inline]
9016#[target_feature(enable = "neon")]
9017#[cfg_attr(test, assert_instr(fcvtpu))]
9018#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9019pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
9020    unsafe extern "unadjusted" {
9021        #[cfg_attr(
9022            any(target_arch = "aarch64", target_arch = "arm64ec"),
9023            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
9024        )]
9025        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
9026    }
9027    unsafe { _vcvtpq_u64_f64(a) }
9028}
9029#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9030#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
9031#[inline]
9032#[cfg_attr(test, assert_instr(fcvtps))]
9033#[target_feature(enable = "neon,fp16")]
9034#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9035pub fn vcvtph_s16_f16(a: f16) -> i16 {
9036    vcvtph_s32_f16(a) as i16
9037}
9038#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9039#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
9040#[inline]
9041#[cfg_attr(test, assert_instr(fcvtps))]
9042#[target_feature(enable = "neon,fp16")]
9043#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9044pub fn vcvtph_s32_f16(a: f16) -> i32 {
9045    unsafe extern "unadjusted" {
9046        #[cfg_attr(
9047            any(target_arch = "aarch64", target_arch = "arm64ec"),
9048            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
9049        )]
9050        fn _vcvtph_s32_f16(a: f16) -> i32;
9051    }
9052    unsafe { _vcvtph_s32_f16(a) }
9053}
9054#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9055#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
9056#[inline]
9057#[cfg_attr(test, assert_instr(fcvtps))]
9058#[target_feature(enable = "neon,fp16")]
9059#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9060pub fn vcvtph_s64_f16(a: f16) -> i64 {
9061    unsafe extern "unadjusted" {
9062        #[cfg_attr(
9063            any(target_arch = "aarch64", target_arch = "arm64ec"),
9064            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
9065        )]
9066        fn _vcvtph_s64_f16(a: f16) -> i64;
9067    }
9068    unsafe { _vcvtph_s64_f16(a) }
9069}
9070#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9071#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
9072#[inline]
9073#[cfg_attr(test, assert_instr(fcvtpu))]
9074#[target_feature(enable = "neon,fp16")]
9075#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9076pub fn vcvtph_u16_f16(a: f16) -> u16 {
9077    vcvtph_u32_f16(a) as u16
9078}
9079#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9080#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
9081#[inline]
9082#[cfg_attr(test, assert_instr(fcvtpu))]
9083#[target_feature(enable = "neon,fp16")]
9084#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9085pub fn vcvtph_u32_f16(a: f16) -> u32 {
9086    unsafe extern "unadjusted" {
9087        #[cfg_attr(
9088            any(target_arch = "aarch64", target_arch = "arm64ec"),
9089            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
9090        )]
9091        fn _vcvtph_u32_f16(a: f16) -> u32;
9092    }
9093    unsafe { _vcvtph_u32_f16(a) }
9094}
9095#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9096#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
9097#[inline]
9098#[cfg_attr(test, assert_instr(fcvtpu))]
9099#[target_feature(enable = "neon,fp16")]
9100#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9101pub fn vcvtph_u64_f16(a: f16) -> u64 {
9102    unsafe extern "unadjusted" {
9103        #[cfg_attr(
9104            any(target_arch = "aarch64", target_arch = "arm64ec"),
9105            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
9106        )]
9107        fn _vcvtph_u64_f16(a: f16) -> u64;
9108    }
9109    unsafe { _vcvtph_u64_f16(a) }
9110}
9111#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9112#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
9113#[inline]
9114#[target_feature(enable = "neon")]
9115#[cfg_attr(test, assert_instr(fcvtps))]
9116#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9117pub fn vcvtps_s32_f32(a: f32) -> i32 {
9118    unsafe extern "unadjusted" {
9119        #[cfg_attr(
9120            any(target_arch = "aarch64", target_arch = "arm64ec"),
9121            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
9122        )]
9123        fn _vcvtps_s32_f32(a: f32) -> i32;
9124    }
9125    unsafe { _vcvtps_s32_f32(a) }
9126}
9127#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9128#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
9129#[inline]
9130#[target_feature(enable = "neon")]
9131#[cfg_attr(test, assert_instr(fcvtps))]
9132#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9133pub fn vcvtpd_s64_f64(a: f64) -> i64 {
9134    unsafe extern "unadjusted" {
9135        #[cfg_attr(
9136            any(target_arch = "aarch64", target_arch = "arm64ec"),
9137            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
9138        )]
9139        fn _vcvtpd_s64_f64(a: f64) -> i64;
9140    }
9141    unsafe { _vcvtpd_s64_f64(a) }
9142}
9143#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9144#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
9145#[inline]
9146#[target_feature(enable = "neon")]
9147#[cfg_attr(test, assert_instr(fcvtpu))]
9148#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9149pub fn vcvtps_u32_f32(a: f32) -> u32 {
9150    unsafe extern "unadjusted" {
9151        #[cfg_attr(
9152            any(target_arch = "aarch64", target_arch = "arm64ec"),
9153            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
9154        )]
9155        fn _vcvtps_u32_f32(a: f32) -> u32;
9156    }
9157    unsafe { _vcvtps_u32_f32(a) }
9158}
9159#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9160#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
9161#[inline]
9162#[target_feature(enable = "neon")]
9163#[cfg_attr(test, assert_instr(fcvtpu))]
9164#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9165pub fn vcvtpd_u64_f64(a: f64) -> u64 {
9166    unsafe extern "unadjusted" {
9167        #[cfg_attr(
9168            any(target_arch = "aarch64", target_arch = "arm64ec"),
9169            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
9170        )]
9171        fn _vcvtpd_u64_f64(a: f64) -> u64;
9172    }
9173    unsafe { _vcvtpd_u64_f64(a) }
9174}
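// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and the names are ours. FCVTPS/FCVTPU
// round toward plus infinity, i.e. they take the ceiling before converting.
#[cfg(test)]
mod vcvtp_scalar_example {
    use crate::core_arch::arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn rounds_toward_plus_infinity() {
        assert_eq!(vcvtps_s32_f32(1.1), 2);
        assert_eq!(vcvtps_s32_f32(-1.9), -1);
        assert_eq!(vcvtpd_u64_f64(0.1), 1);
    }
}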
9175#[doc = "Fixed-point convert to floating-point"]
9176#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
9177#[inline]
9178#[target_feature(enable = "neon")]
9179#[cfg_attr(test, assert_instr(ucvtf))]
9180#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9181pub fn vcvts_f32_u32(a: u32) -> f32 {
9182    a as f32
9183}
9184#[doc = "Fixed-point convert to floating-point"]
9185#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
9186#[inline]
9187#[target_feature(enable = "neon")]
9188#[cfg_attr(test, assert_instr(ucvtf))]
9189#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9190pub fn vcvtd_f64_u64(a: u64) -> f64 {
9191    a as f64
9192}
9193#[doc = "Fixed-point convert to floating-point"]
9194#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9195#[inline]
9196#[target_feature(enable = "neon")]
9197#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9198#[rustc_legacy_const_generics(1)]
9199#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9200pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9201    static_assert!(N >= 1 && N <= 32);
9202    unsafe extern "unadjusted" {
9203        #[cfg_attr(
9204            any(target_arch = "aarch64", target_arch = "arm64ec"),
9205            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9206        )]
9207        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9208    }
9209    unsafe { _vcvts_n_f32_s32(a, N) }
9210}
9211#[doc = "Fixed-point convert to floating-point"]
9212#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
9213#[inline]
9214#[target_feature(enable = "neon")]
9215#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9216#[rustc_legacy_const_generics(1)]
9217#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9218pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
9219    static_assert!(N >= 1 && N <= 64);
9220    unsafe extern "unadjusted" {
9221        #[cfg_attr(
9222            any(target_arch = "aarch64", target_arch = "arm64ec"),
9223            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
9224        )]
9225        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
9226    }
9227    unsafe { _vcvtd_n_f64_s64(a, N) }
9228}
9229#[doc = "Fixed-point convert to floating-point"]
9230#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
9231#[inline]
9232#[target_feature(enable = "neon")]
9233#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
9234#[rustc_legacy_const_generics(1)]
9235#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9236pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
9237    static_assert!(N >= 1 && N <= 32);
9238    unsafe extern "unadjusted" {
9239        #[cfg_attr(
9240            any(target_arch = "aarch64", target_arch = "arm64ec"),
9241            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
9242        )]
9243        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
9244    }
9245    unsafe { _vcvts_n_f32_u32(a, N) }
9246}
9247#[doc = "Fixed-point convert to floating-point"]
9248#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
9249#[inline]
9250#[target_feature(enable = "neon")]
9251#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
9252#[rustc_legacy_const_generics(1)]
9253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9254pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
9255    static_assert!(N >= 1 && N <= 64);
9256    unsafe extern "unadjusted" {
9257        #[cfg_attr(
9258            any(target_arch = "aarch64", target_arch = "arm64ec"),
9259            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
9260        )]
9261        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
9262    }
9263    unsafe { _vcvtd_n_f64_u64(a, N) }
9264}
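// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and the names are ours. With N fractional
// bits, the fixed-point input is scaled by 2^-N on the way to floating-point.
#[cfg(test)]
mod cvt_fixed_to_float_example {
    use crate::core_arch::arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn divides_by_two_to_the_n() {
        assert_eq!(vcvts_n_f32_s32::<2>(10), 2.5); // 10 / 2^2
        assert_eq!(vcvtd_n_f64_u64::<4>(32), 2.0); // 32 / 2^4
    }
}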
9265#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
9267#[inline]
9268#[target_feature(enable = "neon")]
9269#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
9270#[rustc_legacy_const_generics(1)]
9271#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9272pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
9273    static_assert!(N >= 1 && N <= 32);
9274    unsafe extern "unadjusted" {
9275        #[cfg_attr(
9276            any(target_arch = "aarch64", target_arch = "arm64ec"),
9277            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
9278        )]
9279        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
9280    }
9281    unsafe { _vcvts_n_s32_f32(a, N) }
9282}
9283#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9284#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
9285#[inline]
9286#[target_feature(enable = "neon")]
9287#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
9288#[rustc_legacy_const_generics(1)]
9289#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9290pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
9291    static_assert!(N >= 1 && N <= 64);
9292    unsafe extern "unadjusted" {
9293        #[cfg_attr(
9294            any(target_arch = "aarch64", target_arch = "arm64ec"),
9295            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
9296        )]
9297        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
9298    }
9299    unsafe { _vcvtd_n_s64_f64(a, N) }
9300}
9301#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9302#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
9303#[inline]
9304#[target_feature(enable = "neon")]
9305#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
9306#[rustc_legacy_const_generics(1)]
9307#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9308pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
9309    static_assert!(N >= 1 && N <= 32);
9310    unsafe extern "unadjusted" {
9311        #[cfg_attr(
9312            any(target_arch = "aarch64", target_arch = "arm64ec"),
9313            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
9314        )]
9315        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
9316    }
9317    unsafe { _vcvts_n_u32_f32(a, N) }
9318}
9319#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9320#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
9321#[inline]
9322#[target_feature(enable = "neon")]
9323#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
9324#[rustc_legacy_const_generics(1)]
9325#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9326pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
9327    static_assert!(N >= 1 && N <= 64);
9328    unsafe extern "unadjusted" {
9329        #[cfg_attr(
9330            any(target_arch = "aarch64", target_arch = "arm64ec"),
9331            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
9332        )]
9333        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
9334    }
9335    unsafe { _vcvtd_n_u64_f64(a, N) }
9336}
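// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and the names are ours. Going the other
// way, the input is multiplied by 2^N and the product is rounded toward zero.
#[cfg(test)]
mod cvt_float_to_fixed_example {
    use crate::core_arch::arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn multiplies_by_two_to_the_n() {
        assert_eq!(vcvts_n_s32_f32::<2>(2.5), 10); // 2.5 * 2^2
        assert_eq!(vcvts_n_u32_f32::<3>(1.25), 10); // 1.25 * 2^3
    }
}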
9337#[doc = "Floating-point convert to signed integer, rounding toward zero"]
9338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
9339#[inline]
9340#[target_feature(enable = "neon")]
9341#[cfg_attr(test, assert_instr(fcvtzs))]
9342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9343pub fn vcvts_s32_f32(a: f32) -> i32 {
9344    a as i32
9345}
9346#[doc = "Floating-point convert to signed integer, rounding toward zero"]
9347#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
9348#[inline]
9349#[target_feature(enable = "neon")]
9350#[cfg_attr(test, assert_instr(fcvtzs))]
9351#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9352pub fn vcvtd_s64_f64(a: f64) -> i64 {
9353    a as i64
9354}
9355#[doc = "Floating-point convert to unsigned integer, rounding toward zero"]
9356#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
9357#[inline]
9358#[target_feature(enable = "neon")]
9359#[cfg_attr(test, assert_instr(fcvtzu))]
9360#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9361pub fn vcvts_u32_f32(a: f32) -> u32 {
9362    a as u32
9363}
9364#[doc = "Floating-point convert to unsigned integer, rounding toward zero"]
9365#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
9366#[inline]
9367#[target_feature(enable = "neon")]
9368#[cfg_attr(test, assert_instr(fcvtzu))]
9369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9370pub fn vcvtd_u64_f64(a: f64) -> u64 {
9371    a as u64
9372}
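// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and the names are ours. FCVTZS/FCVTZU
// round toward zero, so the fractional part is simply discarded; contrast
// with `vcvtms_s32_f32`, where -1.9 floors to -2.
#[cfg(test)]
mod cvt_toward_zero_example {
    use crate::core_arch::arch::aarch64::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn truncates_toward_zero() {
        assert_eq!(vcvts_s32_f32(-1.9), -1);
        assert_eq!(vcvtd_u64_f64(2.9), 2);
    }
}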
9373#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9374#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
9375#[inline]
9376#[target_feature(enable = "neon")]
9377#[cfg_attr(test, assert_instr(fcvtxn))]
9378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9379pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
9380    unsafe extern "unadjusted" {
9381        #[cfg_attr(
9382            any(target_arch = "aarch64", target_arch = "arm64ec"),
9383            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
9384        )]
9385        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
9386    }
9387    unsafe { _vcvtx_f32_f64(a) }
9388}
9389#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9390#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
9391#[inline]
9392#[target_feature(enable = "neon")]
9393#[cfg_attr(test, assert_instr(fcvtxn))]
9394#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9395pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
9396    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
9397}
9398#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9399#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
9400#[inline]
9401#[target_feature(enable = "neon")]
9402#[cfg_attr(test, assert_instr(fcvtxn))]
9403#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9404pub fn vcvtxd_f32_f64(a: f64) -> f32 {
9405    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
9406}
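// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and little-endian lane order, and the
// names are ours. The `_high` variant keeps `a` in the low lanes and writes
// the narrowed `b` into the high lanes; the inputs below are exactly
// representable in f32, so round-to-odd leaves them unchanged.
#[cfg(test)]
mod vcvtx_example {
    use crate::core_arch::arch::aarch64::*;
    use core::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn narrows_into_the_high_lanes() {
        let lo: float32x2_t = transmute([1.0f32, 2.0]);
        let wide: float64x2_t = transmute([3.0f64, 4.0]);
        let r: [f32; 4] = transmute(vcvtx_high_f32_f64(lo, wide));
        assert_eq!(r, [1.0, 2.0, 3.0, 4.0]);
    }
}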
9407#[doc = "Divide"]
9408#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
9409#[inline]
9410#[target_feature(enable = "neon,fp16")]
9411#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9412#[cfg_attr(test, assert_instr(fdiv))]
9413pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
9414    unsafe { simd_div(a, b) }
9415}
9416#[doc = "Divide"]
9417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
9418#[inline]
9419#[target_feature(enable = "neon,fp16")]
9420#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9421#[cfg_attr(test, assert_instr(fdiv))]
9422pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
9423    unsafe { simd_div(a, b) }
9424}
9425#[doc = "Divide"]
9426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
9427#[inline]
9428#[target_feature(enable = "neon")]
9429#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9430#[cfg_attr(test, assert_instr(fdiv))]
9431pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
9432    unsafe { simd_div(a, b) }
9433}
9434#[doc = "Divide"]
9435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
9436#[inline]
9437#[target_feature(enable = "neon")]
9438#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9439#[cfg_attr(test, assert_instr(fdiv))]
9440pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
9441    unsafe { simd_div(a, b) }
9442}
9443#[doc = "Divide"]
9444#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
9445#[inline]
9446#[target_feature(enable = "neon")]
9447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9448#[cfg_attr(test, assert_instr(fdiv))]
9449pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
9450    unsafe { simd_div(a, b) }
9451}
9452#[doc = "Divide"]
9453#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
9454#[inline]
9455#[target_feature(enable = "neon")]
9456#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9457#[cfg_attr(test, assert_instr(fdiv))]
9458pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
9459    unsafe { simd_div(a, b) }
9460}
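// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and little-endian lane order, and the
// names are ours. FDIV divides lane-for-lane.
#[cfg(test)]
mod vdiv_example {
    use crate::core_arch::arch::aarch64::*;
    use core::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn divides_lanewise() {
        let a: float32x4_t = transmute([2.0f32, 9.0, -8.0, 1.0]);
        let b: float32x4_t = transmute([1.0f32, 3.0, 4.0, 2.0]);
        let r: [f32; 4] = transmute(vdivq_f32(a, b));
        assert_eq!(r, [2.0, 3.0, -2.0, 0.5]);
    }
}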
9461#[doc = "Divide"]
9462#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
9463#[inline]
9464#[target_feature(enable = "neon,fp16")]
9465#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9466#[cfg_attr(test, assert_instr(nop))]
9467pub fn vdivh_f16(a: f16, b: f16) -> f16 {
9468    a / b
9469}
9470#[doc = "Dot product arithmetic (indexed)"]
9471#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"]
9472#[inline]
9473#[target_feature(enable = "neon,dotprod")]
9474#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
9475#[rustc_legacy_const_generics(3)]
9476#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9477pub fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
9478    static_assert_uimm_bits!(LANE, 2);
9479    unsafe {
9480        let c: int32x4_t = transmute(c);
9481        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
9482        vdot_s32(a, b, transmute(c))
9483    }
9484}
9485#[doc = "Dot product arithmetic (indexed)"]
9486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"]
9487#[inline]
9488#[target_feature(enable = "neon,dotprod")]
9489#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
9490#[rustc_legacy_const_generics(3)]
9491#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9492pub fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
9493    static_assert_uimm_bits!(LANE, 2);
9494    unsafe {
9495        let c: int32x4_t = transmute(c);
9496        let c: int32x4_t =
9497            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
9498        vdotq_s32(a, b, transmute(c))
9499    }
9500}
9501#[doc = "Dot product arithmetic (indexed)"]
9502#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"]
9503#[inline]
9504#[target_feature(enable = "neon,dotprod")]
9505#[cfg_attr(test, assert_instr(udot, LANE = 0))]
9506#[rustc_legacy_const_generics(3)]
9507#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9508pub fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
9509    static_assert_uimm_bits!(LANE, 2);
9510    unsafe {
9511        let c: uint32x4_t = transmute(c);
9512        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
9513        vdot_u32(a, b, transmute(c))
9514    }
9515}
9516#[doc = "Dot product arithmetic (indexed)"]
9517#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"]
9518#[inline]
9519#[target_feature(enable = "neon,dotprod")]
9520#[cfg_attr(test, assert_instr(udot, LANE = 0))]
9521#[rustc_legacy_const_generics(3)]
9522#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9523pub fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
9524    static_assert_uimm_bits!(LANE, 2);
9525    unsafe {
9526        let c: uint32x4_t = transmute(c);
9527        let c: uint32x4_t =
9528            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
9529        vdotq_u32(a, b, transmute(c))
9530    }
9531}
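// Editor's note: an illustrative sketch, not generator output; it assumes the
// `stdarch_test::simd_test` harness and little-endian lane order, and the
// names are ours. Each 32-bit accumulator lane gains the dot product of four
// bytes of `b` with the four bytes of `c` selected by `LANE`.
#[cfg(test)]
mod vdot_example {
    use crate::core_arch::arch::aarch64::*;
    use core::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon,dotprod")]
    unsafe fn accumulates_four_byte_dot_products() {
        let a: uint32x2_t = transmute([100u32, 1000]);
        let b: uint8x8_t = transmute([1u8, 2, 3, 4, 5, 6, 7, 8]);
        let c: uint8x16_t = transmute([1u8; 16]);
        let r: [u32; 2] = transmute(vdot_laneq_u32::<0>(a, b, c));
        assert_eq!(r, [110, 1026]); // 100 + (1+2+3+4), 1000 + (5+6+7+8)
    }
}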
9532#[doc = "Set all vector lanes to the same value"]
9533#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
9534#[inline]
9535#[target_feature(enable = "neon")]
9536#[cfg_attr(test, assert_instr(nop, N = 0))]
9537#[rustc_legacy_const_generics(1)]
9538#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9539pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
9540    static_assert!(N == 0);
9541    a
9542}
9543#[doc = "Set all vector lanes to the same value"]
9544#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
9545#[inline]
9546#[target_feature(enable = "neon")]
9547#[cfg_attr(test, assert_instr(nop, N = 0))]
9548#[rustc_legacy_const_generics(1)]
9549#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9550pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
9551    static_assert!(N == 0);
9552    a
9553}
9554#[doc = "Set all vector lanes to the same value"]
9555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
9556#[inline]
9557#[target_feature(enable = "neon")]
9558#[cfg_attr(test, assert_instr(nop, N = 1))]
9559#[rustc_legacy_const_generics(1)]
9560#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9561pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
9562    static_assert_uimm_bits!(N, 1);
9563    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
9564}
9565#[doc = "Set all vector lanes to the same value"]
9566#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
9567#[inline]
9568#[target_feature(enable = "neon")]
9569#[cfg_attr(test, assert_instr(nop, N = 1))]
9570#[rustc_legacy_const_generics(1)]
9571#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9572pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
9573    static_assert_uimm_bits!(N, 1);
9574    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
9575}
9576#[doc = "Set all vector lanes to the same value"]
9577#[doc = "Extract an element from a vector"]
9578#[inline]
9579#[target_feature(enable = "neon")]
9580#[cfg_attr(test, assert_instr(nop, N = 4))]
9581#[rustc_legacy_const_generics(1)]
9582#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9583pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
9584    static_assert_uimm_bits!(N, 3);
9585    unsafe { simd_extract!(a, N as u32) }
9586}
9587#[doc = "Set all vector lanes to the same value"]
9588#[doc = "Extract an element from a vector"]
9589#[inline]
9590#[target_feature(enable = "neon")]
9591#[cfg_attr(test, assert_instr(nop, N = 4))]
9592#[rustc_legacy_const_generics(1)]
9593#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9594pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
9595    static_assert_uimm_bits!(N, 3);
9596    unsafe { simd_extract!(a, N as u32) }
9597}
9598#[doc = "Set all vector lanes to the same value"]
9599#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
9600#[inline]
9601#[target_feature(enable = "neon")]
9602#[cfg_attr(test, assert_instr(nop, N = 4))]
9603#[rustc_legacy_const_generics(1)]
9604#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9605pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
9606    static_assert_uimm_bits!(N, 3);
9607    unsafe { simd_extract!(a, N as u32) }
9608}
9609#[doc = "Set all vector lanes to the same value"]
9610#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
9611#[inline]
9612#[target_feature(enable = "neon")]
9613#[cfg_attr(test, assert_instr(nop, N = 4))]
9614#[rustc_legacy_const_generics(1)]
9615#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9616pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
9617    static_assert_uimm_bits!(N, 3);
9618    unsafe { simd_extract!(a, N as u32) }
9619}
9620#[doc = "Set all vector lanes to the same value"]
9621#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
9622#[inline]
9623#[target_feature(enable = "neon")]
9624#[cfg_attr(test, assert_instr(nop, N = 4))]
9625#[rustc_legacy_const_generics(1)]
9626#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9627pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
9628    static_assert_uimm_bits!(N, 3);
9629    unsafe { simd_extract!(a, N as u32) }
9630}
9631#[doc = "Set all vector lanes to the same value"]
9632#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
9633#[inline]
9634#[target_feature(enable = "neon")]
9635#[cfg_attr(test, assert_instr(nop, N = 4))]
9636#[rustc_legacy_const_generics(1)]
9637#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9638pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
9639    static_assert_uimm_bits!(N, 3);
9640    unsafe { simd_extract!(a, N as u32) }
9641}
9642#[doc = "Extract an element from a vector"]
9643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
9644#[inline]
9645#[target_feature(enable = "neon")]
9646#[cfg_attr(test, assert_instr(nop, N = 8))]
9647#[rustc_legacy_const_generics(1)]
9648#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9649pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
9650    static_assert_uimm_bits!(N, 4);
9651    unsafe { simd_extract!(a, N as u32) }
9652}
9653#[doc = "Extract an element from a vector"]
9654#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
9655#[inline]
9656#[target_feature(enable = "neon")]
9657#[cfg_attr(test, assert_instr(nop, N = 8))]
9658#[rustc_legacy_const_generics(1)]
9659#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9660pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
9661    static_assert_uimm_bits!(N, 4);
9662    unsafe { simd_extract!(a, N as u32) }
9663}
9664#[doc = "Extract an element from a vector"]
9665#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
9666#[inline]
9667#[target_feature(enable = "neon")]
9668#[cfg_attr(test, assert_instr(nop, N = 8))]
9669#[rustc_legacy_const_generics(1)]
9670#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9671pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
9672    static_assert_uimm_bits!(N, 4);
9673    unsafe { simd_extract!(a, N as u32) }
9674}
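// A minimal usage sketch (illustrative; `example_vdupb_laneq_s8` is a
// hypothetical helper, not part of the generated API). The scalar `vdup*`
// forms read a single lane out into an ordinary scalar value.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn example_vdupb_laneq_s8() {
    let v = vsetq_lane_s8::<9>(42, vdupq_n_s8(0));
    assert_eq!(vdupb_laneq_s8::<9>(v), 42);
    assert_eq!(vdupb_laneq_s8::<0>(v), 0);
}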
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
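// A minimal usage sketch (illustrative; `example_vdupq_lane_f64` is a
// hypothetical helper, not part of the generated API): broadcasting the
// single lane of a d register into both lanes of a q register.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn example_vdupq_lane_f64() {
    let a = vdup_n_f64(2.5);
    let r = vdupq_lane_f64::<0>(a);
    assert_eq!(vgetq_lane_f64::<0>(r), 2.5);
    assert_eq!(vgetq_lane_f64::<1>(r), 2.5);
}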
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
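// Note (explanatory comment, not generated code): the vector-to-vector dup
// intrinsics above expect a `dup` instruction in their tests, while the
// scalar lane reads in this family expect `nop`, since after inlining a
// single-lane read typically lowers to a plain register move or nothing.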
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
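// A minimal usage sketch (illustrative; `example_vdups_lane_f32` is a
// hypothetical helper, not part of the generated API).
#[cfg(test)]
#[target_feature(enable = "neon")]
fn example_vdups_lane_f32() {
    let v = vset_lane_f32::<1>(1.25, vdup_n_f32(0.0));
    assert_eq!(vdups_lane_f32::<1>(v), 1.25);
}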
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _veor3q_s8(a, b, c) }
}
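// A minimal usage sketch (illustrative; `example_veor3q_s8` is a hypothetical
// helper, not part of the generated API): EOR3 XORs all three inputs in one
// instruction, matching two chained `veorq_s8` calls.
#[cfg(test)]
#[target_feature(enable = "neon,sha3")]
fn example_veor3q_s8() {
    let a = vdupq_n_s8(0b0101);
    let b = vdupq_n_s8(0b0011);
    let c = vdupq_n_s8(0b0110);
    let r = veor3q_s8(a, b, c);
    assert_eq!(vgetq_lane_s8::<0>(r), 0b0101 ^ 0b0011 ^ 0b0110);
}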
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _veor3q_u64(a, b, c) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
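// A minimal usage sketch (illustrative; `example_vextq_f64` is a hypothetical
// helper, not part of the generated API): with N = 1, EXT drops the lowest
// lane of `a` and shifts in the lowest lane of `b`.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn example_vextq_f64() {
    let a = vsetq_lane_f64::<1>(1.0, vdupq_n_f64(0.0)); // a = [0.0, 1.0]
    let b = vdupq_n_f64(2.0);                           // b = [2.0, 2.0]
    let r = vextq_f64::<1>(a, b);                       // r = [1.0, 2.0]
    assert_eq!(vgetq_lane_f64::<0>(r), 1.0);
    assert_eq!(vgetq_lane_f64::<1>(r), 2.0);
}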
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fma.v1f64"
        )]
        fn _vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t;
    }
    unsafe { _vfma_f64(b, c, a) }
}
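// Note (explanatory comment, not generated code): `llvm.fma` computes
// `x * y + z`, so the wrapper above passes `(b, c, a)` to obtain the NEON
// accumulator form `a + b * c`. A minimal usage sketch follows
// (`example_vfma_f64` is a hypothetical helper, not part of the API).
#[cfg(test)]
#[target_feature(enable = "neon")]
fn example_vfma_f64() {
    let acc = vdup_n_f64(1.0);
    let x = vdup_n_f64(2.0);
    let y = vdup_n_f64(3.0);
    let r = vfma_f64(acc, x, y); // 1.0 + 2.0 * 3.0
    assert_eq!(vget_lane_f64::<0>(r), 7.0);
}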
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
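// A minimal usage sketch (illustrative; `example_vfmaq_laneq_f32` is a
// hypothetical helper, not part of the generated API): every product uses
// the single multiplier `c[LANE]`, broadcast by the `vdupq_n_f32` above.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn example_vfmaq_laneq_f32() {
    let acc = vdupq_n_f32(1.0);
    let b = vdupq_n_f32(2.0);
    let c = vsetq_lane_f32::<3>(4.0, vdupq_n_f32(0.0));
    let r = vfmaq_laneq_f32::<3>(acc, b, c); // 1.0 + 2.0 * 4.0 in every lane
    assert_eq!(vgetq_lane_f32::<0>(r), 9.0);
}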
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    vfma_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    vfmaq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    vfma_f64(a, b, vdup_n_f64(c))
}
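// Note (explanatory comment, not generated code): the `_n` variants above
// broadcast a scalar multiplier with `vdup*_n_*` and defer to the plain
// vector forms, so they inherit the same `a + b * c` semantics.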
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fma.f64"
        )]
        fn _vfmad_lane_f64(a: f64, b: f64, c: f64) -> f64;
    }
    static_assert!(LANE == 0);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        _vfmad_lane_f64(b, c, a)
    }
}
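// A minimal usage sketch (illustrative; `example_vfmad_lane_f64` is a
// hypothetical helper, not part of the generated API): the scalar-by-lane
// form computes `a + b * c[LANE]`.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn example_vfmad_lane_f64() {
    let c = vdup_n_f64(3.0);
    assert_eq!(vfmad_lane_f64::<0>(1.0, 2.0, c), 7.0); // 1.0 + 2.0 * 3.0
}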
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fma.f16"
        )]
        fn _vfmah_f16(a: f16, b: f16, c: f16) -> f16;
    }
    unsafe { _vfmah_f16(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fma.v2f64"
        )]
        fn _vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vfmaq_f64(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fma.f32"
        )]
        fn _vfmas_lane_f32(a: f32, b: f32, c: f32) -> f32;
    }
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        _vfmas_lane_f32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fma.f32"
        )]
        fn _vfmas_laneq_f32(a: f32, b: f32, c: f32) -> f32;
    }
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        _vfmas_laneq_f32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fma.f64"
        )]
        fn _vfmad_laneq_f64(a: f64, b: f64, c: f64) -> f64;
    }
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        _vfmad_laneq_f64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
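// Note (explanatory comment, not generated code): the widening FMLAL family
// multiplies half of the f16 lanes of `a` and `b`, widens each product to
// f32, and accumulates into `r`. The `_high` variants (FMLAL2) read the
// upper half of the f16 lanes; the `_low` variants below (FMLAL) read the
// lower half.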
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
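// Note (explanatory comment, not generated code): the FMLSL family mirrors
// FMLAL but subtracts the widened f16 products from the f32 accumulator,
// again with `_high` (FMLSL2) reading the upper f16 lanes and `_low` (FMLSL)
// the lower ones.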
10674#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10675#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
10676#[inline]
10677#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10678#[target_feature(enable = "neon,fp16")]
10679#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10680#[rustc_legacy_const_generics(3)]
10681#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10682pub fn vfmlsl_lane_high_f16<const LANE: i32>(
10683    r: float32x2_t,
10684    a: float16x4_t,
10685    b: float16x4_t,
10686) -> float32x2_t {
10687    static_assert_uimm_bits!(LANE, 2);
10688    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10689}
10690#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
10692#[inline]
10693#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10694#[target_feature(enable = "neon,fp16")]
10695#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10696#[rustc_legacy_const_generics(3)]
10697#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10698pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
10699    r: float32x2_t,
10700    a: float16x4_t,
10701    b: float16x8_t,
10702) -> float32x2_t {
10703    static_assert_uimm_bits!(LANE, 3);
10704    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10705}
10706#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
10708#[inline]
10709#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10710#[target_feature(enable = "neon,fp16")]
10711#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10712#[rustc_legacy_const_generics(3)]
10713#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10714pub fn vfmlslq_lane_high_f16<const LANE: i32>(
10715    r: float32x4_t,
10716    a: float16x8_t,
10717    b: float16x4_t,
10718) -> float32x4_t {
10719    static_assert_uimm_bits!(LANE, 2);
10720    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10721}
10722#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
10724#[inline]
10725#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10726#[target_feature(enable = "neon,fp16")]
10727#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10728#[rustc_legacy_const_generics(3)]
10729#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10730pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
10731    r: float32x4_t,
10732    a: float16x8_t,
10733    b: float16x8_t,
10734) -> float32x4_t {
10735    static_assert_uimm_bits!(LANE, 3);
10736    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10737}
10738#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
10740#[inline]
10741#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10742#[target_feature(enable = "neon,fp16")]
10743#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10744#[rustc_legacy_const_generics(3)]
10745#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10746pub fn vfmlsl_lane_low_f16<const LANE: i32>(
10747    r: float32x2_t,
10748    a: float16x4_t,
10749    b: float16x4_t,
10750) -> float32x2_t {
10751    static_assert_uimm_bits!(LANE, 2);
10752    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10753}
10754#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10755#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
10756#[inline]
10757#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10758#[target_feature(enable = "neon,fp16")]
10759#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10760#[rustc_legacy_const_generics(3)]
10761#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10762pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
10763    r: float32x2_t,
10764    a: float16x4_t,
10765    b: float16x8_t,
10766) -> float32x2_t {
10767    static_assert_uimm_bits!(LANE, 3);
10768    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10769}
10770#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10771#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
10772#[inline]
10773#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10774#[target_feature(enable = "neon,fp16")]
10775#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10776#[rustc_legacy_const_generics(3)]
10777#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10778pub fn vfmlslq_lane_low_f16<const LANE: i32>(
10779    r: float32x4_t,
10780    a: float16x8_t,
10781    b: float16x4_t,
10782) -> float32x4_t {
10783    static_assert_uimm_bits!(LANE, 2);
10784    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10785}
10786#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10787#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
10788#[inline]
10789#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
10790#[target_feature(enable = "neon,fp16")]
10791#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10792#[rustc_legacy_const_generics(3)]
10793#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10794pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
10795    r: float32x4_t,
10796    a: float16x8_t,
10797    b: float16x8_t,
10798) -> float32x4_t {
10799    static_assert_uimm_bits!(LANE, 3);
10800    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10801}
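// Usage sketch for the lane-indexed low forms (hypothetical values): these use
// the low f16 half of `a`, so each output lane i becomes
// r[i] - widen(a[i]) * widen(b[LANE]):
//
//     let r = vdupq_n_f32(10.0);
//     let a = vdupq_n_f16(4.0);
//     let b = vdupq_n_f16(2.0);
//     let out = vfmlslq_laneq_low_f16::<3>(r, a, b); // every lane: 10.0 - 4.0 * 2.0 == 2.0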
10802#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
10804#[inline]
10805#[target_feature(enable = "neon,fp16")]
10806#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10807#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10808#[cfg_attr(test, assert_instr(fmlsl))]
10809pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10810    unsafe extern "unadjusted" {
10811        #[cfg_attr(
10812            any(target_arch = "aarch64", target_arch = "arm64ec"),
10813            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
10814        )]
10815        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10816    }
10817    unsafe { _vfmlsl_low_f16(r, a, b) }
10818}
10819#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10820#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
10821#[inline]
10822#[target_feature(enable = "neon,fp16")]
10823#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10824#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10825#[cfg_attr(test, assert_instr(fmlsl))]
10826pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10827    unsafe extern "unadjusted" {
10828        #[cfg_attr(
10829            any(target_arch = "aarch64", target_arch = "arm64ec"),
10830            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
10831        )]
10832        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10833    }
10834    unsafe { _vfmlslq_low_f16(r, a, b) }
10835}
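// Usage sketch for the vector forms (hypothetical values): FMLSL multiplies the
// low f16 halves lanewise, widens the products to f32, and subtracts them from
// the accumulator, i.e. out[i] = r[i] - widen(a[i]) * widen(b[i]):
//
//     let r = vdupq_n_f32(0.0);
//     let a = vdupq_n_f16(1.5);
//     let b = vdupq_n_f16(2.0);
//     let out = vfmlslq_low_f16(r, a, b); // every lane: 0.0 - 1.5 * 2.0 == -3.0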
10836#[doc = "Floating-point fused multiply-subtract from accumulator"]
10837#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
10838#[inline]
10839#[target_feature(enable = "neon")]
10840#[cfg_attr(test, assert_instr(fmsub))]
10841#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10842pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
10843    unsafe {
10844        let b: float64x1_t = simd_neg(b);
10845        vfma_f64(a, b, c)
10846    }
10847}
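// Usage sketch (hypothetical values): vfms_f64 computes a - b * c on the single
// f64 lane with one rounding step (FMSUB):
//
//     let a = vdup_n_f64(10.0);
//     let b = vdup_n_f64(2.0);
//     let c = vdup_n_f64(3.0);
//     let out = vfms_f64(a, b, c); // out == [10.0 - 2.0 * 3.0] == [4.0]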
10848#[doc = "Floating-point fused multiply-subtract from accumulator"]
10849#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
10850#[inline]
10851#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10852#[rustc_legacy_const_generics(3)]
10853#[target_feature(enable = "neon,fp16")]
10854#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10855pub fn vfms_lane_f16<const LANE: i32>(
10856    a: float16x4_t,
10857    b: float16x4_t,
10858    c: float16x4_t,
10859) -> float16x4_t {
10860    static_assert_uimm_bits!(LANE, 2);
10861    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
10862}
10863#[doc = "Floating-point fused multiply-subtract from accumulator"]
10864#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
10865#[inline]
10866#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10867#[rustc_legacy_const_generics(3)]
10868#[target_feature(enable = "neon,fp16")]
10869#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10870pub fn vfms_laneq_f16<const LANE: i32>(
10871    a: float16x4_t,
10872    b: float16x4_t,
10873    c: float16x8_t,
10874) -> float16x4_t {
10875    static_assert_uimm_bits!(LANE, 3);
10876    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
10877}
10878#[doc = "Floating-point fused multiply-subtract from accumulator"]
10879#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
10880#[inline]
10881#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10882#[rustc_legacy_const_generics(3)]
10883#[target_feature(enable = "neon,fp16")]
10884#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10885pub fn vfmsq_lane_f16<const LANE: i32>(
10886    a: float16x8_t,
10887    b: float16x8_t,
10888    c: float16x4_t,
10889) -> float16x8_t {
10890    static_assert_uimm_bits!(LANE, 2);
10891    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
10892}
10893#[doc = "Floating-point fused multiply-subtract from accumulator"]
10894#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
10895#[inline]
10896#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10897#[rustc_legacy_const_generics(3)]
10898#[target_feature(enable = "neon,fp16")]
10899#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10900pub fn vfmsq_laneq_f16<const LANE: i32>(
10901    a: float16x8_t,
10902    b: float16x8_t,
10903    c: float16x8_t,
10904) -> float16x8_t {
10905    static_assert_uimm_bits!(LANE, 3);
10906    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
10907}
10908#[doc = "Floating-point fused multiply-subtract to accumulator"]
10909#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
10910#[inline]
10911#[target_feature(enable = "neon")]
10912#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10913#[rustc_legacy_const_generics(3)]
10914#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10915pub fn vfms_lane_f32<const LANE: i32>(
10916    a: float32x2_t,
10917    b: float32x2_t,
10918    c: float32x2_t,
10919) -> float32x2_t {
10920    static_assert_uimm_bits!(LANE, 1);
10921    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10922}
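// Usage sketch for the lane-indexed forms (hypothetical values): c[LANE] is
// broadcast before the fused subtract, so out[i] = a[i] - b[i] * c[LANE]:
//
//     let a = vdup_n_f32(5.0);
//     let b = vdup_n_f32(2.0);
//     let c = vdup_n_f32(1.5);
//     let out = vfms_lane_f32::<1>(a, b, c); // every lane: 5.0 - 2.0 * 1.5 == 2.0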
10923#[doc = "Floating-point fused multiply-subtract to accumulator"]
10924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
10925#[inline]
10926#[target_feature(enable = "neon")]
10927#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10928#[rustc_legacy_const_generics(3)]
10929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10930pub fn vfms_laneq_f32<const LANE: i32>(
10931    a: float32x2_t,
10932    b: float32x2_t,
10933    c: float32x4_t,
10934) -> float32x2_t {
10935    static_assert_uimm_bits!(LANE, 2);
10936    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10937}
10938#[doc = "Floating-point fused multiply-subtract to accumulator"]
10939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
10940#[inline]
10941#[target_feature(enable = "neon")]
10942#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10943#[rustc_legacy_const_generics(3)]
10944#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10945pub fn vfmsq_lane_f32<const LANE: i32>(
10946    a: float32x4_t,
10947    b: float32x4_t,
10948    c: float32x2_t,
10949) -> float32x4_t {
10950    static_assert_uimm_bits!(LANE, 1);
10951    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
10952}
10953#[doc = "Floating-point fused multiply-subtract to accumulator"]
10954#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
10955#[inline]
10956#[target_feature(enable = "neon")]
10957#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10958#[rustc_legacy_const_generics(3)]
10959#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10960pub fn vfmsq_laneq_f32<const LANE: i32>(
10961    a: float32x4_t,
10962    b: float32x4_t,
10963    c: float32x4_t,
10964) -> float32x4_t {
10965    static_assert_uimm_bits!(LANE, 2);
10966    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
10967}
10968#[doc = "Floating-point fused multiply-subtract to accumulator"]
10969#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
10970#[inline]
10971#[target_feature(enable = "neon")]
10972#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10973#[rustc_legacy_const_generics(3)]
10974#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10975pub fn vfmsq_laneq_f64<const LANE: i32>(
10976    a: float64x2_t,
10977    b: float64x2_t,
10978    c: float64x2_t,
10979) -> float64x2_t {
10980    static_assert_uimm_bits!(LANE, 1);
10981    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
10982}
10983#[doc = "Floating-point fused multiply-subtract to accumulator"]
10984#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
10985#[inline]
10986#[target_feature(enable = "neon")]
10987#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
10988#[rustc_legacy_const_generics(3)]
10989#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10990pub fn vfms_lane_f64<const LANE: i32>(
10991    a: float64x1_t,
10992    b: float64x1_t,
10993    c: float64x1_t,
10994) -> float64x1_t {
10995    static_assert!(LANE == 0);
10996    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
10997}
10998#[doc = "Floating-point fused multiply-subtract to accumulator"]
10999#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
11000#[inline]
11001#[target_feature(enable = "neon")]
11002#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11003#[rustc_legacy_const_generics(3)]
11004#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11005pub fn vfms_laneq_f64<const LANE: i32>(
11006    a: float64x1_t,
11007    b: float64x1_t,
11008    c: float64x2_t,
11009) -> float64x1_t {
11010    static_assert_uimm_bits!(LANE, 1);
11011    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
11012}
11013#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
11014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
11015#[inline]
11016#[target_feature(enable = "neon,fp16")]
11017#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11018#[cfg_attr(test, assert_instr(fmls))]
11019pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
11020    vfms_f16(a, b, vdup_n_f16(c))
11021}
11022#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
11023#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
11024#[inline]
11025#[target_feature(enable = "neon,fp16")]
11026#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11027#[cfg_attr(test, assert_instr(fmls))]
11028pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
11029    vfmsq_f16(a, b, vdupq_n_f16(c))
11030}
11031#[doc = "Floating-point fused Multiply-Subtract to accumulator (vector)"]
11032#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
11033#[inline]
11034#[target_feature(enable = "neon")]
11035#[cfg_attr(test, assert_instr(fmsub))]
11036#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11037pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
11038    vfms_f64(a, b, vdup_n_f64(c))
11039}
11040#[doc = "Floating-point fused multiply-subtract from accumulator"]
11041#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
11042#[inline]
11043#[cfg_attr(test, assert_instr(fmsub))]
11044#[target_feature(enable = "neon,fp16")]
11045#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11046pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
11047    vfmah_f16(a, -b, c)
11048}
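// Usage sketch (hypothetical values): the scalar form computes a - b * c in
// half precision with a single rounding step:
//
//     let out = vfmsh_f16(1.0, 2.0, 3.0); // 1.0 - 2.0 * 3.0 == -5.0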
11049#[doc = "Floating-point fused multiply-subtract from accumulator"]
11050#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
11051#[inline]
11052#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11053#[rustc_legacy_const_generics(3)]
11054#[target_feature(enable = "neon,fp16")]
11055#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11056pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
11057    static_assert_uimm_bits!(LANE, 2);
11058    unsafe {
11059        let c: f16 = simd_extract!(v, LANE as u32);
11060        vfmsh_f16(a, b, c)
11061    }
11062}
11063#[doc = "Floating-point fused multiply-subtract from accumulator"]
11064#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
11065#[inline]
11066#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11067#[rustc_legacy_const_generics(3)]
11068#[target_feature(enable = "neon,fp16")]
11069#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11070pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
11071    static_assert_uimm_bits!(LANE, 3);
11072    unsafe {
11073        let c: f16 = simd_extract!(v, LANE as u32);
11074        vfmsh_f16(a, b, c)
11075    }
11076}
11077#[doc = "Floating-point fused multiply-subtract from accumulator"]
11078#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
11079#[inline]
11080#[target_feature(enable = "neon")]
11081#[cfg_attr(test, assert_instr(fmls))]
11082#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11083pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
11084    unsafe {
11085        let b: float64x2_t = simd_neg(b);
11086        vfmaq_f64(a, b, c)
11087    }
11088}
11089#[doc = "Floating-point fused multiply-subtract to accumulator"]
11090#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
11091#[inline]
11092#[target_feature(enable = "neon")]
11093#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
11094#[rustc_legacy_const_generics(3)]
11095#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11096pub fn vfmsq_lane_f64<const LANE: i32>(
11097    a: float64x2_t,
11098    b: float64x2_t,
11099    c: float64x1_t,
11100) -> float64x2_t {
11101    static_assert!(LANE == 0);
11102    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
11103}
11104#[doc = "Floating-point fused Multiply-Subtract to accumulator (vector)"]
11105#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
11106#[inline]
11107#[target_feature(enable = "neon")]
11108#[cfg_attr(test, assert_instr(fmls))]
11109#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11110pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
11111    vfmsq_f64(a, b, vdupq_n_f64(c))
11112}
11113#[doc = "Floating-point fused multiply-subtract to accumulator"]
11114#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
11115#[inline]
11116#[target_feature(enable = "neon")]
11117#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11118#[rustc_legacy_const_generics(3)]
11119#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11120pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
11121    vfmas_lane_f32::<LANE>(a, -b, c)
11122}
11123#[doc = "Floating-point fused multiply-subtract to accumulator"]
11124#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
11125#[inline]
11126#[target_feature(enable = "neon")]
11127#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11128#[rustc_legacy_const_generics(3)]
11129#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11130pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
11131    vfmas_laneq_f32::<LANE>(a, -b, c)
11132}
11133#[doc = "Floating-point fused multiply-subtract to accumulator"]
11134#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
11135#[inline]
11136#[target_feature(enable = "neon")]
11137#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11138#[rustc_legacy_const_generics(3)]
11139#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11140pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
11141    vfmad_lane_f64::<LANE>(a, -b, c)
11142}
11143#[doc = "Floating-point fused multiply-subtract to accumulator"]
11144#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
11145#[inline]
11146#[target_feature(enable = "neon")]
11147#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
11148#[rustc_legacy_const_generics(3)]
11149#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11150pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
11151    vfmad_laneq_f64::<LANE>(a, -b, c)
11152}
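// Usage sketch for the scalar lane forms above (hypothetical values): each one
// negates `b` and defers to the matching fused multiply-add, so the result is
// a - b * c[LANE]:
//
//     let c = vdup_n_f64(3.0);
//     let out = vfmsd_lane_f64::<0>(2.0, 4.0, c); // 2.0 - 4.0 * 3.0 == -10.0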
11153#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11154#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
11155#[doc = "## Safety"]
11156#[doc = "  * Neon intrinsic unsafe"]
11157#[inline]
11158#[target_feature(enable = "neon,fp16")]
11159#[cfg_attr(test, assert_instr(ldr))]
11160#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11161pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
11162    crate::ptr::read_unaligned(ptr.cast())
11163}
11164#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11165#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
11166#[doc = "## Safety"]
11167#[doc = "  * Neon intrinsic unsafe"]
11168#[inline]
11169#[target_feature(enable = "neon,fp16")]
11170#[cfg_attr(test, assert_instr(ldr))]
11171#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11172pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
11173    crate::ptr::read_unaligned(ptr.cast())
11174}
11175#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11176#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
11177#[doc = "## Safety"]
11178#[doc = "  * Neon intrinsic unsafe"]
11179#[inline]
11180#[target_feature(enable = "neon")]
11181#[cfg_attr(test, assert_instr(ldr))]
11182#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11183pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
11184    crate::ptr::read_unaligned(ptr.cast())
11185}
11186#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11187#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
11188#[doc = "## Safety"]
11189#[doc = "  * Neon intrinsic unsafe"]
11190#[inline]
11191#[target_feature(enable = "neon")]
11192#[cfg_attr(test, assert_instr(ldr))]
11193#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11194pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
11195    crate::ptr::read_unaligned(ptr.cast())
11196}
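// Usage sketch (hypothetical buffer): the vld1 forms are plain lanewise loads
// implemented with `read_unaligned`, so the pointer only has to be valid for
// reads of the vector's size; no extra alignment is required:
//
//     let data: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
//     let v = unsafe { vld1q_f32(data.as_ptr()) }; // lanes: 1.0, 2.0, 3.0, 4.0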
11197#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11198#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
11199#[doc = "## Safety"]
11200#[doc = "  * Neon intrinsic unsafe"]
11201#[inline]
11202#[target_feature(enable = "neon")]
11203#[cfg_attr(test, assert_instr(ldr))]
11204#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11205pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
11206    crate::ptr::read_unaligned(ptr.cast())
11207}
11208#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11209#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
11210#[doc = "## Safety"]
11211#[doc = "  * Neon intrinsic unsafe"]
11212#[inline]
11213#[target_feature(enable = "neon")]
11214#[cfg_attr(test, assert_instr(ldr))]
11215#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11216pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
11217    crate::ptr::read_unaligned(ptr.cast())
11218}
11219#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11220#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
11221#[doc = "## Safety"]
11222#[doc = "  * Neon intrinsic unsafe"]
11223#[inline]
11224#[target_feature(enable = "neon")]
11225#[cfg_attr(test, assert_instr(ldr))]
11226#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11227pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
11228    crate::ptr::read_unaligned(ptr.cast())
11229}
11230#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11231#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
11232#[doc = "## Safety"]
11233#[doc = "  * Neon intrinsic unsafe"]
11234#[inline]
11235#[target_feature(enable = "neon")]
11236#[cfg_attr(test, assert_instr(ldr))]
11237#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11238pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
11239    crate::ptr::read_unaligned(ptr.cast())
11240}
11241#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11242#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
11243#[doc = "## Safety"]
11244#[doc = "  * Neon intrinsic unsafe"]
11245#[inline]
11246#[target_feature(enable = "neon")]
11247#[cfg_attr(test, assert_instr(ldr))]
11248#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11249pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
11250    crate::ptr::read_unaligned(ptr.cast())
11251}
11252#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11253#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
11254#[doc = "## Safety"]
11255#[doc = "  * Neon intrinsic unsafe"]
11256#[inline]
11257#[target_feature(enable = "neon")]
11258#[cfg_attr(test, assert_instr(ldr))]
11259#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11260pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
11261    crate::ptr::read_unaligned(ptr.cast())
11262}
11263#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
11265#[doc = "## Safety"]
11266#[doc = "  * Neon intrinsic unsafe"]
11267#[inline]
11268#[target_feature(enable = "neon")]
11269#[cfg_attr(test, assert_instr(ldr))]
11270#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11271pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
11272    crate::ptr::read_unaligned(ptr.cast())
11273}
11274#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
11276#[doc = "## Safety"]
11277#[doc = "  * Neon intrinsic unsafe"]
11278#[inline]
11279#[target_feature(enable = "neon")]
11280#[cfg_attr(test, assert_instr(ldr))]
11281#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11282pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
11283    crate::ptr::read_unaligned(ptr.cast())
11284}
11285#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11286#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
11287#[doc = "## Safety"]
11288#[doc = "  * Neon intrinsic unsafe"]
11289#[inline]
11290#[target_feature(enable = "neon")]
11291#[cfg_attr(test, assert_instr(ldr))]
11292#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11293pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
11294    crate::ptr::read_unaligned(ptr.cast())
11295}
11296#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11297#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
11298#[doc = "## Safety"]
11299#[doc = "  * Neon intrinsic unsafe"]
11300#[inline]
11301#[target_feature(enable = "neon")]
11302#[cfg_attr(test, assert_instr(ldr))]
11303#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11304pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
11305    crate::ptr::read_unaligned(ptr.cast())
11306}
11307#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11308#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
11309#[doc = "## Safety"]
11310#[doc = "  * Neon intrinsic unsafe"]
11311#[inline]
11312#[target_feature(enable = "neon")]
11313#[cfg_attr(test, assert_instr(ldr))]
11314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11315pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
11316    crate::ptr::read_unaligned(ptr.cast())
11317}
11318#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11319#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
11320#[doc = "## Safety"]
11321#[doc = "  * Neon intrinsic unsafe"]
11322#[inline]
11323#[target_feature(enable = "neon")]
11324#[cfg_attr(test, assert_instr(ldr))]
11325#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11326pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
11327    crate::ptr::read_unaligned(ptr.cast())
11328}
11329#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
11331#[doc = "## Safety"]
11332#[doc = "  * Neon intrinsic unsafe"]
11333#[inline]
11334#[target_feature(enable = "neon")]
11335#[cfg_attr(test, assert_instr(ldr))]
11336#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11337pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
11338    crate::ptr::read_unaligned(ptr.cast())
11339}
11340#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11341#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
11342#[doc = "## Safety"]
11343#[doc = "  * Neon intrinsic unsafe"]
11344#[inline]
11345#[target_feature(enable = "neon")]
11346#[cfg_attr(test, assert_instr(ldr))]
11347#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11348pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
11349    crate::ptr::read_unaligned(ptr.cast())
11350}
11351#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11352#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
11353#[doc = "## Safety"]
11354#[doc = "  * Neon intrinsic unsafe"]
11355#[inline]
11356#[target_feature(enable = "neon")]
11357#[cfg_attr(test, assert_instr(ldr))]
11358#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11359pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
11360    crate::ptr::read_unaligned(ptr.cast())
11361}
11362#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
11364#[doc = "## Safety"]
11365#[doc = "  * Neon intrinsic unsafe"]
11366#[inline]
11367#[target_feature(enable = "neon")]
11368#[cfg_attr(test, assert_instr(ldr))]
11369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11370pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
11371    crate::ptr::read_unaligned(ptr.cast())
11372}
11373#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11374#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
11375#[doc = "## Safety"]
11376#[doc = "  * Neon intrinsic unsafe"]
11377#[inline]
11378#[target_feature(enable = "neon")]
11379#[cfg_attr(test, assert_instr(ldr))]
11380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11381pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
11382    crate::ptr::read_unaligned(ptr.cast())
11383}
11384#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11385#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
11386#[doc = "## Safety"]
11387#[doc = "  * Neon intrinsic unsafe"]
11388#[inline]
11389#[target_feature(enable = "neon")]
11390#[cfg_attr(test, assert_instr(ldr))]
11391#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11392pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
11393    crate::ptr::read_unaligned(ptr.cast())
11394}
11395#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11396#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
11397#[doc = "## Safety"]
11398#[doc = "  * Neon intrinsic unsafe"]
11399#[inline]
11400#[target_feature(enable = "neon")]
11401#[cfg_attr(test, assert_instr(ldr))]
11402#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11403pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
11404    crate::ptr::read_unaligned(ptr.cast())
11405}
11406#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11407#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
11408#[doc = "## Safety"]
11409#[doc = "  * Neon intrinsic unsafe"]
11410#[inline]
11411#[target_feature(enable = "neon")]
11412#[cfg_attr(test, assert_instr(ldr))]
11413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11414pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
11415    crate::ptr::read_unaligned(ptr.cast())
11416}
11417#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
11419#[doc = "## Safety"]
11420#[doc = "  * Neon intrinsic unsafe"]
11421#[inline]
11422#[target_feature(enable = "neon")]
11423#[cfg_attr(test, assert_instr(ldr))]
11424#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11425pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
11426    crate::ptr::read_unaligned(ptr.cast())
11427}
11428#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11429#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
11430#[doc = "## Safety"]
11431#[doc = "  * Neon intrinsic unsafe"]
11432#[inline]
11433#[target_feature(enable = "neon")]
11434#[cfg_attr(test, assert_instr(ldr))]
11435#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11436pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
11437    crate::ptr::read_unaligned(ptr.cast())
11438}
11439#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
11441#[doc = "## Safety"]
11442#[doc = "  * Neon intrinsic unsafe"]
11443#[inline]
11444#[target_feature(enable = "neon,aes")]
11445#[cfg_attr(test, assert_instr(ldr))]
11446#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11447pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
11448    crate::ptr::read_unaligned(ptr.cast())
11449}
11450#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11451#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
11452#[doc = "## Safety"]
11453#[doc = "  * Neon intrinsic unsafe"]
11454#[inline]
11455#[target_feature(enable = "neon,aes")]
11456#[cfg_attr(test, assert_instr(ldr))]
11457#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11458pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
11459    crate::ptr::read_unaligned(ptr.cast())
11460}
11461#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11462#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
11463#[doc = "## Safety"]
11464#[doc = "  * Neon intrinsic unsafe"]
11465#[inline]
11466#[target_feature(enable = "neon")]
11467#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11468#[cfg_attr(test, assert_instr(ld1))]
11469pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
11470    unsafe extern "unadjusted" {
11471        #[cfg_attr(
11472            any(target_arch = "aarch64", target_arch = "arm64ec"),
11473            link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0f64"
11474        )]
11475        fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t;
11476    }
11477    _vld1_f64_x2(a)
11478}
11479#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11480#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
11481#[doc = "## Safety"]
11482#[doc = "  * Neon intrinsic unsafe"]
11483#[inline]
11484#[target_feature(enable = "neon")]
11485#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11486#[cfg_attr(test, assert_instr(ld1))]
11487pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
11488    unsafe extern "unadjusted" {
11489        #[cfg_attr(
11490            any(target_arch = "aarch64", target_arch = "arm64ec"),
11491            link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0f64"
11492        )]
11493        fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t;
11494    }
11495    _vld1_f64_x3(a)
11496}
11497#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11498#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
11499#[doc = "## Safety"]
11500#[doc = "  * Neon intrinsic unsafe"]
11501#[inline]
11502#[target_feature(enable = "neon")]
11503#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11504#[cfg_attr(test, assert_instr(ld1))]
11505pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
11506    unsafe extern "unadjusted" {
11507        #[cfg_attr(
11508            any(target_arch = "aarch64", target_arch = "arm64ec"),
11509            link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0f64"
11510        )]
11511        fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t;
11512    }
11513    _vld1_f64_x4(a)
11514}
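// Usage sketch (hypothetical buffer): the _x2/_x3/_x4 forms load that many
// consecutive vectors from a single base pointer; for float64x1xN_t that means
// N consecutive f64 values:
//
//     let data: [f64; 3] = [1.0, 2.0, 3.0];
//     let v = unsafe { vld1_f64_x3(data.as_ptr()) }; // v.0 == [1.0], v.1 == [2.0], v.2 == [3.0]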
11515#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11516#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
11517#[doc = "## Safety"]
11518#[doc = "  * Neon intrinsic unsafe"]
11519#[inline]
11520#[target_feature(enable = "neon")]
11521#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11522#[cfg_attr(test, assert_instr(ld1))]
11523pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
11524    unsafe extern "unadjusted" {
11525        #[cfg_attr(
11526            any(target_arch = "aarch64", target_arch = "arm64ec"),
11527            link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0f64"
11528        )]
11529        fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t;
11530    }
11531    _vld1q_f64_x2(a)
11532}
11533#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11534#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
11535#[doc = "## Safety"]
11536#[doc = "  * Neon intrinsic unsafe"]
11537#[inline]
11538#[target_feature(enable = "neon")]
11539#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11540#[cfg_attr(test, assert_instr(ld1))]
11541pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
11542    unsafe extern "unadjusted" {
11543        #[cfg_attr(
11544            any(target_arch = "aarch64", target_arch = "arm64ec"),
11545            link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0f64"
11546        )]
11547        fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t;
11548    }
11549    _vld1q_f64_x3(a)
11550}
11551#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11552#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
11553#[doc = "## Safety"]
11554#[doc = "  * Neon intrinsic unsafe"]
11555#[inline]
11556#[target_feature(enable = "neon")]
11557#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11558#[cfg_attr(test, assert_instr(ld1))]
11559pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
11560    unsafe extern "unadjusted" {
11561        #[cfg_attr(
11562            any(target_arch = "aarch64", target_arch = "arm64ec"),
11563            link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0f64"
11564        )]
11565        fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t;
11566    }
11567    _vld1q_f64_x4(a)
11568}
11569#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11570#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
11571#[doc = "## Safety"]
11572#[doc = "  * Neon intrinsic unsafe"]
11573#[inline]
11574#[target_feature(enable = "neon")]
11575#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11576#[cfg_attr(test, assert_instr(ld2r))]
11577pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
11578    unsafe extern "unadjusted" {
11579        #[cfg_attr(
11580            any(target_arch = "aarch64", target_arch = "arm64ec"),
11581            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0f64"
11582        )]
11583        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
11584    }
11585    _vld2_dup_f64(a as _)
11586}
11587#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11588#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
11589#[doc = "## Safety"]
11590#[doc = "  * Neon intrinsic unsafe"]
11591#[inline]
11592#[target_feature(enable = "neon")]
11593#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11594#[cfg_attr(test, assert_instr(ld2r))]
11595pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
11596    unsafe extern "unadjusted" {
11597        #[cfg_attr(
11598            any(target_arch = "aarch64", target_arch = "arm64ec"),
11599            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0f64"
11600        )]
11601        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
11602    }
11603    _vld2q_dup_f64(a as _)
11604}
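// Usage sketch (hypothetical buffer): LD2R reads one interleaved {x, y} pair
// and replicates x across every lane of .0 and y across every lane of .1:
//
//     let data: [f64; 2] = [1.0, 2.0];
//     let v = unsafe { vld2q_dup_f64(data.as_ptr()) }; // v.0 == [1.0, 1.0], v.1 == [2.0, 2.0]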
11605#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11606#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
11607#[doc = "## Safety"]
11608#[doc = "  * Neon intrinsic unsafe"]
11609#[inline]
11610#[target_feature(enable = "neon")]
11611#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11612#[cfg_attr(test, assert_instr(ld2r))]
11613pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
11614    unsafe extern "unadjusted" {
11615        #[cfg_attr(
11616            any(target_arch = "aarch64", target_arch = "arm64ec"),
11617            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0i64"
11618        )]
11619        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
11620    }
11621    _vld2q_dup_s64(a as _)
11622}
11623#[doc = "Load multiple 2-element structures to two registers"]
11624#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
11625#[doc = "## Safety"]
11626#[doc = "  * Neon intrinsic unsafe"]
11627#[inline]
11628#[target_feature(enable = "neon")]
11629#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11630#[cfg_attr(test, assert_instr(nop))]
11631pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
11632    unsafe extern "unadjusted" {
11633        #[cfg_attr(
11634            any(target_arch = "aarch64", target_arch = "arm64ec"),
11635            link_name = "llvm.aarch64.neon.ld2.v1f64.p0v1f64"
11636        )]
11637        fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t;
11638    }
11639    _vld2_f64(a as _)
11640}
11641#[doc = "Load multiple 2-element structures to two registers"]
11642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
11643#[doc = "## Safety"]
11644#[doc = "  * Neon intrinsic unsafe"]
11645#[inline]
11646#[target_feature(enable = "neon")]
11647#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11648#[rustc_legacy_const_generics(2)]
11649#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11650pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
11651    static_assert!(LANE == 0);
11652    unsafe extern "unadjusted" {
11653        #[cfg_attr(
11654            any(target_arch = "aarch64", target_arch = "arm64ec"),
11655            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8"
11656        )]
11657        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
11658    }
11659    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
11660}
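// Usage sketch (hypothetical buffer): the ld2 lane forms read one {x, y} pair
// into lane LANE of b.0 and b.1 and pass the remaining lanes through unchanged
// (for the 1-lane x1 types, LANE must be 0):
//
//     let data: [f64; 2] = [1.0, 2.0];
//     let src = float64x1x2_t(vdup_n_f64(0.0), vdup_n_f64(0.0));
//     let v = unsafe { vld2_lane_f64::<0>(data.as_ptr(), src) }; // v.0 == [1.0], v.1 == [2.0]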
11661#[doc = "Load multiple 2-element structures to two registers"]
11662#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
11663#[doc = "## Safety"]
11664#[doc = "  * Neon intrinsic unsafe"]
11665#[inline]
11666#[target_feature(enable = "neon")]
11667#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11668#[rustc_legacy_const_generics(2)]
11669#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11670pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
11671    static_assert!(LANE == 0);
11672    unsafe extern "unadjusted" {
11673        #[cfg_attr(
11674            any(target_arch = "aarch64", target_arch = "arm64ec"),
11675            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8"
11676        )]
11677        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
11678    }
11679    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
11680}
11681#[doc = "Load multiple 2-element structures to two registers"]
11682#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
11683#[doc = "## Safety"]
11684#[doc = "  * Neon intrinsic unsafe"]
11685#[inline]
11686#[target_feature(enable = "neon,aes")]
11687#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11688#[rustc_legacy_const_generics(2)]
11689#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11690pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
11691    static_assert!(LANE == 0);
11692    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
11693}
11694#[doc = "Load multiple 2-element structures to two registers"]
11695#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
11696#[doc = "## Safety"]
11697#[doc = "  * Neon intrinsic unsafe"]
11698#[inline]
11699#[target_feature(enable = "neon")]
11700#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11701#[rustc_legacy_const_generics(2)]
11702#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11703pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
11704    static_assert!(LANE == 0);
11705    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
11706}
11707#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11708#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
11709#[doc = "## Safety"]
11710#[doc = "  * Neon intrinsic unsafe"]
11711#[inline]
11712#[cfg(target_endian = "little")]
11713#[target_feature(enable = "neon,aes")]
11714#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11715#[cfg_attr(test, assert_instr(ld2r))]
11716pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
11717    transmute(vld2q_dup_s64(transmute(a)))
11718}
11719#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11720#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
11721#[doc = "## Safety"]
11722#[doc = "  * Neon intrinsic unsafe"]
11723#[inline]
11724#[cfg(target_endian = "big")]
11725#[target_feature(enable = "neon,aes")]
11726#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11727#[cfg_attr(test, assert_instr(ld2r))]
11728pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
11729    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
11730    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
11731    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
11732    ret_val
11733}
11734#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11735#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
11736#[doc = "## Safety"]
11737#[doc = "  * Neon intrinsic unsafe"]
11738#[inline]
11739#[cfg(target_endian = "little")]
11740#[target_feature(enable = "neon")]
11741#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11742#[cfg_attr(test, assert_instr(ld2r))]
11743pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
11744    transmute(vld2q_dup_s64(transmute(a)))
11745}
11746#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
11747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
11748#[doc = "## Safety"]
11749#[doc = "  * Neon intrinsic unsafe"]
11750#[inline]
11751#[cfg(target_endian = "big")]
11752#[target_feature(enable = "neon")]
11753#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11754#[cfg_attr(test, assert_instr(ld2r))]
11755pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
11756    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
11757    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
11758    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
11759    ret_val
11760}
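// A note on the big-endian paths above (a sketch of the intent): whole-vector
// transmutes flip lane order on big-endian targets, so the [1, 0] shuffles
// restore the order callers expect, making the observable result
// endianness-independent:
//
//     let data: [u64; 2] = [1, 2];
//     let v = unsafe { vld2q_dup_u64(data.as_ptr()) }; // v.0 == [1, 1], v.1 == [2, 2] on either endianness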
11761#[doc = "Load multiple 2-element structures to two registers"]
11762#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
11763#[doc = "## Safety"]
11764#[doc = "  * Neon intrinsic unsafe"]
11765#[inline]
11766#[target_feature(enable = "neon")]
11767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11768#[cfg_attr(test, assert_instr(ld2))]
11769pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
11770    unsafe extern "unadjusted" {
11771        #[cfg_attr(
11772            any(target_arch = "aarch64", target_arch = "arm64ec"),
11773            link_name = "llvm.aarch64.neon.ld2.v2f64.p0v2f64"
11774        )]
11775        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
11776    }
11777    _vld2q_f64(a as _)
11778}
11779#[doc = "Load multiple 2-element structures to two registers"]
11780#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
11781#[doc = "## Safety"]
11782#[doc = "  * Neon intrinsic unsafe"]
11783#[inline]
11784#[target_feature(enable = "neon")]
11785#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11786#[cfg_attr(test, assert_instr(ld2))]
11787pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
11788    unsafe extern "unadjusted" {
11789        #[cfg_attr(
11790            any(target_arch = "aarch64", target_arch = "arm64ec"),
11791            link_name = "llvm.aarch64.neon.ld2.v2i64.p0v2i64"
11792        )]
11793        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
11794    }
11795    _vld2q_s64(a as _)
11796}
11797#[doc = "Load multiple 2-element structures to two registers"]
11798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
11799#[doc = "## Safety"]
11800#[doc = "  * Neon intrinsic unsafe"]
11801#[inline]
11802#[target_feature(enable = "neon")]
11803#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11804#[rustc_legacy_const_generics(2)]
11805#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11806pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
11807    static_assert_uimm_bits!(LANE, 1);
11808    unsafe extern "unadjusted" {
11809        #[cfg_attr(
11810            any(target_arch = "aarch64", target_arch = "arm64ec"),
11811            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8"
11812        )]
11813        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
11814            -> float64x2x2_t;
11815    }
11816    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
11817}
11818#[doc = "Load multiple 2-element structures to two registers"]
11819#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
11820#[doc = "## Safety"]
11821#[doc = "  * Neon intrinsic unsafe"]
11822#[inline]
11823#[target_feature(enable = "neon")]
11824#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11825#[rustc_legacy_const_generics(2)]
11826#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11827pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
11828    static_assert_uimm_bits!(LANE, 4);
11829    unsafe extern "unadjusted" {
11830        #[cfg_attr(
11831            any(target_arch = "aarch64", target_arch = "arm64ec"),
11832            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8"
11833        )]
11834        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
11835    }
11836    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
11837}
11838#[doc = "Load multiple 2-element structures to two registers"]
11839#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
11840#[doc = "## Safety"]
11841#[doc = "  * Neon intrinsic unsafe"]
11842#[inline]
11843#[target_feature(enable = "neon")]
11844#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11845#[rustc_legacy_const_generics(2)]
11846#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11847pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
11848    static_assert_uimm_bits!(LANE, 1);
11849    unsafe extern "unadjusted" {
11850        #[cfg_attr(
11851            any(target_arch = "aarch64", target_arch = "arm64ec"),
11852            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8"
11853        )]
11854        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
11855    }
11856    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
11857}
11858#[doc = "Load multiple 2-element structures to two registers"]
11859#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
11860#[doc = "## Safety"]
11861#[doc = "  * Neon intrinsic unsafe"]
11862#[inline]
11863#[target_feature(enable = "neon,aes")]
11864#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11865#[rustc_legacy_const_generics(2)]
11866#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11867pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
11868    static_assert_uimm_bits!(LANE, 1);
11869    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
11870}
11871#[doc = "Load multiple 2-element structures to two registers"]
11872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
11873#[doc = "## Safety"]
11874#[doc = "  * Neon intrinsic unsafe"]
11875#[inline]
11876#[target_feature(enable = "neon")]
11877#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11878#[rustc_legacy_const_generics(2)]
11879#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11880pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
11881    static_assert_uimm_bits!(LANE, 4);
11882    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
11883}
11884#[doc = "Load multiple 2-element structures to two registers"]
11885#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
11886#[doc = "## Safety"]
11887#[doc = "  * Neon intrinsic unsafe"]
11888#[inline]
11889#[target_feature(enable = "neon")]
11890#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11891#[rustc_legacy_const_generics(2)]
11892#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11893pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
11894    static_assert_uimm_bits!(LANE, 1);
11895    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
11896}
11897#[doc = "Load multiple 2-element structures to two registers"]
11898#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
11899#[doc = "## Safety"]
11900#[doc = "  * Neon intrinsic unsafe"]
11901#[inline]
11902#[target_feature(enable = "neon")]
11903#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11904#[rustc_legacy_const_generics(2)]
11905#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11906pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
11907    static_assert_uimm_bits!(LANE, 4);
11908    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
11909}
11910#[doc = "Load multiple 2-element structures to two registers"]
11911#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
11912#[doc = "## Safety"]
11913#[doc = "  * Neon intrinsic unsafe"]
11914#[inline]
11915#[cfg(target_endian = "little")]
11916#[target_feature(enable = "neon,aes")]
11917#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11918#[cfg_attr(test, assert_instr(ld2))]
11919pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
11920    transmute(vld2q_s64(transmute(a)))
11921}
11922#[doc = "Load multiple 2-element structures to two registers"]
11923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
11924#[doc = "## Safety"]
11925#[doc = "  * Neon intrinsic unsafe"]
11926#[inline]
11927#[cfg(target_endian = "big")]
11928#[target_feature(enable = "neon,aes")]
11929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11930#[cfg_attr(test, assert_instr(ld2))]
11931pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0f64"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
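// The pattern above -- an `unsafe extern "unadjusted"` block whose single
// declaration is bound to an LLVM builtin via `link_name` -- is how this file
// reaches intrinsics that have no Rust-level implementation. The "unadjusted"
// ABI passes the SIMD aggregates exactly as LLVM's intrinsic signature
// expects, without Rust's usual call-ABI adjustments.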
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0f64"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0i64"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v1f64.p0v1f64"
        )]
        fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t;
    }
    _vld3_f64(a as _)
}
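// `assert_instr(nop)` on `vld3_f64` above (and on `vld4_f64` further down)
// means the disassembly test pins no particular instruction: with a single
// lane per vector there is nothing to de-interleave, so the compiler may
// lower the load however it likes rather than emitting an LD3.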
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
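// Lane-index validation convention: one-lane vectors such as float64x1_t
// admit only index 0, enforced via `static_assert!(LANE == 0)`; the wider
// q-forms use `static_assert_uimm_bits!(LANE, N)`, which requires the lane
// index to fit in N bits (1 bit for 2 lanes, 4 bits for 16 lanes).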
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0v2f64"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0v2i64"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
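// --- Editorial usage sketch (not emitted by stdarch-gen-arm) ---
// De-interleaving with `vld3q_s64` above: six consecutive i64 values are
// split round-robin across the three result registers. The input values are
// arbitrary; the `unsafe` block is needed because six elements are read
// through the raw pointer.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld3q_s64_sketch {
    use super::*;
    #[test]
    fn deinterleaves_three_streams() {
        let src: [i64; 6] = [0, 1, 2, 10, 11, 12];
        unsafe {
            let r = vld3q_s64(src.as_ptr());
            // r.0 gathers elements 0 and 3, r.1 elements 1 and 4,
            // r.2 elements 2 and 5.
            assert_eq!(vgetq_lane_s64::<0>(r.0), 0);
            assert_eq!(vgetq_lane_s64::<1>(r.0), 10);
            assert_eq!(vgetq_lane_s64::<0>(r.1), 1);
            assert_eq!(vgetq_lane_s64::<1>(r.2), 12);
        }
    }
}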
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8"
        )]
        fn _vld3q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            n: i64,
            ptr: *const i8,
        ) -> int8x16x3_t;
    }
    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0f64"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0f64"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0i64"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
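// --- Editorial usage sketch (not emitted by stdarch-gen-arm) ---
// Replication with `vld4q_dup_s64` above: four consecutive i64 values are
// read once and each is broadcast across both lanes of its own register.
// The values are arbitrary.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld4q_dup_s64_sketch {
    use super::*;
    #[test]
    fn replicates_each_element() {
        let src: [i64; 4] = [7, 8, 9, 10];
        unsafe {
            let r = vld4q_dup_s64(src.as_ptr());
            assert_eq!(vgetq_lane_s64::<0>(r.0), 7);
            assert_eq!(vgetq_lane_s64::<1>(r.0), 7);
            assert_eq!(vgetq_lane_s64::<0>(r.3), 10);
            assert_eq!(vgetq_lane_s64::<1>(r.3), 10);
        }
    }
}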
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v1f64.p0v1f64"
        )]
        fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    _vld4_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0v2f64"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0v2i64"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8"
        )]
        fn _vld4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *const i8,
        ) -> int8x16x4_t;
    }
    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
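// Editorial reading of the LUTI2 semantics: `a` above is the lookup table and
// `b` packs 2-bit indices, with LANE selecting which segment of `b` supplies
// them. Sixteen 8-bit results need sixteen 2-bit indices (one 32-bit segment
// of the 64-bit index vector, hence LANE in 0..=1 here), while the 16-bit
// forms below need only eight (one 16-bit segment, hence LANE in 0..=3).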
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const LANE: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const LANE: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const LANE: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const LANE: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
        )]
        fn _vluti4q_lane_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
}
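// Editorial note on the `_x2` forms: the table is a register pair, so all
// sixteen 16-bit entries addressable by a 4-bit index fit; accordingly the
// extern declaration above takes the two halves of the `int16x8x2_t` as
// separate arguments. LANE again picks the index segment of `b`: 0..=1 for
// the 64-bit index vector here, 0..=3 for the 128-bit `laneq` variants below.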
13072#[doc = "Lookup table read with 4-bit indices"]
13073#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
13074#[doc = "## Safety"]
13075#[doc = "  * Neon instrinsic unsafe"]
13076#[inline]
13077#[target_feature(enable = "neon,lut")]
13078#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13079#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13080#[rustc_legacy_const_generics(2)]
13081pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
13082    static_assert!(LANE == 0);
13083    unsafe extern "unadjusted" {
13084        #[cfg_attr(
13085            any(target_arch = "aarch64", target_arch = "arm64ec"),
13086            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
13087        )]
13088        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
13089    }
13090    _vluti4q_lane_s8(a, b, LANE)
13091}
13092#[doc = "Lookup table read with 4-bit indices"]
13093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
13094#[doc = "## Safety"]
13095#[doc = "  * Neon instrinsic unsafe"]
13096#[inline]
13097#[target_feature(enable = "neon,lut")]
13098#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13099#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13100#[rustc_legacy_const_generics(2)]
13101pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
13102    static_assert!(LANE == 0);
13103    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
13104}
13105#[doc = "Lookup table read with 4-bit indices"]
13106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
13107#[doc = "## Safety"]
13108#[doc = "  * Neon instrinsic unsafe"]
13109#[inline]
13110#[target_feature(enable = "neon,lut")]
13111#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13112#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13113#[rustc_legacy_const_generics(2)]
13114pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
13115    static_assert!(LANE == 0);
13116    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
13117}
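// Illustrative example (not generator output): a sketch of the
// single-register byte form. `a` is a 16-entry byte table and each nibble
// of `b` selects one entry; LANE must be 0 because the 64-bit index vector
// holds exactly the 16 indices one lookup consumes. The table values are
// arbitrary assumptions; replicating nibble 1 everywhere keeps the expected
// result independent of nibble ordering.
#[cfg(test)]
#[target_feature(enable = "neon,lut")]
unsafe fn example_vluti4q_lane_u8() {
    let entries: [u8; 16] = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25];
    let table = vld1q_u8(entries.as_ptr());
    let indices = vdup_n_u8(0x11); // every nibble is 1
    let r = vluti4q_lane_u8::<0>(table, indices);
    // Every output lane should hold table entry 1 (11).
    assert_eq!(vgetq_lane_u8::<0>(r), 11);
}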
13118#[doc = "Lookup table read with 4-bit indices"]
13119#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
13120#[doc = "## Safety"]
13121#[doc = "  * Neon instrinsic unsafe"]
13122#[inline]
13123#[target_feature(enable = "neon,lut,fp16")]
13124#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13125#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13126#[rustc_legacy_const_generics(2)]
13127pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
13128    a: float16x8x2_t,
13129    b: uint8x16_t,
13130) -> float16x8_t {
13131    static_assert!(LANE >= 0 && LANE <= 3);
13132    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13133}
13134#[doc = "Lookup table read with 4-bit indices"]
13135#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
13136#[doc = "## Safety"]
13137#[doc = "  * Neon instrinsic unsafe"]
13138#[inline]
13139#[target_feature(enable = "neon,lut")]
13140#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13141#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13142#[rustc_legacy_const_generics(2)]
13143pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
13144    static_assert!(LANE >= 0 && LANE <= 3);
13145    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13146}
13147#[doc = "Lookup table read with 4-bit indices"]
13148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
13149#[doc = "## Safety"]
13150#[doc = "  * Neon instrinsic unsafe"]
13151#[inline]
13152#[target_feature(enable = "neon,lut")]
13153#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13154#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13155#[rustc_legacy_const_generics(2)]
13156pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
13157    static_assert!(LANE >= 0 && LANE <= 3);
13158    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13159}
13160#[doc = "Lookup table read with 4-bit indices"]
13161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
13162#[doc = "## Safety"]
13163#[doc = "  * Neon instrinsic unsafe"]
13164#[inline]
13165#[target_feature(enable = "neon,lut")]
13166#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13167#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13168#[rustc_legacy_const_generics(2)]
13169pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
13170    static_assert!(LANE >= 0 && LANE <= 3);
13171    unsafe extern "unadjusted" {
13172        #[cfg_attr(
13173            any(target_arch = "aarch64", target_arch = "arm64ec"),
13174            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
13175        )]
13176        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
13177    }
13178    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
13179}
13180#[doc = "Lookup table read with 4-bit indices"]
13181#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
13182#[doc = "## Safety"]
13183#[doc = "  * Neon instrinsic unsafe"]
13184#[inline]
13185#[target_feature(enable = "neon,lut")]
13186#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13187#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13188#[rustc_legacy_const_generics(2)]
13189pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
13190    static_assert!(LANE >= 0 && LANE <= 1);
13191    unsafe extern "unadjusted" {
13192        #[cfg_attr(
13193            any(target_arch = "aarch64", target_arch = "arm64ec"),
13194            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
13195        )]
13196        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
13197    }
13198    _vluti4q_laneq_s8(a, b, LANE)
13199}
13200#[doc = "Lookup table read with 4-bit indices"]
13201#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
13202#[doc = "## Safety"]
13203#[doc = "  * Neon instrinsic unsafe"]
13204#[inline]
13205#[target_feature(enable = "neon,lut")]
13206#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13207#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13208#[rustc_legacy_const_generics(2)]
13209pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
13210    static_assert!(LANE >= 0 && LANE <= 1);
13211    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
13212}
13213#[doc = "Lookup table read with 4-bit indices"]
13214#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
13215#[doc = "## Safety"]
13216#[doc = "  * Neon instrinsic unsafe"]
13217#[inline]
13218#[target_feature(enable = "neon,lut")]
13219#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13220#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13221#[rustc_legacy_const_generics(2)]
13222pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
13223    static_assert!(LANE >= 0 && LANE <= 1);
13224    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
13225}
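// Illustrative example (not generator output): the laneq forms take a
// 128-bit index vector, so there are more index groups to choose from,
// LANE spans 0..=3 for the 16-bit x2 form and 0..=1 for the byte form.
// This sketch selects group 1 of an all-zero index vector; the table
// contents are arbitrary assumptions.
#[cfg(test)]
#[target_feature(enable = "neon,lut")]
unsafe fn example_vluti4q_laneq_s16_x2() {
    let entries: [i16; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    let table = int16x8x2_t(vld1q_s16(entries.as_ptr()), vld1q_s16(entries.as_ptr().add(8)));
    let indices = vdupq_n_u8(0);
    let r = vluti4q_laneq_s16_x2::<1>(table, indices);
    // All-zero indices read entry 0 regardless of the group chosen.
    assert_eq!(vgetq_lane_s16::<0>(r), 0);
}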
13226#[doc = "Maximum (vector)"]
13227#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
13228#[inline]
13229#[target_feature(enable = "neon")]
13230#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13231#[cfg_attr(test, assert_instr(fmax))]
13232pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
13233    unsafe extern "unadjusted" {
13234        #[cfg_attr(
13235            any(target_arch = "aarch64", target_arch = "arm64ec"),
13236            link_name = "llvm.aarch64.neon.fmax.v1f64"
13237        )]
13238        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
13239    }
13240    unsafe { _vmax_f64(a, b) }
13241}
13242#[doc = "Maximum (vector)"]
13243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
13244#[inline]
13245#[target_feature(enable = "neon")]
13246#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13247#[cfg_attr(test, assert_instr(fmax))]
13248pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13249    unsafe extern "unadjusted" {
13250        #[cfg_attr(
13251            any(target_arch = "aarch64", target_arch = "arm64ec"),
13252            link_name = "llvm.aarch64.neon.fmax.v2f64"
13253        )]
13254        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
13255    }
13256    unsafe { _vmaxq_f64(a, b) }
13257}
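// Illustrative example (not generator output): vmax_f64/vmaxq_f64 lower to
// FMAX, a lane-wise maximum. A small sketch with assumed values; note that,
// unlike the "maximum number" forms below, FMAX propagates NaN operands.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vmaxq_f64() {
    let a = vld1q_f64([1.0f64, -4.0].as_ptr());
    let b = vld1q_f64([0.5f64, 3.0].as_ptr());
    let r = vmaxq_f64(a, b);
    assert_eq!(vgetq_lane_f64::<0>(r), 1.0);
    assert_eq!(vgetq_lane_f64::<1>(r), 3.0);
}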
13258#[doc = "Maximum (vector)"]
13259#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
13260#[inline]
13261#[target_feature(enable = "neon,fp16")]
13262#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13263#[cfg_attr(test, assert_instr(fmax))]
13264pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
13265    unsafe extern "unadjusted" {
13266        #[cfg_attr(
13267            any(target_arch = "aarch64", target_arch = "arm64ec"),
13268            link_name = "llvm.aarch64.neon.fmax.f16"
13269        )]
13270        fn _vmaxh_f16(a: f16, b: f16) -> f16;
13271    }
13272    unsafe { _vmaxh_f16(a, b) }
13273}
13274#[doc = "Floating-point Maximum Number (vector)"]
13275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
13276#[inline]
13277#[target_feature(enable = "neon")]
13278#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13279#[cfg_attr(test, assert_instr(fmaxnm))]
13280pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
13281    unsafe extern "unadjusted" {
13282        #[cfg_attr(
13283            any(target_arch = "aarch64", target_arch = "arm64ec"),
13284            link_name = "llvm.aarch64.neon.fmaxnm.v1f64"
13285        )]
13286        fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
13287    }
13288    unsafe { _vmaxnm_f64(a, b) }
13289}
13290#[doc = "Floating-point Maximum Number (vector)"]
13291#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
13292#[inline]
13293#[target_feature(enable = "neon")]
13294#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13295#[cfg_attr(test, assert_instr(fmaxnm))]
13296pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13297    unsafe extern "unadjusted" {
13298        #[cfg_attr(
13299            any(target_arch = "aarch64", target_arch = "arm64ec"),
13300            link_name = "llvm.aarch64.neon.fmaxnm.v2f64"
13301        )]
13302        fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
13303    }
13304    unsafe { _vmaxnmq_f64(a, b) }
13305}
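// Illustrative example (not generator output): the "maximum number" forms
// lower to FMAXNM and follow IEEE 754 maxNum semantics, so when exactly one
// operand is NaN the other operand is returned. Sketch with assumed values.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vmaxnm_f64() {
    let a = vdup_n_f64(f64::NAN);
    let b = vdup_n_f64(5.0);
    let r = vmaxnm_f64(a, b);
    // The NaN operand is treated as missing data.
    assert_eq!(vget_lane_f64::<0>(r), 5.0);
}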
13306#[doc = "Floating-point Maximum Number"]
13307#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
13308#[inline]
13309#[target_feature(enable = "neon,fp16")]
13310#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13311#[cfg_attr(test, assert_instr(fmaxnm))]
13312pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
13313    unsafe extern "unadjusted" {
13314        #[cfg_attr(
13315            any(target_arch = "aarch64", target_arch = "arm64ec"),
13316            link_name = "llvm.aarch64.neon.fmaxnm.f16"
13317        )]
13318        fn _vmaxnmh_f16(a: f16, b: f16) -> f16;
13319    }
13320    unsafe { _vmaxnmh_f16(a, b) }
13321}
13322#[doc = "Floating-point maximum number across vector"]
13323#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
13324#[inline]
13325#[target_feature(enable = "neon,fp16")]
13326#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13327#[cfg_attr(test, assert_instr(fmaxnmv))]
13328pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
13329    unsafe extern "unadjusted" {
13330        #[cfg_attr(
13331            any(target_arch = "aarch64", target_arch = "arm64ec"),
13332            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v4f16"
13333        )]
13334        fn _vmaxnmv_f16(a: float16x4_t) -> f16;
13335    }
13336    unsafe { _vmaxnmv_f16(a) }
13337}
13338#[doc = "Floating-point maximum number across vector"]
13339#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
13340#[inline]
13341#[target_feature(enable = "neon,fp16")]
13342#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13343#[cfg_attr(test, assert_instr(fmaxnmv))]
13344pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
13345    unsafe extern "unadjusted" {
13346        #[cfg_attr(
13347            any(target_arch = "aarch64", target_arch = "arm64ec"),
13348            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v8f16"
13349        )]
13350        fn _vmaxnmvq_f16(a: float16x8_t) -> f16;
13351    }
13352    unsafe { _vmaxnmvq_f16(a) }
13353}
13354#[doc = "Floating-point maximum number across vector"]
13355#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
13356#[inline]
13357#[target_feature(enable = "neon")]
13358#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13359#[cfg_attr(test, assert_instr(fmaxnmp))]
13360pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
13361    unsafe extern "unadjusted" {
13362        #[cfg_attr(
13363            any(target_arch = "aarch64", target_arch = "arm64ec"),
13364            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
13365        )]
13366        fn _vmaxnmv_f32(a: float32x2_t) -> f32;
13367    }
13368    unsafe { _vmaxnmv_f32(a) }
13369}
13370#[doc = "Floating-point maximum number across vector"]
13371#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
13372#[inline]
13373#[target_feature(enable = "neon")]
13374#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13375#[cfg_attr(test, assert_instr(fmaxnmp))]
13376pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
13377    unsafe extern "unadjusted" {
13378        #[cfg_attr(
13379            any(target_arch = "aarch64", target_arch = "arm64ec"),
13380            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
13381        )]
13382        fn _vmaxnmvq_f64(a: float64x2_t) -> f64;
13383    }
13384    unsafe { _vmaxnmvq_f64(a) }
13385}
13386#[doc = "Floating-point maximum number across vector"]
13387#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
13388#[inline]
13389#[target_feature(enable = "neon")]
13390#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13391#[cfg_attr(test, assert_instr(fmaxnmv))]
13392pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
13393    unsafe extern "unadjusted" {
13394        #[cfg_attr(
13395            any(target_arch = "aarch64", target_arch = "arm64ec"),
13396            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32"
13397        )]
13398        fn _vmaxnmvq_f32(a: float32x4_t) -> f32;
13399    }
13400    unsafe { _vmaxnmvq_f32(a) }
13401}
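// Illustrative example (not generator output): the across-vector "maximum
// number" reductions fold all lanes with maxNum semantics, so NaN lanes
// lose to numeric lanes. Sketch with assumed values.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vmaxnmvq_f32() {
    let v = vld1q_f32([1.0f32, f32::NAN, 3.0, 2.0].as_ptr());
    // The NaN lane is skipped; the reduction yields the largest number.
    assert_eq!(vmaxnmvq_f32(v), 3.0);
}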
13402#[doc = "Floating-point maximum number across vector"]
13403#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
13404#[inline]
13405#[target_feature(enable = "neon,fp16")]
13406#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13407#[cfg_attr(test, assert_instr(fmaxv))]
13408pub fn vmaxv_f16(a: float16x4_t) -> f16 {
13409    unsafe extern "unadjusted" {
13410        #[cfg_attr(
13411            any(target_arch = "aarch64", target_arch = "arm64ec"),
13412            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
13413        )]
13414        fn _vmaxv_f16(a: float16x4_t) -> f16;
13415    }
13416    unsafe { _vmaxv_f16(a) }
13417}
13418#[doc = "Floating-point maximum number across vector"]
13419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
13420#[inline]
13421#[target_feature(enable = "neon,fp16")]
13422#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13423#[cfg_attr(test, assert_instr(fmaxv))]
13424pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
13425    unsafe extern "unadjusted" {
13426        #[cfg_attr(
13427            any(target_arch = "aarch64", target_arch = "arm64ec"),
13428            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
13429        )]
13430        fn _vmaxvq_f16(a: float16x8_t) -> f16;
13431    }
13432    unsafe { _vmaxvq_f16(a) }
13433}
13434#[doc = "Horizontal vector max."]
13435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
13436#[inline]
13437#[target_feature(enable = "neon")]
13438#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13439#[cfg_attr(test, assert_instr(fmaxp))]
13440pub fn vmaxv_f32(a: float32x2_t) -> f32 {
13441    unsafe extern "unadjusted" {
13442        #[cfg_attr(
13443            any(target_arch = "aarch64", target_arch = "arm64ec"),
13444            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
13445        )]
13446        fn _vmaxv_f32(a: float32x2_t) -> f32;
13447    }
13448    unsafe { _vmaxv_f32(a) }
13449}
13450#[doc = "Horizontal vector max."]
13451#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
13452#[inline]
13453#[target_feature(enable = "neon")]
13454#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13455#[cfg_attr(test, assert_instr(fmaxv))]
13456pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
13457    unsafe extern "unadjusted" {
13458        #[cfg_attr(
13459            any(target_arch = "aarch64", target_arch = "arm64ec"),
13460            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
13461        )]
13462        fn _vmaxvq_f32(a: float32x4_t) -> f32;
13463    }
13464    unsafe { _vmaxvq_f32(a) }
13465}
13466#[doc = "Horizontal vector max."]
13467#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
13468#[inline]
13469#[target_feature(enable = "neon")]
13470#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13471#[cfg_attr(test, assert_instr(fmaxp))]
13472pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
13473    unsafe extern "unadjusted" {
13474        #[cfg_attr(
13475            any(target_arch = "aarch64", target_arch = "arm64ec"),
13476            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
13477        )]
13478        fn _vmaxvq_f64(a: float64x2_t) -> f64;
13479    }
13480    unsafe { _vmaxvq_f64(a) }
13481}
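// Illustrative example (not generator output): the plain horizontal max
// reductions (FMAXV, or FMAXP for two-lane vectors) collapse a vector to
// its largest element. Sketch with assumed values.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vmaxvq_f64() {
    let v = vld1q_f64([2.5f64, -1.0].as_ptr());
    assert_eq!(vmaxvq_f64(v), 2.5);
}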
13482#[doc = "Horizontal vector max."]
13483#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
13484#[inline]
13485#[target_feature(enable = "neon")]
13486#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13487#[cfg_attr(test, assert_instr(smaxv))]
13488pub fn vmaxv_s8(a: int8x8_t) -> i8 {
13489    unsafe extern "unadjusted" {
13490        #[cfg_attr(
13491            any(target_arch = "aarch64", target_arch = "arm64ec"),
13492            link_name = "llvm.aarch64.neon.smaxv.i8.v8i8"
13493        )]
13494        fn _vmaxv_s8(a: int8x8_t) -> i8;
13495    }
13496    unsafe { _vmaxv_s8(a) }
13497}
13498#[doc = "Horizontal vector max."]
13499#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
13500#[inline]
13501#[target_feature(enable = "neon")]
13502#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13503#[cfg_attr(test, assert_instr(smaxv))]
13504pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
13505    unsafe extern "unadjusted" {
13506        #[cfg_attr(
13507            any(target_arch = "aarch64", target_arch = "arm64ec"),
13508            link_name = "llvm.aarch64.neon.smaxv.i8.v16i8"
13509        )]
13510        fn _vmaxvq_s8(a: int8x16_t) -> i8;
13511    }
13512    unsafe { _vmaxvq_s8(a) }
13513}
13514#[doc = "Horizontal vector max."]
13515#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
13516#[inline]
13517#[target_feature(enable = "neon")]
13518#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13519#[cfg_attr(test, assert_instr(smaxv))]
13520pub fn vmaxv_s16(a: int16x4_t) -> i16 {
13521    unsafe extern "unadjusted" {
13522        #[cfg_attr(
13523            any(target_arch = "aarch64", target_arch = "arm64ec"),
13524            link_name = "llvm.aarch64.neon.smaxv.i16.v4i16"
13525        )]
13526        fn _vmaxv_s16(a: int16x4_t) -> i16;
13527    }
13528    unsafe { _vmaxv_s16(a) }
13529}
13530#[doc = "Horizontal vector max."]
13531#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
13532#[inline]
13533#[target_feature(enable = "neon")]
13534#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13535#[cfg_attr(test, assert_instr(smaxv))]
13536pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
13537    unsafe extern "unadjusted" {
13538        #[cfg_attr(
13539            any(target_arch = "aarch64", target_arch = "arm64ec"),
13540            link_name = "llvm.aarch64.neon.smaxv.i16.v8i16"
13541        )]
13542        fn _vmaxvq_s16(a: int16x8_t) -> i16;
13543    }
13544    unsafe { _vmaxvq_s16(a) }
13545}
13546#[doc = "Horizontal vector max."]
13547#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
13548#[inline]
13549#[target_feature(enable = "neon")]
13550#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13551#[cfg_attr(test, assert_instr(smaxp))]
13552pub fn vmaxv_s32(a: int32x2_t) -> i32 {
13553    unsafe extern "unadjusted" {
13554        #[cfg_attr(
13555            any(target_arch = "aarch64", target_arch = "arm64ec"),
13556            link_name = "llvm.aarch64.neon.smaxv.i32.v2i32"
13557        )]
13558        fn _vmaxv_s32(a: int32x2_t) -> i32;
13559    }
13560    unsafe { _vmaxv_s32(a) }
13561}
13562#[doc = "Horizontal vector max."]
13563#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
13564#[inline]
13565#[target_feature(enable = "neon")]
13566#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13567#[cfg_attr(test, assert_instr(smaxv))]
13568pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
13569    unsafe extern "unadjusted" {
13570        #[cfg_attr(
13571            any(target_arch = "aarch64", target_arch = "arm64ec"),
13572            link_name = "llvm.aarch64.neon.smaxv.i32.v4i32"
13573        )]
13574        fn _vmaxvq_s32(a: int32x4_t) -> i32;
13575    }
13576    unsafe { _vmaxvq_s32(a) }
13577}
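// Illustrative example (not generator output): the signed integer
// horizontal max (SMAXV, or SMAXP for two-lane vectors) reduces over the
// whole vector regardless of lane order. Values are arbitrary assumptions.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vmaxvq_s32() {
    let v = vld1q_s32([1, -7, 9, 3].as_ptr());
    assert_eq!(vmaxvq_s32(v), 9);
}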
13578#[doc = "Horizontal vector max."]
13579#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
13580#[inline]
13581#[target_feature(enable = "neon")]
13582#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13583#[cfg_attr(test, assert_instr(umaxv))]
13584pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
13585    unsafe extern "unadjusted" {
13586        #[cfg_attr(
13587            any(target_arch = "aarch64", target_arch = "arm64ec"),
13588            link_name = "llvm.aarch64.neon.umaxv.i8.v8i8"
13589        )]
13590        fn _vmaxv_u8(a: uint8x8_t) -> u8;
13591    }
13592    unsafe { _vmaxv_u8(a) }
13593}
13594#[doc = "Horizontal vector max."]
13595#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
13596#[inline]
13597#[target_feature(enable = "neon")]
13598#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13599#[cfg_attr(test, assert_instr(umaxv))]
13600pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
13601    unsafe extern "unadjusted" {
13602        #[cfg_attr(
13603            any(target_arch = "aarch64", target_arch = "arm64ec"),
13604            link_name = "llvm.aarch64.neon.umaxv.i8.v16i8"
13605        )]
13606        fn _vmaxvq_u8(a: uint8x16_t) -> u8;
13607    }
13608    unsafe { _vmaxvq_u8(a) }
13609}
13610#[doc = "Horizontal vector max."]
13611#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
13612#[inline]
13613#[target_feature(enable = "neon")]
13614#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13615#[cfg_attr(test, assert_instr(umaxv))]
13616pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
13617    unsafe extern "unadjusted" {
13618        #[cfg_attr(
13619            any(target_arch = "aarch64", target_arch = "arm64ec"),
13620            link_name = "llvm.aarch64.neon.umaxv.i16.v4i16"
13621        )]
13622        fn _vmaxv_u16(a: uint16x4_t) -> u16;
13623    }
13624    unsafe { _vmaxv_u16(a) }
13625}
13626#[doc = "Horizontal vector max."]
13627#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
13628#[inline]
13629#[target_feature(enable = "neon")]
13630#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13631#[cfg_attr(test, assert_instr(umaxv))]
13632pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
13633    unsafe extern "unadjusted" {
13634        #[cfg_attr(
13635            any(target_arch = "aarch64", target_arch = "arm64ec"),
13636            link_name = "llvm.aarch64.neon.umaxv.i16.v8i16"
13637        )]
13638        fn _vmaxvq_u16(a: uint16x8_t) -> u16;
13639    }
13640    unsafe { _vmaxvq_u16(a) }
13641}
13642#[doc = "Horizontal vector max."]
13643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
13644#[inline]
13645#[target_feature(enable = "neon")]
13646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13647#[cfg_attr(test, assert_instr(umaxp))]
13648pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
13649    unsafe extern "unadjusted" {
13650        #[cfg_attr(
13651            any(target_arch = "aarch64", target_arch = "arm64ec"),
13652            link_name = "llvm.aarch64.neon.umaxv.i32.v2i32"
13653        )]
13654        fn _vmaxv_u32(a: uint32x2_t) -> u32;
13655    }
13656    unsafe { _vmaxv_u32(a) }
13657}
13658#[doc = "Horizontal vector max."]
13659#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
13660#[inline]
13661#[target_feature(enable = "neon")]
13662#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13663#[cfg_attr(test, assert_instr(umaxv))]
13664pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
13665    unsafe extern "unadjusted" {
13666        #[cfg_attr(
13667            any(target_arch = "aarch64", target_arch = "arm64ec"),
13668            link_name = "llvm.aarch64.neon.umaxv.i32.v4i32"
13669        )]
13670        fn _vmaxvq_u32(a: uint32x4_t) -> u32;
13671    }
13672    unsafe { _vmaxvq_u32(a) }
13673}
13674#[doc = "Minimum (vector)"]
13675#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
13676#[inline]
13677#[target_feature(enable = "neon")]
13678#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13679#[cfg_attr(test, assert_instr(fmin))]
13680pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
13681    unsafe extern "unadjusted" {
13682        #[cfg_attr(
13683            any(target_arch = "aarch64", target_arch = "arm64ec"),
13684            link_name = "llvm.aarch64.neon.fmin.v1f64"
13685        )]
13686        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
13687    }
13688    unsafe { _vmin_f64(a, b) }
13689}
13690#[doc = "Minimum (vector)"]
13691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
13692#[inline]
13693#[target_feature(enable = "neon")]
13694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13695#[cfg_attr(test, assert_instr(fmin))]
13696pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13697    unsafe extern "unadjusted" {
13698        #[cfg_attr(
13699            any(target_arch = "aarch64", target_arch = "arm64ec"),
13700            link_name = "llvm.aarch64.neon.fmin.v2f64"
13701        )]
13702        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
13703    }
13704    unsafe { _vminq_f64(a, b) }
13705}
13706#[doc = "Minimum (vector)"]
13707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
13708#[inline]
13709#[target_feature(enable = "neon,fp16")]
13710#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13711#[cfg_attr(test, assert_instr(fmin))]
13712pub fn vminh_f16(a: f16, b: f16) -> f16 {
13713    unsafe extern "unadjusted" {
13714        #[cfg_attr(
13715            any(target_arch = "aarch64", target_arch = "arm64ec"),
13716            link_name = "llvm.aarch64.neon.fmin.f16"
13717        )]
13718        fn _vminh_f16(a: f16, b: f16) -> f16;
13719    }
13720    unsafe { _vminh_f16(a, b) }
13721}
13722#[doc = "Floating-point Minimum Number (vector)"]
13723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
13724#[inline]
13725#[target_feature(enable = "neon")]
13726#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13727#[cfg_attr(test, assert_instr(fminnm))]
13728pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
13729    unsafe extern "unadjusted" {
13730        #[cfg_attr(
13731            any(target_arch = "aarch64", target_arch = "arm64ec"),
13732            link_name = "llvm.aarch64.neon.fminnm.v1f64"
13733        )]
13734        fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
13735    }
13736    unsafe { _vminnm_f64(a, b) }
13737}
13738#[doc = "Floating-point Minimum Number (vector)"]
13739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
13740#[inline]
13741#[target_feature(enable = "neon")]
13742#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13743#[cfg_attr(test, assert_instr(fminnm))]
13744pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13745    unsafe extern "unadjusted" {
13746        #[cfg_attr(
13747            any(target_arch = "aarch64", target_arch = "arm64ec"),
13748            link_name = "llvm.aarch64.neon.fminnm.v2f64"
13749        )]
13750        fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
13751    }
13752    unsafe { _vminnmq_f64(a, b) }
13753}
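// Illustrative example (not generator output): like their max counterparts,
// the "minimum number" forms lower to FMINNM and return the numeric operand
// when the other is NaN, lane by lane. Sketch with assumed values.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vminnmq_f64() {
    let a = vld1q_f64([f64::NAN, 2.0].as_ptr());
    let b = vld1q_f64([-1.0f64, f64::NAN].as_ptr());
    let r = vminnmq_f64(a, b);
    assert_eq!(vgetq_lane_f64::<0>(r), -1.0);
    assert_eq!(vgetq_lane_f64::<1>(r), 2.0);
}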
13754#[doc = "Floating-point Minimum Number"]
13755#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
13756#[inline]
13757#[target_feature(enable = "neon,fp16")]
13758#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13759#[cfg_attr(test, assert_instr(fminnm))]
13760pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
13761    unsafe extern "unadjusted" {
13762        #[cfg_attr(
13763            any(target_arch = "aarch64", target_arch = "arm64ec"),
13764            link_name = "llvm.aarch64.neon.fminnm.f16"
13765        )]
13766        fn _vminnmh_f16(a: f16, b: f16) -> f16;
13767    }
13768    unsafe { _vminnmh_f16(a, b) }
13769}
13770#[doc = "Floating-point minimum number across vector"]
13771#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
13772#[inline]
13773#[target_feature(enable = "neon,fp16")]
13774#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13775#[cfg_attr(test, assert_instr(fminnmv))]
13776pub fn vminnmv_f16(a: float16x4_t) -> f16 {
13777    unsafe extern "unadjusted" {
13778        #[cfg_attr(
13779            any(target_arch = "aarch64", target_arch = "arm64ec"),
13780            link_name = "llvm.aarch64.neon.fminnmv.f16.v4f16"
13781        )]
13782        fn _vminnmv_f16(a: float16x4_t) -> f16;
13783    }
13784    unsafe { _vminnmv_f16(a) }
13785}
13786#[doc = "Floating-point minimum number across vector"]
13787#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
13788#[inline]
13789#[target_feature(enable = "neon,fp16")]
13790#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13791#[cfg_attr(test, assert_instr(fminnmv))]
13792pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
13793    unsafe extern "unadjusted" {
13794        #[cfg_attr(
13795            any(target_arch = "aarch64", target_arch = "arm64ec"),
13796            link_name = "llvm.aarch64.neon.fminnmv.f16.v8f16"
13797        )]
13798        fn _vminnmvq_f16(a: float16x8_t) -> f16;
13799    }
13800    unsafe { _vminnmvq_f16(a) }
13801}
13802#[doc = "Floating-point minimum number across vector"]
13803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
13804#[inline]
13805#[target_feature(enable = "neon")]
13806#[cfg_attr(test, assert_instr(fminnmp))]
13807#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13808pub fn vminnmv_f32(a: float32x2_t) -> f32 {
13809    unsafe extern "unadjusted" {
13810        #[cfg_attr(
13811            any(target_arch = "aarch64", target_arch = "arm64ec"),
13812            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
13813        )]
13814        fn _vminnmv_f32(a: float32x2_t) -> f32;
13815    }
13816    unsafe { _vminnmv_f32(a) }
13817}
13818#[doc = "Floating-point minimum number across vector"]
13819#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
13820#[inline]
13821#[target_feature(enable = "neon")]
13822#[cfg_attr(test, assert_instr(fminnmp))]
13823#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13824pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
13825    unsafe extern "unadjusted" {
13826        #[cfg_attr(
13827            any(target_arch = "aarch64", target_arch = "arm64ec"),
13828            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
13829        )]
13830        fn _vminnmvq_f64(a: float64x2_t) -> f64;
13831    }
13832    unsafe { _vminnmvq_f64(a) }
13833}
13834#[doc = "Floating-point minimum number across vector"]
13835#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
13836#[inline]
13837#[target_feature(enable = "neon")]
13838#[cfg_attr(test, assert_instr(fminnmv))]
13839#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13840pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
13841    unsafe extern "unadjusted" {
13842        #[cfg_attr(
13843            any(target_arch = "aarch64", target_arch = "arm64ec"),
13844            link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32"
13845        )]
13846        fn _vminnmvq_f32(a: float32x4_t) -> f32;
13847    }
13848    unsafe { _vminnmvq_f32(a) }
13849}
13850#[doc = "Floating-point minimum number across vector"]
13851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
13852#[inline]
13853#[target_feature(enable = "neon,fp16")]
13854#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13855#[cfg_attr(test, assert_instr(fminv))]
13856pub fn vminv_f16(a: float16x4_t) -> f16 {
13857    unsafe extern "unadjusted" {
13858        #[cfg_attr(
13859            any(target_arch = "aarch64", target_arch = "arm64ec"),
13860            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
13861        )]
13862        fn _vminv_f16(a: float16x4_t) -> f16;
13863    }
13864    unsafe { _vminv_f16(a) }
13865}
13866#[doc = "Floating-point minimum number across vector"]
13867#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
13868#[inline]
13869#[target_feature(enable = "neon,fp16")]
13870#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13871#[cfg_attr(test, assert_instr(fminv))]
13872pub fn vminvq_f16(a: float16x8_t) -> f16 {
13873    unsafe extern "unadjusted" {
13874        #[cfg_attr(
13875            any(target_arch = "aarch64", target_arch = "arm64ec"),
13876            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
13877        )]
13878        fn _vminvq_f16(a: float16x8_t) -> f16;
13879    }
13880    unsafe { _vminvq_f16(a) }
13881}
13882#[doc = "Horizontal vector min."]
13883#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
13884#[inline]
13885#[target_feature(enable = "neon")]
13886#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13887#[cfg_attr(test, assert_instr(fminp))]
13888pub fn vminv_f32(a: float32x2_t) -> f32 {
13889    unsafe extern "unadjusted" {
13890        #[cfg_attr(
13891            any(target_arch = "aarch64", target_arch = "arm64ec"),
13892            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
13893        )]
13894        fn _vminv_f32(a: float32x2_t) -> f32;
13895    }
13896    unsafe { _vminv_f32(a) }
13897}
13898#[doc = "Horizontal vector min."]
13899#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
13900#[inline]
13901#[target_feature(enable = "neon")]
13902#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13903#[cfg_attr(test, assert_instr(fminv))]
13904pub fn vminvq_f32(a: float32x4_t) -> f32 {
13905    unsafe extern "unadjusted" {
13906        #[cfg_attr(
13907            any(target_arch = "aarch64", target_arch = "arm64ec"),
13908            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
13909        )]
13910        fn _vminvq_f32(a: float32x4_t) -> f32;
13911    }
13912    unsafe { _vminvq_f32(a) }
13913}
13914#[doc = "Horizontal vector min."]
13915#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
13916#[inline]
13917#[target_feature(enable = "neon")]
13918#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13919#[cfg_attr(test, assert_instr(fminp))]
13920pub fn vminvq_f64(a: float64x2_t) -> f64 {
13921    unsafe extern "unadjusted" {
13922        #[cfg_attr(
13923            any(target_arch = "aarch64", target_arch = "arm64ec"),
13924            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
13925        )]
13926        fn _vminvq_f64(a: float64x2_t) -> f64;
13927    }
13928    unsafe { _vminvq_f64(a) }
13929}
13930#[doc = "Horizontal vector min."]
13931#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
13932#[inline]
13933#[target_feature(enable = "neon")]
13934#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13935#[cfg_attr(test, assert_instr(sminv))]
13936pub fn vminv_s8(a: int8x8_t) -> i8 {
13937    unsafe extern "unadjusted" {
13938        #[cfg_attr(
13939            any(target_arch = "aarch64", target_arch = "arm64ec"),
13940            link_name = "llvm.aarch64.neon.sminv.i8.v8i8"
13941        )]
13942        fn _vminv_s8(a: int8x8_t) -> i8;
13943    }
13944    unsafe { _vminv_s8(a) }
13945}
13946#[doc = "Horizontal vector min."]
13947#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
13948#[inline]
13949#[target_feature(enable = "neon")]
13950#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13951#[cfg_attr(test, assert_instr(sminv))]
13952pub fn vminvq_s8(a: int8x16_t) -> i8 {
13953    unsafe extern "unadjusted" {
13954        #[cfg_attr(
13955            any(target_arch = "aarch64", target_arch = "arm64ec"),
13956            link_name = "llvm.aarch64.neon.sminv.i8.v16i8"
13957        )]
13958        fn _vminvq_s8(a: int8x16_t) -> i8;
13959    }
13960    unsafe { _vminvq_s8(a) }
13961}
13962#[doc = "Horizontal vector min."]
13963#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
13964#[inline]
13965#[target_feature(enable = "neon")]
13966#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13967#[cfg_attr(test, assert_instr(sminv))]
13968pub fn vminv_s16(a: int16x4_t) -> i16 {
13969    unsafe extern "unadjusted" {
13970        #[cfg_attr(
13971            any(target_arch = "aarch64", target_arch = "arm64ec"),
13972            link_name = "llvm.aarch64.neon.sminv.i16.v4i16"
13973        )]
13974        fn _vminv_s16(a: int16x4_t) -> i16;
13975    }
13976    unsafe { _vminv_s16(a) }
13977}
13978#[doc = "Horizontal vector min."]
13979#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
13980#[inline]
13981#[target_feature(enable = "neon")]
13982#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13983#[cfg_attr(test, assert_instr(sminv))]
13984pub fn vminvq_s16(a: int16x8_t) -> i16 {
13985    unsafe extern "unadjusted" {
13986        #[cfg_attr(
13987            any(target_arch = "aarch64", target_arch = "arm64ec"),
13988            link_name = "llvm.aarch64.neon.sminv.i16.v8i16"
13989        )]
13990        fn _vminvq_s16(a: int16x8_t) -> i16;
13991    }
13992    unsafe { _vminvq_s16(a) }
13993}
13994#[doc = "Horizontal vector min."]
13995#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
13996#[inline]
13997#[target_feature(enable = "neon")]
13998#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13999#[cfg_attr(test, assert_instr(sminp))]
14000pub fn vminv_s32(a: int32x2_t) -> i32 {
14001    unsafe extern "unadjusted" {
14002        #[cfg_attr(
14003            any(target_arch = "aarch64", target_arch = "arm64ec"),
14004            link_name = "llvm.aarch64.neon.sminv.i32.v2i32"
14005        )]
14006        fn _vminv_s32(a: int32x2_t) -> i32;
14007    }
14008    unsafe { _vminv_s32(a) }
14009}
14010#[doc = "Horizontal vector min."]
14011#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
14012#[inline]
14013#[target_feature(enable = "neon")]
14014#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14015#[cfg_attr(test, assert_instr(sminv))]
14016pub fn vminvq_s32(a: int32x4_t) -> i32 {
14017    unsafe extern "unadjusted" {
14018        #[cfg_attr(
14019            any(target_arch = "aarch64", target_arch = "arm64ec"),
14020            link_name = "llvm.aarch64.neon.sminv.i32.v4i32"
14021        )]
14022        fn _vminvq_s32(a: int32x4_t) -> i32;
14023    }
14024    unsafe { _vminvq_s32(a) }
14025}
14026#[doc = "Horizontal vector min."]
14027#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
14028#[inline]
14029#[target_feature(enable = "neon")]
14030#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14031#[cfg_attr(test, assert_instr(uminv))]
14032pub fn vminv_u8(a: uint8x8_t) -> u8 {
14033    unsafe extern "unadjusted" {
14034        #[cfg_attr(
14035            any(target_arch = "aarch64", target_arch = "arm64ec"),
14036            link_name = "llvm.aarch64.neon.uminv.i8.v8i8"
14037        )]
14038        fn _vminv_u8(a: uint8x8_t) -> u8;
14039    }
14040    unsafe { _vminv_u8(a) }
14041}
14042#[doc = "Horizontal vector min."]
14043#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
14044#[inline]
14045#[target_feature(enable = "neon")]
14046#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14047#[cfg_attr(test, assert_instr(uminv))]
14048pub fn vminvq_u8(a: uint8x16_t) -> u8 {
14049    unsafe extern "unadjusted" {
14050        #[cfg_attr(
14051            any(target_arch = "aarch64", target_arch = "arm64ec"),
14052            link_name = "llvm.aarch64.neon.uminv.i8.v16i8"
14053        )]
14054        fn _vminvq_u8(a: uint8x16_t) -> u8;
14055    }
14056    unsafe { _vminvq_u8(a) }
14057}
14058#[doc = "Horizontal vector min."]
14059#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
14060#[inline]
14061#[target_feature(enable = "neon")]
14062#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14063#[cfg_attr(test, assert_instr(uminv))]
14064pub fn vminv_u16(a: uint16x4_t) -> u16 {
14065    unsafe extern "unadjusted" {
14066        #[cfg_attr(
14067            any(target_arch = "aarch64", target_arch = "arm64ec"),
14068            link_name = "llvm.aarch64.neon.uminv.i16.v4i16"
14069        )]
14070        fn _vminv_u16(a: uint16x4_t) -> u16;
14071    }
14072    unsafe { _vminv_u16(a) }
14073}
14074#[doc = "Horizontal vector min."]
14075#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
14076#[inline]
14077#[target_feature(enable = "neon")]
14078#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14079#[cfg_attr(test, assert_instr(uminv))]
14080pub fn vminvq_u16(a: uint16x8_t) -> u16 {
14081    unsafe extern "unadjusted" {
14082        #[cfg_attr(
14083            any(target_arch = "aarch64", target_arch = "arm64ec"),
14084            link_name = "llvm.aarch64.neon.uminv.i16.v8i16"
14085        )]
14086        fn _vminvq_u16(a: uint16x8_t) -> u16;
14087    }
14088    unsafe { _vminvq_u16(a) }
14089}
14090#[doc = "Horizontal vector min."]
14091#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
14092#[inline]
14093#[target_feature(enable = "neon")]
14094#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14095#[cfg_attr(test, assert_instr(uminp))]
14096pub fn vminv_u32(a: uint32x2_t) -> u32 {
14097    unsafe extern "unadjusted" {
14098        #[cfg_attr(
14099            any(target_arch = "aarch64", target_arch = "arm64ec"),
14100            link_name = "llvm.aarch64.neon.uminv.i32.v2i32"
14101        )]
14102        fn _vminv_u32(a: uint32x2_t) -> u32;
14103    }
14104    unsafe { _vminv_u32(a) }
14105}
14106#[doc = "Horizontal vector min."]
14107#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
14108#[inline]
14109#[target_feature(enable = "neon")]
14110#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14111#[cfg_attr(test, assert_instr(uminv))]
14112pub fn vminvq_u32(a: uint32x4_t) -> u32 {
14113    unsafe extern "unadjusted" {
14114        #[cfg_attr(
14115            any(target_arch = "aarch64", target_arch = "arm64ec"),
14116            link_name = "llvm.aarch64.neon.uminv.i32.v4i32"
14117        )]
14118        fn _vminvq_u32(a: uint32x4_t) -> u32;
14119    }
14120    unsafe { _vminvq_u32(a) }
14121}
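// Illustrative example (not generator output): unsigned horizontal min
// (UMINV, or UMINP for two-lane vectors), sketched with assumed values.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vminvq_u32() {
    let v = vld1q_u32([4u32, 2, 8, 6].as_ptr());
    assert_eq!(vminvq_u32(v), 2);
}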
14122#[doc = "Floating-point multiply-add to accumulator"]
14123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
14124#[inline]
14125#[target_feature(enable = "neon")]
14126#[cfg_attr(test, assert_instr(fmul))]
14127#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14128pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
14129    unsafe { simd_add(a, simd_mul(b, c)) }
14130}
14131#[doc = "Floating-point multiply-add to accumulator"]
14132#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
14133#[inline]
14134#[target_feature(enable = "neon")]
14135#[cfg_attr(test, assert_instr(fmul))]
14136#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14137pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
14138    unsafe { simd_add(a, simd_mul(b, c)) }
14139}
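// Illustrative example (not generator output): vmla_f64 computes a + b * c
// as two separate operations (hence the fmul in assert_instr; this is not a
// fused multiply-add, so it rounds twice). Sketch with assumed values.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vmla_f64() {
    let r = vmla_f64(vdup_n_f64(1.0), vdup_n_f64(2.0), vdup_n_f64(3.0));
    assert_eq!(vget_lane_f64::<0>(r), 7.0); // 1.0 + 2.0 * 3.0
}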
14140#[doc = "Multiply-add long"]
14141#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
14142#[inline]
14143#[target_feature(enable = "neon")]
14144#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
14145#[rustc_legacy_const_generics(3)]
14146#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14147pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
14148    static_assert_uimm_bits!(LANE, 2);
14149    unsafe {
14150        vmlal_high_s16(
14151            a,
14152            b,
14153            simd_shuffle!(
14154                c,
14155                c,
14156                [
14157                    LANE as u32,
14158                    LANE as u32,
14159                    LANE as u32,
14160                    LANE as u32,
14161                    LANE as u32,
14162                    LANE as u32,
14163                    LANE as u32,
14164                    LANE as u32
14165                ]
14166            ),
14167        )
14168    }
14169}
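// Illustrative example (not generator output): vmlal_high_lane_s16 widens
// the upper half of `b` to i32, multiplies each element by lane LANE of
// `c`, and accumulates into `a`. In this sketch the upper half of `b` is
// [1, 2, 3, 4] and lane 1 of `c` is 10, so the result is [10, 20, 30, 40];
// all values are assumptions.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vmlal_high_lane_s16() {
    let a = vdupq_n_s32(0);
    let b = vld1q_s16([0i16, 0, 0, 0, 1, 2, 3, 4].as_ptr());
    let c = vld1_s16([7i16, 10, 0, 0].as_ptr());
    let r = vmlal_high_lane_s16::<1>(a, b, c);
    assert_eq!(vgetq_lane_s32::<3>(r), 40);
}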
14170#[doc = "Multiply-add long"]
14171#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
14172#[inline]
14173#[target_feature(enable = "neon")]
14174#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
14175#[rustc_legacy_const_generics(3)]
14176#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14177pub fn vmlal_high_laneq_s16<const LANE: i32>(
14178    a: int32x4_t,
14179    b: int16x8_t,
14180    c: int16x8_t,
14181) -> int32x4_t {
14182    static_assert_uimm_bits!(LANE, 3);
14183    unsafe {
14184        vmlal_high_s16(
14185            a,
14186            b,
14187            simd_shuffle!(
14188                c,
14189                c,
14190                [
14191                    LANE as u32,
14192                    LANE as u32,
14193                    LANE as u32,
14194                    LANE as u32,
14195                    LANE as u32,
14196                    LANE as u32,
14197                    LANE as u32,
14198                    LANE as u32
14199                ]
14200            ),
14201        )
14202    }
14203}
14204#[doc = "Multiply-add long"]
14205#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
14206#[inline]
14207#[target_feature(enable = "neon")]
14208#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
14209#[rustc_legacy_const_generics(3)]
14210#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14211pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
14212    static_assert_uimm_bits!(LANE, 1);
14213    unsafe {
14214        vmlal_high_s32(
14215            a,
14216            b,
14217            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14218        )
14219    }
14220}
14221#[doc = "Multiply-add long"]
14222#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
14223#[inline]
14224#[target_feature(enable = "neon")]
14225#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
14226#[rustc_legacy_const_generics(3)]
14227#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14228pub fn vmlal_high_laneq_s32<const LANE: i32>(
14229    a: int64x2_t,
14230    b: int32x4_t,
14231    c: int32x4_t,
14232) -> int64x2_t {
14233    static_assert_uimm_bits!(LANE, 2);
14234    unsafe {
14235        vmlal_high_s32(
14236            a,
14237            b,
14238            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14239        )
14240    }
14241}
14242#[doc = "Multiply-add long"]
14243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
14244#[inline]
14245#[target_feature(enable = "neon")]
14246#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
14247#[rustc_legacy_const_generics(3)]
14248#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14249pub fn vmlal_high_lane_u16<const LANE: i32>(
14250    a: uint32x4_t,
14251    b: uint16x8_t,
14252    c: uint16x4_t,
14253) -> uint32x4_t {
14254    static_assert_uimm_bits!(LANE, 2);
14255    unsafe {
14256        vmlal_high_u16(
14257            a,
14258            b,
14259            simd_shuffle!(
14260                c,
14261                c,
14262                [
14263                    LANE as u32,
14264                    LANE as u32,
14265                    LANE as u32,
14266                    LANE as u32,
14267                    LANE as u32,
14268                    LANE as u32,
14269                    LANE as u32,
14270                    LANE as u32
14271                ]
14272            ),
14273        )
14274    }
14275}
14276#[doc = "Multiply-add long"]
14277#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
14278#[inline]
14279#[target_feature(enable = "neon")]
14280#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
14281#[rustc_legacy_const_generics(3)]
14282#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14283pub fn vmlal_high_laneq_u16<const LANE: i32>(
14284    a: uint32x4_t,
14285    b: uint16x8_t,
14286    c: uint16x8_t,
14287) -> uint32x4_t {
14288    static_assert_uimm_bits!(LANE, 3);
14289    unsafe {
14290        vmlal_high_u16(
14291            a,
14292            b,
14293            simd_shuffle!(
14294                c,
14295                c,
14296                [
14297                    LANE as u32,
14298                    LANE as u32,
14299                    LANE as u32,
14300                    LANE as u32,
14301                    LANE as u32,
14302                    LANE as u32,
14303                    LANE as u32,
14304                    LANE as u32
14305                ]
14306            ),
14307        )
14308    }
14309}
14310#[doc = "Multiply-add long"]
14311#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
14312#[inline]
14313#[target_feature(enable = "neon")]
14314#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
14315#[rustc_legacy_const_generics(3)]
14316#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14317pub fn vmlal_high_lane_u32<const LANE: i32>(
14318    a: uint64x2_t,
14319    b: uint32x4_t,
14320    c: uint32x2_t,
14321) -> uint64x2_t {
14322    static_assert_uimm_bits!(LANE, 1);
14323    unsafe {
14324        vmlal_high_u32(
14325            a,
14326            b,
14327            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14328        )
14329    }
14330}
14331#[doc = "Multiply-add long"]
14332#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
14333#[inline]
14334#[target_feature(enable = "neon")]
14335#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
14336#[rustc_legacy_const_generics(3)]
14337#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14338pub fn vmlal_high_laneq_u32<const LANE: i32>(
14339    a: uint64x2_t,
14340    b: uint32x4_t,
14341    c: uint32x4_t,
14342) -> uint64x2_t {
14343    static_assert_uimm_bits!(LANE, 2);
14344    unsafe {
14345        vmlal_high_u32(
14346            a,
14347            b,
14348            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14349        )
14350    }
14351}
14352#[doc = "Multiply-add long"]
14353#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
14354#[inline]
14355#[target_feature(enable = "neon")]
14356#[cfg_attr(test, assert_instr(smlal2))]
14357#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14358pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
14359    vmlal_high_s16(a, b, vdupq_n_s16(c))
14360}
14361#[doc = "Multiply-add long"]
14362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
14363#[inline]
14364#[target_feature(enable = "neon")]
14365#[cfg_attr(test, assert_instr(smlal2))]
14366#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14367pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
14368    vmlal_high_s32(a, b, vdupq_n_s32(c))
14369}
14370#[doc = "Multiply-add long"]
14371#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
14372#[inline]
14373#[target_feature(enable = "neon")]
14374#[cfg_attr(test, assert_instr(umlal2))]
14375#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14376pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
14377    vmlal_high_u16(a, b, vdupq_n_u16(c))
14378}
14379#[doc = "Multiply-add long"]
14380#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
14381#[inline]
14382#[target_feature(enable = "neon")]
14383#[cfg_attr(test, assert_instr(umlal2))]
14384#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14385pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
14386    vmlal_high_u32(a, b, vdupq_n_u32(c))
14387}
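// Illustrative example (not generator output): the _n_ forms broadcast a
// scalar instead of taking a lane. Here the upper half of `b` ([5, 6]) is
// widened to u64, scaled by 3, and added to `a`; all values are assumptions.
#[cfg(test)]
#[target_feature(enable = "neon")]
unsafe fn example_vmlal_high_n_u32() {
    let a = vdupq_n_u64(100);
    let b = vld1q_u32([0u32, 0, 5, 6].as_ptr());
    let r = vmlal_high_n_u32(a, b, 3);
    assert_eq!(vgetq_lane_u64::<0>(r), 115); // 100 + 5 * 3
    assert_eq!(vgetq_lane_u64::<1>(r), 118); // 100 + 6 * 3
}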
14388#[doc = "Signed multiply-add long"]
14389#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
14390#[inline]
14391#[target_feature(enable = "neon")]
14392#[cfg_attr(test, assert_instr(smlal2))]
14393#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14394pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
14395    unsafe {
14396        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14397        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14398        vmlal_s8(a, b, c)
14399    }
14400}
14401#[doc = "Signed multiply-add long"]
14402#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
14403#[inline]
14404#[target_feature(enable = "neon")]
14405#[cfg_attr(test, assert_instr(smlal2))]
14406#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14407pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
14408    unsafe {
14409        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14410        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14411        vmlal_s16(a, b, c)
14412    }
14413}
14414#[doc = "Signed multiply-add long"]
14415#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
14416#[inline]
14417#[target_feature(enable = "neon")]
14418#[cfg_attr(test, assert_instr(smlal2))]
14419#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14420pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
14421    unsafe {
14422        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
14423        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
14424        vmlal_s32(a, b, c)
14425    }
14426}
14427#[doc = "Unsigned multiply-add long"]
14428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
14429#[inline]
14430#[target_feature(enable = "neon")]
14431#[cfg_attr(test, assert_instr(umlal2))]
14432#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14433pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
14434    unsafe {
14435        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14436        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14437        vmlal_u8(a, b, c)
14438    }
14439}
14440#[doc = "Unsigned multiply-add long"]
14441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
14442#[inline]
14443#[target_feature(enable = "neon")]
14444#[cfg_attr(test, assert_instr(umlal2))]
14445#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14446pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
14447    unsafe {
14448        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14449        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14450        vmlal_u16(a, b, c)
14451    }
14452}
14453#[doc = "Unsigned multiply-add long"]
14454#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
14455#[inline]
14456#[target_feature(enable = "neon")]
14457#[cfg_attr(test, assert_instr(umlal2))]
14458#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14459pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
14460    unsafe {
14461        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
14462        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
14463        vmlal_u32(a, b, c)
14464    }
14465}
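// Editorial sketch (illustrative, not generated output): each `_high_` form
// above selects the upper half of its 128-bit inputs with `simd_shuffle!`
// (indices 8..16, 4..8, or 2..4) and then reuses the 64-bit `vmlal_*`
// intrinsic, so the `smlal2`/`umlal2` semantics reduce to the base widening
// multiply-accumulate. A hedged usage sketch, assuming the caller is already
// inside a `neon`-enabled context:
//
//     // let acc: uint64x2_t = vdupq_n_u64(0);
//     // let r = vmlal_high_u32(acc, b, c); // acc + widen(b.hi) * widen(c.hi)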
14466#[doc = "Floating-point multiply-subtract from accumulator"]
14467#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
14468#[inline]
14469#[target_feature(enable = "neon")]
14470#[cfg_attr(test, assert_instr(fmul))]
14471#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14472pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
14473    unsafe { simd_sub(a, simd_mul(b, c)) }
14474}
14475#[doc = "Floating-point multiply-subtract from accumulator"]
14476#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
14477#[inline]
14478#[target_feature(enable = "neon")]
14479#[cfg_attr(test, assert_instr(fmul))]
14480#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14481pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
14482    unsafe { simd_sub(a, simd_mul(b, c)) }
14483}
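// Editorial note (illustrative, not generated output): `vmls_f64`/`vmlsq_f64`
// compute `a - b * c` as two separate IEEE operations (the multiply rounds
// before the subtract), which is why the test attribute asserts on `fmul`
// rather than on a fused instruction; callers wanting a single-rounding fused
// form would reach for the `vfms_*` family instead.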
14484#[doc = "Multiply-subtract long"]
14485#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
14486#[inline]
14487#[target_feature(enable = "neon")]
14488#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14489#[rustc_legacy_const_generics(3)]
14490#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14491pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
14492    static_assert_uimm_bits!(LANE, 2);
14493    unsafe {
14494        vmlsl_high_s16(
14495            a,
14496            b,
14497            simd_shuffle!(
14498                c,
14499                c,
14500                [
14501                    LANE as u32,
14502                    LANE as u32,
14503                    LANE as u32,
14504                    LANE as u32,
14505                    LANE as u32,
14506                    LANE as u32,
14507                    LANE as u32,
14508                    LANE as u32
14509                ]
14510            ),
14511        )
14512    }
14513}
14514#[doc = "Multiply-subtract long"]
14515#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
14516#[inline]
14517#[target_feature(enable = "neon")]
14518#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14519#[rustc_legacy_const_generics(3)]
14520#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14521pub fn vmlsl_high_laneq_s16<const LANE: i32>(
14522    a: int32x4_t,
14523    b: int16x8_t,
14524    c: int16x8_t,
14525) -> int32x4_t {
14526    static_assert_uimm_bits!(LANE, 3);
14527    unsafe {
14528        vmlsl_high_s16(
14529            a,
14530            b,
14531            simd_shuffle!(
14532                c,
14533                c,
14534                [
14535                    LANE as u32,
14536                    LANE as u32,
14537                    LANE as u32,
14538                    LANE as u32,
14539                    LANE as u32,
14540                    LANE as u32,
14541                    LANE as u32,
14542                    LANE as u32
14543                ]
14544            ),
14545        )
14546    }
14547}
14548#[doc = "Multiply-subtract long"]
14549#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
14550#[inline]
14551#[target_feature(enable = "neon")]
14552#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14553#[rustc_legacy_const_generics(3)]
14554#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14555pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
14556    static_assert_uimm_bits!(LANE, 1);
14557    unsafe {
14558        vmlsl_high_s32(
14559            a,
14560            b,
14561            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14562        )
14563    }
14564}
14565#[doc = "Multiply-subtract long"]
14566#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
14567#[inline]
14568#[target_feature(enable = "neon")]
14569#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14570#[rustc_legacy_const_generics(3)]
14571#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14572pub fn vmlsl_high_laneq_s32<const LANE: i32>(
14573    a: int64x2_t,
14574    b: int32x4_t,
14575    c: int32x4_t,
14576) -> int64x2_t {
14577    static_assert_uimm_bits!(LANE, 2);
14578    unsafe {
14579        vmlsl_high_s32(
14580            a,
14581            b,
14582            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14583        )
14584    }
14585}
14586#[doc = "Multiply-subtract long"]
14587#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
14588#[inline]
14589#[target_feature(enable = "neon")]
14590#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14591#[rustc_legacy_const_generics(3)]
14592#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14593pub fn vmlsl_high_lane_u16<const LANE: i32>(
14594    a: uint32x4_t,
14595    b: uint16x8_t,
14596    c: uint16x4_t,
14597) -> uint32x4_t {
14598    static_assert_uimm_bits!(LANE, 2);
14599    unsafe {
14600        vmlsl_high_u16(
14601            a,
14602            b,
14603            simd_shuffle!(
14604                c,
14605                c,
14606                [
14607                    LANE as u32,
14608                    LANE as u32,
14609                    LANE as u32,
14610                    LANE as u32,
14611                    LANE as u32,
14612                    LANE as u32,
14613                    LANE as u32,
14614                    LANE as u32
14615                ]
14616            ),
14617        )
14618    }
14619}
14620#[doc = "Multiply-subtract long"]
14621#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
14622#[inline]
14623#[target_feature(enable = "neon")]
14624#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14625#[rustc_legacy_const_generics(3)]
14626#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14627pub fn vmlsl_high_laneq_u16<const LANE: i32>(
14628    a: uint32x4_t,
14629    b: uint16x8_t,
14630    c: uint16x8_t,
14631) -> uint32x4_t {
14632    static_assert_uimm_bits!(LANE, 3);
14633    unsafe {
14634        vmlsl_high_u16(
14635            a,
14636            b,
14637            simd_shuffle!(
14638                c,
14639                c,
14640                [
14641                    LANE as u32,
14642                    LANE as u32,
14643                    LANE as u32,
14644                    LANE as u32,
14645                    LANE as u32,
14646                    LANE as u32,
14647                    LANE as u32,
14648                    LANE as u32
14649                ]
14650            ),
14651        )
14652    }
14653}
14654#[doc = "Multiply-subtract long"]
14655#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
14656#[inline]
14657#[target_feature(enable = "neon")]
14658#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14659#[rustc_legacy_const_generics(3)]
14660#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14661pub fn vmlsl_high_lane_u32<const LANE: i32>(
14662    a: uint64x2_t,
14663    b: uint32x4_t,
14664    c: uint32x2_t,
14665) -> uint64x2_t {
14666    static_assert_uimm_bits!(LANE, 1);
14667    unsafe {
14668        vmlsl_high_u32(
14669            a,
14670            b,
14671            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14672        )
14673    }
14674}
14675#[doc = "Multiply-subtract long"]
14676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
14677#[inline]
14678#[target_feature(enable = "neon")]
14679#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14680#[rustc_legacy_const_generics(3)]
14681#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14682pub fn vmlsl_high_laneq_u32<const LANE: i32>(
14683    a: uint64x2_t,
14684    b: uint32x4_t,
14685    c: uint32x4_t,
14686) -> uint64x2_t {
14687    static_assert_uimm_bits!(LANE, 2);
14688    unsafe {
14689        vmlsl_high_u32(
14690            a,
14691            b,
14692            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14693        )
14694    }
14695}
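// Editorial sketch (illustrative, not generated output): the `lane`/`laneq`
// forms above take the lane index as a const generic, validate it at compile
// time with `static_assert_uimm_bits!` (2 bits for 4 lanes, 3 bits for 8),
// and splat the chosen lane with `simd_shuffle!` before calling the plain
// `_high_` intrinsic. `#[rustc_legacy_const_generics(3)]` additionally lets
// the index be written as a trailing C-style argument; the ordinary Rust
// call shape is:
//
//     // let r = vmlsl_high_lane_u32::<1>(acc, b, c); // splat lane 1 of `c`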
14696#[doc = "Multiply-subtract long"]
14697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
14698#[inline]
14699#[target_feature(enable = "neon")]
14700#[cfg_attr(test, assert_instr(smlsl2))]
14701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14702pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
14703    vmlsl_high_s16(a, b, vdupq_n_s16(c))
14704}
14705#[doc = "Multiply-subtract long"]
14706#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
14707#[inline]
14708#[target_feature(enable = "neon")]
14709#[cfg_attr(test, assert_instr(smlsl2))]
14710#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14711pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
14712    vmlsl_high_s32(a, b, vdupq_n_s32(c))
14713}
14714#[doc = "Multiply-subtract long"]
14715#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
14716#[inline]
14717#[target_feature(enable = "neon")]
14718#[cfg_attr(test, assert_instr(umlsl2))]
14719#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14720pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
14721    vmlsl_high_u16(a, b, vdupq_n_u16(c))
14722}
14723#[doc = "Multiply-subtract long"]
14724#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
14725#[inline]
14726#[target_feature(enable = "neon")]
14727#[cfg_attr(test, assert_instr(umlsl2))]
14728#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14729pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
14730    vmlsl_high_u32(a, b, vdupq_n_u32(c))
14731}
14732#[doc = "Signed multiply-subtract long"]
14733#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
14734#[inline]
14735#[target_feature(enable = "neon")]
14736#[cfg_attr(test, assert_instr(smlsl2))]
14737#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14738pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
14739    unsafe {
14740        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14741        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14742        vmlsl_s8(a, b, c)
14743    }
14744}
14745#[doc = "Signed multiply-subtract long"]
14746#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
14747#[inline]
14748#[target_feature(enable = "neon")]
14749#[cfg_attr(test, assert_instr(smlsl2))]
14750#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14751pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
14752    unsafe {
14753        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14754        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14755        vmlsl_s16(a, b, c)
14756    }
14757}
14758#[doc = "Signed multiply-subtract long"]
14759#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
14760#[inline]
14761#[target_feature(enable = "neon")]
14762#[cfg_attr(test, assert_instr(smlsl2))]
14763#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14764pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
14765    unsafe {
14766        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
14767        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
14768        vmlsl_s32(a, b, c)
14769    }
14770}
14771#[doc = "Unsigned multiply-subtract long"]
14772#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
14773#[inline]
14774#[target_feature(enable = "neon")]
14775#[cfg_attr(test, assert_instr(umlsl2))]
14776#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14777pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
14778    unsafe {
14779        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14780        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14781        vmlsl_u8(a, b, c)
14782    }
14783}
14784#[doc = "Unsigned multiply-subtract long"]
14785#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
14786#[inline]
14787#[target_feature(enable = "neon")]
14788#[cfg_attr(test, assert_instr(umlsl2))]
14789#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14790pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
14791    unsafe {
14792        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14793        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14794        vmlsl_u16(a, b, c)
14795    }
14796}
14797#[doc = "Unsigned multiply-subtract long"]
14798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
14799#[inline]
14800#[target_feature(enable = "neon")]
14801#[cfg_attr(test, assert_instr(umlsl2))]
14802#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14803pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
14804    unsafe {
14805        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
14806        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
14807        vmlsl_u32(a, b, c)
14808    }
14809}
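// Editorial sketch (illustrative, not generated output): `vmlsl_high_*`
// mirrors `vmlal_high_*` with subtraction. Hedged lane model for
// `vmlsl_high_u32`:
//
//     // for i in 0..2 (the two u64 output lanes):
//     //     out[i] = a[i] - (b[2 + i] as u64) * (c[2 + i] as u64)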
14810#[doc = "Vector move"]
14811#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
14812#[inline]
14813#[target_feature(enable = "neon")]
14814#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14815#[cfg_attr(test, assert_instr(sxtl2))]
14816pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
14817    unsafe {
14818        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14819        vmovl_s8(a)
14820    }
14821}
14822#[doc = "Vector move"]
14823#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
14824#[inline]
14825#[target_feature(enable = "neon")]
14826#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14827#[cfg_attr(test, assert_instr(sxtl2))]
14828pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
14829    unsafe {
14830        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14831        vmovl_s16(a)
14832    }
14833}
14834#[doc = "Vector move"]
14835#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
14836#[inline]
14837#[target_feature(enable = "neon")]
14838#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14839#[cfg_attr(test, assert_instr(sxtl2))]
14840pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
14841    unsafe {
14842        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
14843        vmovl_s32(a)
14844    }
14845}
14846#[doc = "Vector move"]
14847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
14848#[inline]
14849#[target_feature(enable = "neon")]
14850#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14851#[cfg_attr(test, assert_instr(uxtl2))]
14852pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
14853    unsafe {
14854        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14855        vmovl_u8(a)
14856    }
14857}
14858#[doc = "Vector move"]
14859#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
14860#[inline]
14861#[target_feature(enable = "neon")]
14862#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14863#[cfg_attr(test, assert_instr(uxtl2))]
14864pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
14865    unsafe {
14866        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14867        vmovl_u16(a)
14868    }
14869}
14870#[doc = "Vector move"]
14871#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
14872#[inline]
14873#[target_feature(enable = "neon")]
14874#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14875#[cfg_attr(test, assert_instr(uxtl2))]
14876pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
14877    unsafe {
14878        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
14879        vmovl_u32(a)
14880    }
14881}
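// Editorial sketch (illustrative, not generated output): `vmovl_high_*`
// widens the upper half of the input, sign-extending for the `s` forms
// (`sxtl2`) and zero-extending for the `u` forms (`uxtl2`). Hedged lane
// model for `vmovl_high_s8`:
//
//     // for i in 0..8: out[i] = a[8 + i] as i16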
14882#[doc = "Extract narrow"]
14883#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
14884#[inline]
14885#[target_feature(enable = "neon")]
14886#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14887#[cfg_attr(test, assert_instr(xtn2))]
14888pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
14889    unsafe {
14890        let c: int8x8_t = simd_cast(b);
14891        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14892    }
14893}
14894#[doc = "Extract narrow"]
14895#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
14896#[inline]
14897#[target_feature(enable = "neon")]
14898#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14899#[cfg_attr(test, assert_instr(xtn2))]
14900pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
14901    unsafe {
14902        let c: int16x4_t = simd_cast(b);
14903        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14904    }
14905}
14906#[doc = "Extract narrow"]
14907#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
14908#[inline]
14909#[target_feature(enable = "neon")]
14910#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14911#[cfg_attr(test, assert_instr(xtn2))]
14912pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
14913    unsafe {
14914        let c: int32x2_t = simd_cast(b);
14915        simd_shuffle!(a, c, [0, 1, 2, 3])
14916    }
14917}
14918#[doc = "Extract narrow"]
14919#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
14920#[inline]
14921#[target_feature(enable = "neon")]
14922#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14923#[cfg_attr(test, assert_instr(xtn2))]
14924pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
14925    unsafe {
14926        let c: uint8x8_t = simd_cast(b);
14927        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14928    }
14929}
14930#[doc = "Extract narrow"]
14931#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
14932#[inline]
14933#[target_feature(enable = "neon")]
14934#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14935#[cfg_attr(test, assert_instr(xtn2))]
14936pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
14937    unsafe {
14938        let c: uint16x4_t = simd_cast(b);
14939        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14940    }
14941}
14942#[doc = "Extract narrow"]
14943#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
14944#[inline]
14945#[target_feature(enable = "neon")]
14946#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14947#[cfg_attr(test, assert_instr(xtn2))]
14948pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
14949    unsafe {
14950        let c: uint32x2_t = simd_cast(b);
14951        simd_shuffle!(a, c, [0, 1, 2, 3])
14952    }
14953}
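// Editorial sketch (illustrative, not generated output): `vmovn_high_*`
// truncates each wide lane of `b` (plain bit truncation via `simd_cast`, no
// saturation; the saturating family is `vqmovn_*`) and concatenates the
// result after the existing narrow vector `a`, matching the
// write-to-upper-half `xtn2` form. For `vmovn_high_s16`:
//
//     // out[0..8]  = a
//     // out[8..16] = truncate-each-lane(b)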
14954#[doc = "Multiply"]
14955#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
14956#[inline]
14957#[target_feature(enable = "neon")]
14958#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14959#[cfg_attr(test, assert_instr(fmul))]
14960pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
14961    unsafe { simd_mul(a, b) }
14962}
14963#[doc = "Multiply"]
14964#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
14965#[inline]
14966#[target_feature(enable = "neon")]
14967#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14968#[cfg_attr(test, assert_instr(fmul))]
14969pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
14970    unsafe { simd_mul(a, b) }
14971}
14972#[doc = "Floating-point multiply"]
14973#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
14974#[inline]
14975#[target_feature(enable = "neon")]
14976#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14977#[rustc_legacy_const_generics(2)]
14978#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14979pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
14980    static_assert!(LANE == 0);
14981    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
14982}
14983#[doc = "Floating-point multiply"]
14984#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
14985#[inline]
14986#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14987#[rustc_legacy_const_generics(2)]
14988#[target_feature(enable = "neon,fp16")]
14989#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14990pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
14991    static_assert_uimm_bits!(LANE, 3);
14992    unsafe {
14993        simd_mul(
14994            a,
14995            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14996        )
14997    }
14998}
14999#[doc = "Floating-point multiply"]
15000#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
15001#[inline]
15002#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15003#[rustc_legacy_const_generics(2)]
15004#[target_feature(enable = "neon,fp16")]
15005#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15006pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15007    static_assert_uimm_bits!(LANE, 3);
15008    unsafe {
15009        simd_mul(
15010            a,
15011            simd_shuffle!(
15012                b,
15013                b,
15014                [
15015                    LANE as u32,
15016                    LANE as u32,
15017                    LANE as u32,
15018                    LANE as u32,
15019                    LANE as u32,
15020                    LANE as u32,
15021                    LANE as u32,
15022                    LANE as u32
15023                ]
15024            ),
15025        )
15026    }
15027}
15028#[doc = "Floating-point multiply"]
15029#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
15030#[inline]
15031#[target_feature(enable = "neon")]
15032#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15033#[rustc_legacy_const_generics(2)]
15034#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15035pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
15036    static_assert_uimm_bits!(LANE, 1);
15037    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
15038}
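// Editorial note (illustrative, not generated output): for the one-lane
// `float64x1_t` destination there is nothing to shuffle, so the lane forms
// above extract the chosen `f64` with `simd_extract!` and rebuild a vector
// via `transmute::<f64, _>`; a bit-cast of a single scalar is the 1-lane
// equivalent of the `simd_shuffle!` splat used for the wider types.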
15039#[doc = "Vector multiply by scalar"]
15040#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
15041#[inline]
15042#[target_feature(enable = "neon")]
15043#[cfg_attr(test, assert_instr(fmul))]
15044#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15045pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
15046    unsafe { simd_mul(a, vdup_n_f64(b)) }
15047}
15048#[doc = "Vector multiply by scalar"]
15049#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
15050#[inline]
15051#[target_feature(enable = "neon")]
15052#[cfg_attr(test, assert_instr(fmul))]
15053#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15054pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
15055    unsafe { simd_mul(a, vdupq_n_f64(b)) }
15056}
15057#[doc = "Floating-point multiply"]
15058#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
15059#[inline]
15060#[target_feature(enable = "neon")]
15061#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15062#[rustc_legacy_const_generics(2)]
15063#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15064pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
15065    static_assert!(LANE == 0);
15066    unsafe {
15067        let b: f64 = simd_extract!(b, LANE as u32);
15068        a * b
15069    }
15070}
15071#[doc = "Add"]
15072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
15073#[inline]
15074#[target_feature(enable = "neon,fp16")]
15075#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15076#[cfg_attr(test, assert_instr(nop))]
15077pub fn vmulh_f16(a: f16, b: f16) -> f16 {
15078    a * b
15079}
15080#[doc = "Floating-point multiply"]
15081#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
15082#[inline]
15083#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15084#[rustc_legacy_const_generics(2)]
15085#[target_feature(enable = "neon,fp16")]
15086#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15087pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
15088    static_assert_uimm_bits!(LANE, 2);
15089    unsafe {
15090        let b: f16 = simd_extract!(b, LANE as u32);
15091        a * b
15092    }
15093}
15094#[doc = "Floating-point multiply"]
15095#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
15096#[inline]
15097#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15098#[rustc_legacy_const_generics(2)]
15099#[target_feature(enable = "neon,fp16")]
15100#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15101pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
15102    static_assert_uimm_bits!(LANE, 3);
15103    unsafe {
15104        let b: f16 = simd_extract!(b, LANE as u32);
15105        a * b
15106    }
15107}
15108#[doc = "Multiply long"]
15109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
15110#[inline]
15111#[target_feature(enable = "neon")]
15112#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15113#[rustc_legacy_const_generics(2)]
15114#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15115pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
15116    static_assert_uimm_bits!(LANE, 2);
15117    unsafe {
15118        vmull_high_s16(
15119            a,
15120            simd_shuffle!(
15121                b,
15122                b,
15123                [
15124                    LANE as u32,
15125                    LANE as u32,
15126                    LANE as u32,
15127                    LANE as u32,
15128                    LANE as u32,
15129                    LANE as u32,
15130                    LANE as u32,
15131                    LANE as u32
15132                ]
15133            ),
15134        )
15135    }
15136}
15137#[doc = "Multiply long"]
15138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
15139#[inline]
15140#[target_feature(enable = "neon")]
15141#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15142#[rustc_legacy_const_generics(2)]
15143#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15144pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
15145    static_assert_uimm_bits!(LANE, 3);
15146    unsafe {
15147        vmull_high_s16(
15148            a,
15149            simd_shuffle!(
15150                b,
15151                b,
15152                [
15153                    LANE as u32,
15154                    LANE as u32,
15155                    LANE as u32,
15156                    LANE as u32,
15157                    LANE as u32,
15158                    LANE as u32,
15159                    LANE as u32,
15160                    LANE as u32
15161                ]
15162            ),
15163        )
15164    }
15165}
15166#[doc = "Multiply long"]
15167#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
15168#[inline]
15169#[target_feature(enable = "neon")]
15170#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15171#[rustc_legacy_const_generics(2)]
15172#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15173pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
15174    static_assert_uimm_bits!(LANE, 1);
15175    unsafe {
15176        vmull_high_s32(
15177            a,
15178            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15179        )
15180    }
15181}
15182#[doc = "Multiply long"]
15183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
15184#[inline]
15185#[target_feature(enable = "neon")]
15186#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15187#[rustc_legacy_const_generics(2)]
15188#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15189pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
15190    static_assert_uimm_bits!(LANE, 2);
15191    unsafe {
15192        vmull_high_s32(
15193            a,
15194            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15195        )
15196    }
15197}
15198#[doc = "Multiply long"]
15199#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
15200#[inline]
15201#[target_feature(enable = "neon")]
15202#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15203#[rustc_legacy_const_generics(2)]
15204#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15205pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
15206    static_assert_uimm_bits!(LANE, 2);
15207    unsafe {
15208        vmull_high_u16(
15209            a,
15210            simd_shuffle!(
15211                b,
15212                b,
15213                [
15214                    LANE as u32,
15215                    LANE as u32,
15216                    LANE as u32,
15217                    LANE as u32,
15218                    LANE as u32,
15219                    LANE as u32,
15220                    LANE as u32,
15221                    LANE as u32
15222                ]
15223            ),
15224        )
15225    }
15226}
15227#[doc = "Multiply long"]
15228#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
15229#[inline]
15230#[target_feature(enable = "neon")]
15231#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15232#[rustc_legacy_const_generics(2)]
15233#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15234pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
15235    static_assert_uimm_bits!(LANE, 3);
15236    unsafe {
15237        vmull_high_u16(
15238            a,
15239            simd_shuffle!(
15240                b,
15241                b,
15242                [
15243                    LANE as u32,
15244                    LANE as u32,
15245                    LANE as u32,
15246                    LANE as u32,
15247                    LANE as u32,
15248                    LANE as u32,
15249                    LANE as u32,
15250                    LANE as u32
15251                ]
15252            ),
15253        )
15254    }
15255}
15256#[doc = "Multiply long"]
15257#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
15258#[inline]
15259#[target_feature(enable = "neon")]
15260#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15261#[rustc_legacy_const_generics(2)]
15262#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15263pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
15264    static_assert_uimm_bits!(LANE, 1);
15265    unsafe {
15266        vmull_high_u32(
15267            a,
15268            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15269        )
15270    }
15271}
15272#[doc = "Multiply long"]
15273#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
15274#[inline]
15275#[target_feature(enable = "neon")]
15276#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15277#[rustc_legacy_const_generics(2)]
15278#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15279pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
15280    static_assert_uimm_bits!(LANE, 2);
15281    unsafe {
15282        vmull_high_u32(
15283            a,
15284            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15285        )
15286    }
15287}
15288#[doc = "Multiply long"]
15289#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
15290#[inline]
15291#[target_feature(enable = "neon")]
15292#[cfg_attr(test, assert_instr(smull2))]
15293#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15294pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
15295    vmull_high_s16(a, vdupq_n_s16(b))
15296}
15297#[doc = "Multiply long"]
15298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
15299#[inline]
15300#[target_feature(enable = "neon")]
15301#[cfg_attr(test, assert_instr(smull2))]
15302#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15303pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
15304    vmull_high_s32(a, vdupq_n_s32(b))
15305}
15306#[doc = "Multiply long"]
15307#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
15308#[inline]
15309#[target_feature(enable = "neon")]
15310#[cfg_attr(test, assert_instr(umull2))]
15311#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15312pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
15313    vmull_high_u16(a, vdupq_n_u16(b))
15314}
15315#[doc = "Multiply long"]
15316#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
15317#[inline]
15318#[target_feature(enable = "neon")]
15319#[cfg_attr(test, assert_instr(umull2))]
15320#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15321pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
15322    vmull_high_u32(a, vdupq_n_u32(b))
15323}
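// Editorial sketch (illustrative, not generated output): `vmull_high_*` is
// the widening multiply of the upper halves without an accumulator. Hedged
// lane model for `vmull_high_n_s16`:
//
//     // for i in 0..4: out[i] = (a[4 + i] as i32) * (b as i32)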
15324#[doc = "Polynomial multiply long"]
15325#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
15326#[inline]
15327#[target_feature(enable = "neon,aes")]
15328#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15329#[cfg_attr(test, assert_instr(pmull))]
15330pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
15331    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
15332}
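// Editorial note (illustrative, not generated output): the `p64`/`p128`
// forms are polynomial (carry-less) multiplies over GF(2): partial products
// are XORed together instead of added, the primitive behind CRC folding and
// GHASH-style authenticated encryption (hence the `aes` feature gate). A
// hedged scalar model, with `a` and `b` as `u64` and `acc` as `u128`:
//
//     // for each set bit i of a: acc ^= (b as u128) << i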
15333#[doc = "Polynomial multiply long"]
15334#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
15335#[inline]
15336#[target_feature(enable = "neon")]
15337#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15338#[cfg_attr(test, assert_instr(pmull))]
15339pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
15340    unsafe {
15341        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
15342        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
15343        vmull_p8(a, b)
15344    }
15345}
15346#[doc = "Signed multiply long"]
15347#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
15348#[inline]
15349#[target_feature(enable = "neon")]
15350#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15351#[cfg_attr(test, assert_instr(smull2))]
15352pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
15353    unsafe {
15354        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
15355        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
15356        vmull_s8(a, b)
15357    }
15358}
15359#[doc = "Signed multiply long"]
15360#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
15361#[inline]
15362#[target_feature(enable = "neon")]
15363#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15364#[cfg_attr(test, assert_instr(smull2))]
15365pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
15366    unsafe {
15367        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
15368        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
15369        vmull_s16(a, b)
15370    }
15371}
15372#[doc = "Signed multiply long"]
15373#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
15374#[inline]
15375#[target_feature(enable = "neon")]
15376#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15377#[cfg_attr(test, assert_instr(smull2))]
15378pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
15379    unsafe {
15380        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
15381        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
15382        vmull_s32(a, b)
15383    }
15384}
15385#[doc = "Unsigned multiply long"]
15386#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
15387#[inline]
15388#[target_feature(enable = "neon")]
15389#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15390#[cfg_attr(test, assert_instr(umull2))]
15391pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
15392    unsafe {
15393        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
15394        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
15395        vmull_u8(a, b)
15396    }
15397}
15398#[doc = "Unsigned multiply long"]
15399#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
15400#[inline]
15401#[target_feature(enable = "neon")]
15402#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15403#[cfg_attr(test, assert_instr(umull2))]
15404pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
15405    unsafe {
15406        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
15407        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
15408        vmull_u16(a, b)
15409    }
15410}
15411#[doc = "Unsigned multiply long"]
15412#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
15413#[inline]
15414#[target_feature(enable = "neon")]
15415#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15416#[cfg_attr(test, assert_instr(umull2))]
15417pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
15418    unsafe {
15419        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
15420        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
15421        vmull_u32(a, b)
15422    }
15423}
15424#[doc = "Polynomial multiply long"]
15425#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
15426#[inline]
15427#[target_feature(enable = "neon,aes")]
15428#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15429#[cfg_attr(test, assert_instr(pmull))]
15430pub fn vmull_p64(a: p64, b: p64) -> p128 {
15431    unsafe extern "unadjusted" {
15432        #[cfg_attr(
15433            any(target_arch = "aarch64", target_arch = "arm64ec"),
15434            link_name = "llvm.aarch64.neon.pmull64"
15435        )]
15436        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
15437    }
15438    unsafe { transmute(_vmull_p64(a, b)) }
15439}
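// Editorial note (illustrative, not generated output): `vmull_p64` shows the
// binding pattern this file uses when no portable SIMD model exists: an
// `unsafe extern "unadjusted"` block names the LLVM intrinsic via
// `link_name` ("llvm.aarch64.neon.pmull64"), the raw `int8x16_t` result is
// `transmute`d to the declared `p128` return type, and the safe wrapper
// confines the `unsafe` call to a single expression.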
15440#[doc = "Floating-point multiply"]
15441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
15442#[inline]
15443#[target_feature(enable = "neon")]
15444#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15445#[rustc_legacy_const_generics(2)]
15446#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15447pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
15448    static_assert!(LANE == 0);
15449    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15450}
15451#[doc = "Floating-point multiply"]
15452#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
15453#[inline]
15454#[target_feature(enable = "neon")]
15455#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15456#[rustc_legacy_const_generics(2)]
15457#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15458pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15459    static_assert_uimm_bits!(LANE, 1);
15460    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15461}
15462#[doc = "Floating-point multiply"]
15463#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
15464#[inline]
15465#[target_feature(enable = "neon")]
15466#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15467#[rustc_legacy_const_generics(2)]
15468#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15469pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
15470    static_assert_uimm_bits!(LANE, 1);
15471    unsafe {
15472        let b: f32 = simd_extract!(b, LANE as u32);
15473        a * b
15474    }
15475}
15476#[doc = "Floating-point multiply"]
15477#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
15478#[inline]
15479#[target_feature(enable = "neon")]
15480#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15481#[rustc_legacy_const_generics(2)]
15482#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15483pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
15484    static_assert_uimm_bits!(LANE, 2);
15485    unsafe {
15486        let b: f32 = simd_extract!(b, LANE as u32);
15487        a * b
15488    }
15489}
15490#[doc = "Floating-point multiply"]
15491#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
15492#[inline]
15493#[target_feature(enable = "neon")]
15494#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15495#[rustc_legacy_const_generics(2)]
15496#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15497pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
15498    static_assert_uimm_bits!(LANE, 1);
15499    unsafe {
15500        let b: f64 = simd_extract!(b, LANE as u32);
15501        a * b
15502    }
15503}
15504#[doc = "Floating-point multiply extended"]
15505#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
15506#[inline]
15507#[target_feature(enable = "neon,fp16")]
15508#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15509#[cfg_attr(test, assert_instr(fmulx))]
15510pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15511    unsafe extern "unadjusted" {
15512        #[cfg_attr(
15513            any(target_arch = "aarch64", target_arch = "arm64ec"),
15514            link_name = "llvm.aarch64.neon.fmulx.v4f16"
15515        )]
15516        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
15517    }
15518    unsafe { _vmulx_f16(a, b) }
15519}
15520#[doc = "Floating-point multiply extended"]
15521#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
15522#[inline]
15523#[target_feature(enable = "neon,fp16")]
15524#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15525#[cfg_attr(test, assert_instr(fmulx))]
15526pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15527    unsafe extern "unadjusted" {
15528        #[cfg_attr(
15529            any(target_arch = "aarch64", target_arch = "arm64ec"),
15530            link_name = "llvm.aarch64.neon.fmulx.v8f16"
15531        )]
15532        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15533    }
15534    unsafe { _vmulxq_f16(a, b) }
15535}
15536#[doc = "Floating-point multiply extended"]
15537#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
15538#[inline]
15539#[target_feature(enable = "neon")]
15540#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15541#[cfg_attr(test, assert_instr(fmulx))]
15542pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
15543    unsafe extern "unadjusted" {
15544        #[cfg_attr(
15545            any(target_arch = "aarch64", target_arch = "arm64ec"),
15546            link_name = "llvm.aarch64.neon.fmulx.v2f32"
15547        )]
15548        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
15549    }
15550    unsafe { _vmulx_f32(a, b) }
15551}
15552#[doc = "Floating-point multiply extended"]
15553#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
15554#[inline]
15555#[target_feature(enable = "neon")]
15556#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15557#[cfg_attr(test, assert_instr(fmulx))]
15558pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15559    unsafe extern "unadjusted" {
15560        #[cfg_attr(
15561            any(target_arch = "aarch64", target_arch = "arm64ec"),
15562            link_name = "llvm.aarch64.neon.fmulx.v4f32"
15563        )]
15564        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
15565    }
15566    unsafe { _vmulxq_f32(a, b) }
15567}
15568#[doc = "Floating-point multiply extended"]
15569#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
15570#[inline]
15571#[target_feature(enable = "neon")]
15572#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15573#[cfg_attr(test, assert_instr(fmulx))]
15574pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
15575    unsafe extern "unadjusted" {
15576        #[cfg_attr(
15577            any(target_arch = "aarch64", target_arch = "arm64ec"),
15578            link_name = "llvm.aarch64.neon.fmulx.v1f64"
15579        )]
15580        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
15581    }
15582    unsafe { _vmulx_f64(a, b) }
15583}
15584#[doc = "Floating-point multiply extended"]
15585#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
15586#[inline]
15587#[target_feature(enable = "neon")]
15588#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15589#[cfg_attr(test, assert_instr(fmulx))]
15590pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15591    unsafe extern "unadjusted" {
15592        #[cfg_attr(
15593            any(target_arch = "aarch64", target_arch = "arm64ec"),
15594            link_name = "llvm.aarch64.neon.fmulx.v2f64"
15595        )]
15596        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
15597    }
15598    unsafe { _vmulxq_f64(a, b) }
15599}
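// Editorial note (illustrative, not generated output): `fmulx` ("multiply
// extended") behaves like an ordinary IEEE multiply except that
// 0.0 * +/-infinity, in either operand order, returns +/-2.0 instead of NaN,
// which keeps reciprocal-estimate Newton iterations well defined. The
// lane, scalar, and `_n_` wrappers below all funnel into the LLVM bindings
// above.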
15600#[doc = "Floating-point multiply extended"]
15601#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
15602#[inline]
15603#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15604#[rustc_legacy_const_generics(2)]
15605#[target_feature(enable = "neon,fp16")]
15606#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15607pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15608    static_assert_uimm_bits!(LANE, 2);
15609    unsafe {
15610        vmulx_f16(
15611            a,
15612            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15613        )
15614    }
15615}
15616#[doc = "Floating-point multiply extended"]
15617#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
15618#[inline]
15619#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15620#[rustc_legacy_const_generics(2)]
15621#[target_feature(enable = "neon,fp16")]
15622#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15623pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
15624    static_assert_uimm_bits!(LANE, 3);
15625    unsafe {
15626        vmulx_f16(
15627            a,
15628            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15629        )
15630    }
15631}
15632#[doc = "Floating-point multiply extended"]
15633#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
15634#[inline]
15635#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15636#[rustc_legacy_const_generics(2)]
15637#[target_feature(enable = "neon,fp16")]
15638#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15639pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
15640    static_assert_uimm_bits!(LANE, 2);
15641    unsafe {
15642        vmulxq_f16(
15643            a,
15644            simd_shuffle!(
15645                b,
15646                b,
15647                [
15648                    LANE as u32,
15649                    LANE as u32,
15650                    LANE as u32,
15651                    LANE as u32,
15652                    LANE as u32,
15653                    LANE as u32,
15654                    LANE as u32,
15655                    LANE as u32
15656                ]
15657            ),
15658        )
15659    }
15660}
15661#[doc = "Floating-point multiply extended"]
15662#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
15663#[inline]
15664#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15665#[rustc_legacy_const_generics(2)]
15666#[target_feature(enable = "neon,fp16")]
15667#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15668pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15669    static_assert_uimm_bits!(LANE, 3);
15670    unsafe {
15671        vmulxq_f16(
15672            a,
15673            simd_shuffle!(
15674                b,
15675                b,
15676                [
15677                    LANE as u32,
15678                    LANE as u32,
15679                    LANE as u32,
15680                    LANE as u32,
15681                    LANE as u32,
15682                    LANE as u32,
15683                    LANE as u32,
15684                    LANE as u32
15685                ]
15686            ),
15687        )
15688    }
15689}
15690#[doc = "Floating-point multiply extended"]
15691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
15692#[inline]
15693#[target_feature(enable = "neon")]
15694#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15695#[rustc_legacy_const_generics(2)]
15696#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15697pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
15698    static_assert_uimm_bits!(LANE, 1);
15699    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15700}
15701#[doc = "Floating-point multiply extended"]
15702#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
15703#[inline]
15704#[target_feature(enable = "neon")]
15705#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15706#[rustc_legacy_const_generics(2)]
15707#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15708pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
15709    static_assert_uimm_bits!(LANE, 2);
15710    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15711}
15712#[doc = "Floating-point multiply extended"]
15713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
15714#[inline]
15715#[target_feature(enable = "neon")]
15716#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15717#[rustc_legacy_const_generics(2)]
15718#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15719pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
15720    static_assert_uimm_bits!(LANE, 1);
15721    unsafe {
15722        vmulxq_f32(
15723            a,
15724            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15725        )
15726    }
15727}
15728#[doc = "Floating-point multiply extended"]
15729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
15730#[inline]
15731#[target_feature(enable = "neon")]
15732#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15733#[rustc_legacy_const_generics(2)]
15734#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15735pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15736    static_assert_uimm_bits!(LANE, 2);
15737    unsafe {
15738        vmulxq_f32(
15739            a,
15740            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15741        )
15742    }
15743}
15744#[doc = "Floating-point multiply extended"]
15745#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
15746#[inline]
15747#[target_feature(enable = "neon")]
15748#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15749#[rustc_legacy_const_generics(2)]
15750#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15751pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15752    static_assert_uimm_bits!(LANE, 1);
15753    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15754}
15755#[doc = "Floating-point multiply extended"]
15756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
15757#[inline]
15758#[target_feature(enable = "neon")]
15759#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15760#[rustc_legacy_const_generics(2)]
15761#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15762pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
15763    static_assert!(LANE == 0);
15764    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
15765}
15766#[doc = "Floating-point multiply extended"]
15767#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
15768#[inline]
15769#[target_feature(enable = "neon")]
15770#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15771#[rustc_legacy_const_generics(2)]
15772#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15773pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
15774    static_assert_uimm_bits!(LANE, 1);
15775    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
15776}
15777#[doc = "Floating-point multiply extended by scalar"]
15778#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
15779#[inline]
15780#[cfg_attr(test, assert_instr(fmulx))]
15781#[target_feature(enable = "neon,fp16")]
15782#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15783pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
15784    vmulx_f16(a, vdup_n_f16(b))
15785}
15786#[doc = "Floating-point multiply extended by scalar"]
15787#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
15788#[inline]
15789#[cfg_attr(test, assert_instr(fmulx))]
15790#[target_feature(enable = "neon,fp16")]
15791#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15792pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
15793    vmulxq_f16(a, vdupq_n_f16(b))
15794}
15795#[doc = "Floating-point multiply extended"]
15796#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
15797#[inline]
15798#[target_feature(enable = "neon")]
15799#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15800#[cfg_attr(test, assert_instr(fmulx))]
15801pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
15802    unsafe extern "unadjusted" {
15803        #[cfg_attr(
15804            any(target_arch = "aarch64", target_arch = "arm64ec"),
15805            link_name = "llvm.aarch64.neon.fmulx.f64"
15806        )]
15807        fn _vmulxd_f64(a: f64, b: f64) -> f64;
15808    }
15809    unsafe { _vmulxd_f64(a, b) }
15810}
15811#[doc = "Floating-point multiply extended"]
15812#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
15813#[inline]
15814#[target_feature(enable = "neon")]
15815#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15816#[cfg_attr(test, assert_instr(fmulx))]
15817pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
15818    unsafe extern "unadjusted" {
15819        #[cfg_attr(
15820            any(target_arch = "aarch64", target_arch = "arm64ec"),
15821            link_name = "llvm.aarch64.neon.fmulx.f32"
15822        )]
15823        fn _vmulxs_f32(a: f32, b: f32) -> f32;
15824    }
15825    unsafe { _vmulxs_f32(a, b) }
15826}
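// An illustrative sketch (editorial addition, not generator output) of what
// distinguishes FMULX from a plain multiply: for ordinary operands it behaves
// exactly like `a * b`, but zero times infinity is defined as +/-2.0 rather
// than NaN.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vmulxs_f32() {
    assert_eq!(vmulxs_f32(1.5, 2.0), 3.0);
    // The special case: the sign of the 2.0 follows the signs of the operands.
    assert_eq!(vmulxs_f32(0.0, f32::INFINITY), 2.0);
    assert_eq!(vmulxs_f32(-0.0, f32::INFINITY), -2.0);
}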
15827#[doc = "Floating-point multiply extended"]
15828#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
15829#[inline]
15830#[target_feature(enable = "neon")]
15831#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15832#[rustc_legacy_const_generics(2)]
15833#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15834pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
15835    static_assert!(LANE == 0);
15836    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
15837}
15838#[doc = "Floating-point multiply extended"]
15839#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
15840#[inline]
15841#[target_feature(enable = "neon")]
15842#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15843#[rustc_legacy_const_generics(2)]
15844#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15845pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
15846    static_assert_uimm_bits!(LANE, 1);
15847    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
15848}
15849#[doc = "Floating-point multiply extended"]
15850#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
15851#[inline]
15852#[target_feature(enable = "neon")]
15853#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15854#[rustc_legacy_const_generics(2)]
15855#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15856pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
15857    static_assert_uimm_bits!(LANE, 1);
15858    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
15859}
15860#[doc = "Floating-point multiply extended"]
15861#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
15862#[inline]
15863#[target_feature(enable = "neon")]
15864#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15865#[rustc_legacy_const_generics(2)]
15866#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15867pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
15868    static_assert_uimm_bits!(LANE, 2);
15869    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
15870}
15871#[doc = "Floating-point multiply extended"]
15872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
15873#[inline]
15874#[target_feature(enable = "neon,fp16")]
15875#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15876#[cfg_attr(test, assert_instr(fmulx))]
15877pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
15878    unsafe extern "unadjusted" {
15879        #[cfg_attr(
15880            any(target_arch = "aarch64", target_arch = "arm64ec"),
15881            link_name = "llvm.aarch64.neon.fmulx.f16"
15882        )]
15883        fn _vmulxh_f16(a: f16, b: f16) -> f16;
15884    }
15885    unsafe { _vmulxh_f16(a, b) }
15886}
15887#[doc = "Floating-point multiply extended"]
15888#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
15889#[inline]
15890#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15891#[rustc_legacy_const_generics(2)]
15892#[target_feature(enable = "neon,fp16")]
15893#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15894pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
15895    static_assert_uimm_bits!(LANE, 2);
15896    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
15897}
15898#[doc = "Floating-point multiply extended"]
15899#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
15900#[inline]
15901#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15902#[rustc_legacy_const_generics(2)]
15903#[target_feature(enable = "neon,fp16")]
15904#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15905pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
15906    static_assert_uimm_bits!(LANE, 3);
15907    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
15908}
15909#[doc = "Floating-point multiply extended"]
15910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
15911#[inline]
15912#[target_feature(enable = "neon")]
15913#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15914#[rustc_legacy_const_generics(2)]
15915#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15916pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
15917    static_assert!(LANE == 0);
15918    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15919}
15920#[doc = "Negate"]
15921#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
15922#[inline]
15923#[target_feature(enable = "neon")]
15924#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15925#[cfg_attr(test, assert_instr(fneg))]
15926pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
15927    unsafe { simd_neg(a) }
15928}
15929#[doc = "Negate"]
15930#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
15931#[inline]
15932#[target_feature(enable = "neon")]
15933#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15934#[cfg_attr(test, assert_instr(fneg))]
15935pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
15936    unsafe { simd_neg(a) }
15937}
15938#[doc = "Negate"]
15939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
15940#[inline]
15941#[target_feature(enable = "neon")]
15942#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15943#[cfg_attr(test, assert_instr(neg))]
15944pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
15945    unsafe { simd_neg(a) }
15946}
15947#[doc = "Negate"]
15948#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
15949#[inline]
15950#[target_feature(enable = "neon")]
15951#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15952#[cfg_attr(test, assert_instr(neg))]
15953pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
15954    unsafe { simd_neg(a) }
15955}
15956#[doc = "Negate"]
15957#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
15958#[inline]
15959#[target_feature(enable = "neon")]
15960#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15961#[cfg_attr(test, assert_instr(neg))]
15962pub fn vnegd_s64(a: i64) -> i64 {
15963    a.wrapping_neg()
15964}
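// An illustrative sketch (editorial addition, not generator output): NEG
// wraps on overflow, exactly like the `wrapping_neg` above, so negating
// i64::MIN yields i64::MIN again; the saturating `vqnegd_s64` is the
// alternative when clamping is wanted.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vnegd_s64() {
    assert_eq!(vnegd_s64(7), -7);
    // Wrapping behaviour at the type boundary.
    assert_eq!(vnegd_s64(i64::MIN), i64::MIN);
}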
15965#[doc = "Negate"]
15966#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
15967#[inline]
15968#[target_feature(enable = "neon,fp16")]
15969#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15970#[cfg_attr(test, assert_instr(fneg))]
15971pub fn vnegh_f16(a: f16) -> f16 {
15972    -a
15973}
15974#[doc = "Floating-point add pairwise"]
15975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
15976#[inline]
15977#[target_feature(enable = "neon")]
15978#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15979#[cfg_attr(test, assert_instr(nop))]
15980pub fn vpaddd_f64(a: float64x2_t) -> f64 {
15981    unsafe {
15982        let a1: f64 = simd_extract!(a, 0);
15983        let a2: f64 = simd_extract!(a, 1);
15984        a1 + a2
15985    }
15986}
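// An illustrative usage sketch (editorial addition, not generator output):
// the scalar pairwise add simply sums the two lanes of its input.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vpaddd_f64() {
    unsafe {
        let a: float64x2_t = transmute([1.5f64, 2.5]);
        assert_eq!(vpaddd_f64(a), 4.0);
    }
}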
15987#[doc = "Floating-point add pairwise"]
15988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
15989#[inline]
15990#[target_feature(enable = "neon")]
15991#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15992#[cfg_attr(test, assert_instr(nop))]
15993pub fn vpadds_f32(a: float32x2_t) -> f32 {
15994    unsafe {
15995        let a1: f32 = simd_extract!(a, 0);
15996        let a2: f32 = simd_extract!(a, 1);
15997        a1 + a2
15998    }
15999}
16000#[doc = "Add pairwise"]
16001#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
16002#[inline]
16003#[cfg(target_endian = "little")]
16004#[target_feature(enable = "neon")]
16005#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16006#[cfg_attr(test, assert_instr(addp))]
16007pub fn vpaddd_s64(a: int64x2_t) -> i64 {
16008    unsafe { transmute(vaddvq_u64(transmute(a))) }
16009}
16010#[doc = "Add pairwise"]
16011#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
16012#[inline]
16013#[cfg(target_endian = "big")]
16014#[target_feature(enable = "neon")]
16015#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16016#[cfg_attr(test, assert_instr(addp))]
16017pub fn vpaddd_s64(a: int64x2_t) -> i64 {
16018    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
16019    unsafe { transmute(vaddvq_u64(transmute(a))) }
16020}
16021#[doc = "Add pairwise"]
16022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
16023#[inline]
16024#[target_feature(enable = "neon")]
16025#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16026#[cfg_attr(test, assert_instr(addp))]
16027pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
16028    vaddvq_u64(a)
16029}
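// An illustrative sketch (editorial addition, not generator output): the
// integer pairwise add is an ordinary wrapping sum of the two lanes, so
// overflow wraps rather than saturating.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vpaddd_u64() {
    unsafe {
        let a: uint64x2_t = transmute([u64::MAX, 1u64]);
        assert_eq!(vpaddd_u64(a), 0);
    }
}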
16030#[doc = "Floating-point add pairwise"]
16031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
16032#[inline]
16033#[target_feature(enable = "neon,fp16")]
16034#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16035#[cfg_attr(test, assert_instr(faddp))]
16036pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16037    unsafe extern "unadjusted" {
16038        #[cfg_attr(
16039            any(target_arch = "aarch64", target_arch = "arm64ec"),
16040            link_name = "llvm.aarch64.neon.faddp.v8f16"
16041        )]
16042        fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16043    }
16044    unsafe { _vpaddq_f16(a, b) }
16045}
16046#[doc = "Floating-point add pairwise"]
16047#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
16048#[inline]
16049#[target_feature(enable = "neon")]
16050#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16051#[cfg_attr(test, assert_instr(faddp))]
16052pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16053    unsafe extern "unadjusted" {
16054        #[cfg_attr(
16055            any(target_arch = "aarch64", target_arch = "arm64ec"),
16056            link_name = "llvm.aarch64.neon.faddp.v4f32"
16057        )]
16058        fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16059    }
16060    unsafe { _vpaddq_f32(a, b) }
16061}
16062#[doc = "Floating-point add pairwise"]
16063#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
16064#[inline]
16065#[target_feature(enable = "neon")]
16066#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16067#[cfg_attr(test, assert_instr(faddp))]
16068pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16069    unsafe extern "unadjusted" {
16070        #[cfg_attr(
16071            any(target_arch = "aarch64", target_arch = "arm64ec"),
16072            link_name = "llvm.aarch64.neon.faddp.v2f64"
16073        )]
16074        fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16075    }
16076    unsafe { _vpaddq_f64(a, b) }
16077}
16078#[doc = "Add Pairwise"]
16079#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
16080#[inline]
16081#[target_feature(enable = "neon")]
16082#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16083#[cfg_attr(test, assert_instr(addp))]
16084pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
16085    unsafe extern "unadjusted" {
16086        #[cfg_attr(
16087            any(target_arch = "aarch64", target_arch = "arm64ec"),
16088            link_name = "llvm.aarch64.neon.addp.v16i8"
16089        )]
16090        fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
16091    }
16092    unsafe { _vpaddq_s8(a, b) }
16093}
16094#[doc = "Add Pairwise"]
16095#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
16096#[inline]
16097#[target_feature(enable = "neon")]
16098#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16099#[cfg_attr(test, assert_instr(addp))]
16100pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
16101    unsafe extern "unadjusted" {
16102        #[cfg_attr(
16103            any(target_arch = "aarch64", target_arch = "arm64ec"),
16104            link_name = "llvm.aarch64.neon.addp.v8i16"
16105        )]
16106        fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
16107    }
16108    unsafe { _vpaddq_s16(a, b) }
16109}
16110#[doc = "Add Pairwise"]
16111#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
16112#[inline]
16113#[target_feature(enable = "neon")]
16114#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16115#[cfg_attr(test, assert_instr(addp))]
16116pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
16117    unsafe extern "unadjusted" {
16118        #[cfg_attr(
16119            any(target_arch = "aarch64", target_arch = "arm64ec"),
16120            link_name = "llvm.aarch64.neon.addp.v4i32"
16121        )]
16122        fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
16123    }
16124    unsafe { _vpaddq_s32(a, b) }
16125}
16126#[doc = "Add Pairwise"]
16127#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
16128#[inline]
16129#[target_feature(enable = "neon")]
16130#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16131#[cfg_attr(test, assert_instr(addp))]
16132pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
16133    unsafe extern "unadjusted" {
16134        #[cfg_attr(
16135            any(target_arch = "aarch64", target_arch = "arm64ec"),
16136            link_name = "llvm.aarch64.neon.addp.v2i64"
16137        )]
16138        fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
16139    }
16140    unsafe { _vpaddq_s64(a, b) }
16141}
16142#[doc = "Add Pairwise"]
16143#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
16144#[inline]
16145#[cfg(target_endian = "little")]
16146#[target_feature(enable = "neon")]
16147#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16148#[cfg_attr(test, assert_instr(addp))]
16149pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16150    unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) }
16151}
16152#[doc = "Add Pairwise"]
16153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
16154#[inline]
16155#[cfg(target_endian = "big")]
16156#[target_feature(enable = "neon")]
16157#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16158#[cfg_attr(test, assert_instr(addp))]
16159pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16160    let a: uint8x16_t =
16161        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
16162    let b: uint8x16_t =
16163        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
16164    unsafe {
16165        let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b)));
16166        simd_shuffle!(
16167            ret_val,
16168            ret_val,
16169            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
16170        )
16171    }
16172}
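// An illustrative sketch (editorial addition; this reading of the shuffles
// above is an editorial assumption, not generator documentation): the
// big-endian path reverses lanes before and after delegating through
// `transmute` so that lane numbering stays consistent with the little-endian
// convention used throughout this file. Observable behaviour is therefore the
// same on both endiannesses: the low half of the result folds adjacent pairs
// of `a`, the high half folds adjacent pairs of `b`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vpaddq_u8() {
    unsafe {
        let a: uint8x16_t = transmute([1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
        let b: uint8x16_t = transmute([16u8; 16]);
        let r: [u8; 16] = transmute(vpaddq_u8(a, b));
        assert_eq!(r, [3, 7, 11, 15, 19, 23, 27, 31, 32, 32, 32, 32, 32, 32, 32, 32]);
    }
}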
16173#[doc = "Add Pairwise"]
16174#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
16175#[inline]
16176#[cfg(target_endian = "little")]
16177#[target_feature(enable = "neon")]
16178#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16179#[cfg_attr(test, assert_instr(addp))]
16180pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16181    unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) }
16182}
16183#[doc = "Add Pairwise"]
16184#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
16185#[inline]
16186#[cfg(target_endian = "big")]
16187#[target_feature(enable = "neon")]
16188#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16189#[cfg_attr(test, assert_instr(addp))]
16190pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16191    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
16192    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
16193    unsafe {
16194        let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b)));
16195        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
16196    }
16197}
16198#[doc = "Add Pairwise"]
16199#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
16200#[inline]
16201#[cfg(target_endian = "little")]
16202#[target_feature(enable = "neon")]
16203#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16204#[cfg_attr(test, assert_instr(addp))]
16205pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16206    unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) }
16207}
16208#[doc = "Add Pairwise"]
16209#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
16210#[inline]
16211#[cfg(target_endian = "big")]
16212#[target_feature(enable = "neon")]
16213#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16214#[cfg_attr(test, assert_instr(addp))]
16215pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16216    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
16217    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) };
16218    unsafe {
16219        let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b)));
16220        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
16221    }
16222}
16223#[doc = "Add Pairwise"]
16224#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
16225#[inline]
16226#[cfg(target_endian = "little")]
16227#[target_feature(enable = "neon")]
16228#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16229#[cfg_attr(test, assert_instr(addp))]
16230pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
16231    unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) }
16232}
16233#[doc = "Add Pairwise"]
16234#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
16235#[inline]
16236#[cfg(target_endian = "big")]
16237#[target_feature(enable = "neon")]
16238#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16239#[cfg_attr(test, assert_instr(addp))]
16240pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
16241    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
16242    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) };
16243    unsafe {
16244        let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b)));
16245        simd_shuffle!(ret_val, ret_val, [1, 0])
16246    }
16247}
16248#[doc = "Floating-point maximum pairwise"]
16249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
16250#[inline]
16251#[target_feature(enable = "neon,fp16")]
16252#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16253#[cfg_attr(test, assert_instr(fmaxp))]
16254pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16255    unsafe extern "unadjusted" {
16256        #[cfg_attr(
16257            any(target_arch = "aarch64", target_arch = "arm64ec"),
16258            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
16259        )]
16260        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16261    }
16262    unsafe { _vpmax_f16(a, b) }
16263}
16264#[doc = "Floating-point maximum pairwise"]
16265#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
16266#[inline]
16267#[target_feature(enable = "neon,fp16")]
16268#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16269#[cfg_attr(test, assert_instr(fmaxp))]
16270pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16271    unsafe extern "unadjusted" {
16272        #[cfg_attr(
16273            any(target_arch = "aarch64", target_arch = "arm64ec"),
16274            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
16275        )]
16276        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16277    }
16278    unsafe { _vpmaxq_f16(a, b) }
16279}
16280#[doc = "Floating-point maximum number pairwise"]
16281#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
16282#[inline]
16283#[target_feature(enable = "neon,fp16")]
16284#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16285#[cfg_attr(test, assert_instr(fmaxnmp))]
16286pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16287    unsafe extern "unadjusted" {
16288        #[cfg_attr(
16289            any(target_arch = "aarch64", target_arch = "arm64ec"),
16290            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
16291        )]
16292        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16293    }
16294    unsafe { _vpmaxnm_f16(a, b) }
16295}
16296#[doc = "Floating-point maximum number pairwise"]
16297#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
16298#[inline]
16299#[target_feature(enable = "neon,fp16")]
16300#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16301#[cfg_attr(test, assert_instr(fmaxnmp))]
16302pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16303    unsafe extern "unadjusted" {
16304        #[cfg_attr(
16305            any(target_arch = "aarch64", target_arch = "arm64ec"),
16306            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
16307        )]
16308        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16309    }
16310    unsafe { _vpmaxnmq_f16(a, b) }
16311}
16312#[doc = "Floating-point Maximum Number Pairwise (vector)."]
16313#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
16314#[inline]
16315#[target_feature(enable = "neon")]
16316#[cfg_attr(test, assert_instr(fmaxnmp))]
16317#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16318pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
16319    unsafe extern "unadjusted" {
16320        #[cfg_attr(
16321            any(target_arch = "aarch64", target_arch = "arm64ec"),
16322            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
16323        )]
16324        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
16325    }
16326    unsafe { _vpmaxnm_f32(a, b) }
16327}
16328#[doc = "Floating-point Maximum Number Pairwise (vector)."]
16329#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
16330#[inline]
16331#[target_feature(enable = "neon")]
16332#[cfg_attr(test, assert_instr(fmaxnmp))]
16333#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16334pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16335    unsafe extern "unadjusted" {
16336        #[cfg_attr(
16337            any(target_arch = "aarch64", target_arch = "arm64ec"),
16338            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
16339        )]
16340        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16341    }
16342    unsafe { _vpmaxnmq_f32(a, b) }
16343}
16344#[doc = "Floating-point Maximum Number Pairwise (vector)."]
16345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
16346#[inline]
16347#[target_feature(enable = "neon")]
16348#[cfg_attr(test, assert_instr(fmaxnmp))]
16349#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16350pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16351    unsafe extern "unadjusted" {
16352        #[cfg_attr(
16353            any(target_arch = "aarch64", target_arch = "arm64ec"),
16354            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
16355        )]
16356        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16357    }
16358    unsafe { _vpmaxnmq_f64(a, b) }
16359}
16360#[doc = "Floating-point maximum number pairwise"]
16361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
16362#[inline]
16363#[target_feature(enable = "neon")]
16364#[cfg_attr(test, assert_instr(fmaxnmp))]
16365#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16366pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
16367    unsafe extern "unadjusted" {
16368        #[cfg_attr(
16369            any(target_arch = "aarch64", target_arch = "arm64ec"),
16370            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
16371        )]
16372        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
16373    }
16374    unsafe { _vpmaxnmqd_f64(a) }
16375}
16376#[doc = "Floating-point maximum number pairwise"]
16377#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
16378#[inline]
16379#[target_feature(enable = "neon")]
16380#[cfg_attr(test, assert_instr(fmaxnmp))]
16381#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16382pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
16383    unsafe extern "unadjusted" {
16384        #[cfg_attr(
16385            any(target_arch = "aarch64", target_arch = "arm64ec"),
16386            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
16387        )]
16388        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
16389    }
16390    unsafe { _vpmaxnms_f32(a) }
16391}
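// An illustrative sketch (editorial addition, not generator output) of what
// the "maximum number" family adds over the plain maximum: following IEEE
// maxNum, a quiet NaN in one lane loses to a number in the other, whereas the
// plain FMAXP variants would propagate the NaN.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vpmaxnms_f32() {
    unsafe {
        let a: float32x2_t = transmute([f32::NAN, 1.0f32]);
        assert_eq!(vpmaxnms_f32(a), 1.0);
    }
}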
16392#[doc = "Folding maximum of adjacent pairs"]
16393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
16394#[inline]
16395#[target_feature(enable = "neon")]
16396#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16397#[cfg_attr(test, assert_instr(fmaxp))]
16398pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16399    unsafe extern "unadjusted" {
16400        #[cfg_attr(
16401            any(target_arch = "aarch64", target_arch = "arm64ec"),
16402            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
16403        )]
16404        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16405    }
16406    unsafe { _vpmaxq_f32(a, b) }
16407}
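// An illustrative usage sketch (editorial addition, not generator output) of
// the pairwise lane arrangement: the low half of the result folds adjacent
// pairs of `a`, the high half folds adjacent pairs of `b`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vpmaxq_f32() {
    unsafe {
        let a: float32x4_t = transmute([1.0f32, 4.0, 3.0, 2.0]);
        let b: float32x4_t = transmute([8.0f32, 5.0, 6.0, 7.0]);
        let r: [f32; 4] = transmute(vpmaxq_f32(a, b));
        assert_eq!(r, [4.0, 3.0, 8.0, 7.0]);
    }
}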
16408#[doc = "Folding maximum of adjacent pairs"]
16409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
16410#[inline]
16411#[target_feature(enable = "neon")]
16412#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16413#[cfg_attr(test, assert_instr(fmaxp))]
16414pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16415    unsafe extern "unadjusted" {
16416        #[cfg_attr(
16417            any(target_arch = "aarch64", target_arch = "arm64ec"),
16418            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
16419        )]
16420        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16421    }
16422    unsafe { _vpmaxq_f64(a, b) }
16423}
16424#[doc = "Folding maximum of adjacent pairs"]
16425#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
16426#[inline]
16427#[target_feature(enable = "neon")]
16428#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16429#[cfg_attr(test, assert_instr(smaxp))]
16430pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
16431    unsafe extern "unadjusted" {
16432        #[cfg_attr(
16433            any(target_arch = "aarch64", target_arch = "arm64ec"),
16434            link_name = "llvm.aarch64.neon.smaxp.v16i8"
16435        )]
16436        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
16437    }
16438    unsafe { _vpmaxq_s8(a, b) }
16439}
16440#[doc = "Folding maximum of adjacent pairs"]
16441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
16442#[inline]
16443#[target_feature(enable = "neon")]
16444#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16445#[cfg_attr(test, assert_instr(smaxp))]
16446pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
16447    unsafe extern "unadjusted" {
16448        #[cfg_attr(
16449            any(target_arch = "aarch64", target_arch = "arm64ec"),
16450            link_name = "llvm.aarch64.neon.smaxp.v8i16"
16451        )]
16452        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
16453    }
16454    unsafe { _vpmaxq_s16(a, b) }
16455}
16456#[doc = "Folding maximum of adjacent pairs"]
16457#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
16458#[inline]
16459#[target_feature(enable = "neon")]
16460#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16461#[cfg_attr(test, assert_instr(smaxp))]
16462pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
16463    unsafe extern "unadjusted" {
16464        #[cfg_attr(
16465            any(target_arch = "aarch64", target_arch = "arm64ec"),
16466            link_name = "llvm.aarch64.neon.smaxp.v4i32"
16467        )]
16468        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
16469    }
16470    unsafe { _vpmaxq_s32(a, b) }
16471}
16472#[doc = "Folding maximum of adjacent pairs"]
16473#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
16474#[inline]
16475#[target_feature(enable = "neon")]
16476#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16477#[cfg_attr(test, assert_instr(umaxp))]
16478pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16479    unsafe extern "unadjusted" {
16480        #[cfg_attr(
16481            any(target_arch = "aarch64", target_arch = "arm64ec"),
16482            link_name = "llvm.aarch64.neon.umaxp.v16i8"
16483        )]
16484        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
16485    }
16486    unsafe { _vpmaxq_u8(a, b) }
16487}
16488#[doc = "Folding maximum of adjacent pairs"]
16489#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
16490#[inline]
16491#[target_feature(enable = "neon")]
16492#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16493#[cfg_attr(test, assert_instr(umaxp))]
16494pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16495    unsafe extern "unadjusted" {
16496        #[cfg_attr(
16497            any(target_arch = "aarch64", target_arch = "arm64ec"),
16498            link_name = "llvm.aarch64.neon.umaxp.v8i16"
16499        )]
16500        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
16501    }
16502    unsafe { _vpmaxq_u16(a, b) }
16503}
16504#[doc = "Folding maximum of adjacent pairs"]
16505#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
16506#[inline]
16507#[target_feature(enable = "neon")]
16508#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16509#[cfg_attr(test, assert_instr(umaxp))]
16510pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16511    unsafe extern "unadjusted" {
16512        #[cfg_attr(
16513            any(target_arch = "aarch64", target_arch = "arm64ec"),
16514            link_name = "llvm.aarch64.neon.umaxp.v4i32"
16515        )]
16516        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
16517    }
16518    unsafe { _vpmaxq_u32(a, b) }
16519}
16520#[doc = "Floating-point maximum pairwise"]
16521#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
16522#[inline]
16523#[target_feature(enable = "neon")]
16524#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16525#[cfg_attr(test, assert_instr(fmaxp))]
16526pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
16527    unsafe extern "unadjusted" {
16528        #[cfg_attr(
16529            any(target_arch = "aarch64", target_arch = "arm64ec"),
16530            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
16531        )]
16532        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
16533    }
16534    unsafe { _vpmaxqd_f64(a) }
16535}
16536#[doc = "Floating-point maximum pairwise"]
16537#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
16538#[inline]
16539#[target_feature(enable = "neon")]
16540#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16541#[cfg_attr(test, assert_instr(fmaxp))]
16542pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
16543    unsafe extern "unadjusted" {
16544        #[cfg_attr(
16545            any(target_arch = "aarch64", target_arch = "arm64ec"),
16546            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
16547        )]
16548        fn _vpmaxs_f32(a: float32x2_t) -> f32;
16549    }
16550    unsafe { _vpmaxs_f32(a) }
16551}
16552#[doc = "Floating-point minimum pairwise"]
16553#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
16554#[inline]
16555#[target_feature(enable = "neon,fp16")]
16556#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16557#[cfg_attr(test, assert_instr(fminp))]
16558pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16559    unsafe extern "unadjusted" {
16560        #[cfg_attr(
16561            any(target_arch = "aarch64", target_arch = "arm64ec"),
16562            link_name = "llvm.aarch64.neon.fminp.v4f16"
16563        )]
16564        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16565    }
16566    unsafe { _vpmin_f16(a, b) }
16567}
16568#[doc = "Floating-point minimum pairwise"]
16569#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
16570#[inline]
16571#[target_feature(enable = "neon,fp16")]
16572#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16573#[cfg_attr(test, assert_instr(fminp))]
16574pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16575    unsafe extern "unadjusted" {
16576        #[cfg_attr(
16577            any(target_arch = "aarch64", target_arch = "arm64ec"),
16578            link_name = "llvm.aarch64.neon.fminp.v8f16"
16579        )]
16580        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16581    }
16582    unsafe { _vpminq_f16(a, b) }
16583}
16584#[doc = "Floating-point minimum number pairwise"]
16585#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
16586#[inline]
16587#[target_feature(enable = "neon,fp16")]
16588#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16589#[cfg_attr(test, assert_instr(fminnmp))]
16590pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16591    unsafe extern "unadjusted" {
16592        #[cfg_attr(
16593            any(target_arch = "aarch64", target_arch = "arm64ec"),
16594            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
16595        )]
16596        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16597    }
16598    unsafe { _vpminnm_f16(a, b) }
16599}
16600#[doc = "Floating-point minimum number pairwise"]
16601#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
16602#[inline]
16603#[target_feature(enable = "neon,fp16")]
16604#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
16605#[cfg_attr(test, assert_instr(fminnmp))]
16606pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16607    unsafe extern "unadjusted" {
16608        #[cfg_attr(
16609            any(target_arch = "aarch64", target_arch = "arm64ec"),
16610            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
16611        )]
16612        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16613    }
16614    unsafe { _vpminnmq_f16(a, b) }
16615}
16616#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16617#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
16618#[inline]
16619#[target_feature(enable = "neon")]
16620#[cfg_attr(test, assert_instr(fminnmp))]
16621#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16622pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
16623    unsafe extern "unadjusted" {
16624        #[cfg_attr(
16625            any(target_arch = "aarch64", target_arch = "arm64ec"),
16626            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
16627        )]
16628        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
16629    }
16630    unsafe { _vpminnm_f32(a, b) }
16631}
16632#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16633#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
16634#[inline]
16635#[target_feature(enable = "neon")]
16636#[cfg_attr(test, assert_instr(fminnmp))]
16637#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16638pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16639    unsafe extern "unadjusted" {
16640        #[cfg_attr(
16641            any(target_arch = "aarch64", target_arch = "arm64ec"),
16642            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
16643        )]
16644        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16645    }
16646    unsafe { _vpminnmq_f32(a, b) }
16647}
16648#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16649#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
16650#[inline]
16651#[target_feature(enable = "neon")]
16652#[cfg_attr(test, assert_instr(fminnmp))]
16653#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16654pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16655    unsafe extern "unadjusted" {
16656        #[cfg_attr(
16657            any(target_arch = "aarch64", target_arch = "arm64ec"),
16658            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
16659        )]
16660        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16661    }
16662    unsafe { _vpminnmq_f64(a, b) }
16663}
16664#[doc = "Floating-point minimum number pairwise"]
16665#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
16666#[inline]
16667#[target_feature(enable = "neon")]
16668#[cfg_attr(test, assert_instr(fminnmp))]
16669#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16670pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
16671    unsafe extern "unadjusted" {
16672        #[cfg_attr(
16673            any(target_arch = "aarch64", target_arch = "arm64ec"),
16674            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
16675        )]
16676        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
16677    }
16678    unsafe { _vpminnmqd_f64(a) }
16679}
16680#[doc = "Floating-point minimum number pairwise"]
16681#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
16682#[inline]
16683#[target_feature(enable = "neon")]
16684#[cfg_attr(test, assert_instr(fminnmp))]
16685#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16686pub fn vpminnms_f32(a: float32x2_t) -> f32 {
16687    unsafe extern "unadjusted" {
16688        #[cfg_attr(
16689            any(target_arch = "aarch64", target_arch = "arm64ec"),
16690            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
16691        )]
16692        fn _vpminnms_f32(a: float32x2_t) -> f32;
16693    }
16694    unsafe { _vpminnms_f32(a) }
16695}
16696#[doc = "Folding minimum of adjacent pairs"]
16697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
16698#[inline]
16699#[target_feature(enable = "neon")]
16700#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16701#[cfg_attr(test, assert_instr(fminp))]
16702pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16703    unsafe extern "unadjusted" {
16704        #[cfg_attr(
16705            any(target_arch = "aarch64", target_arch = "arm64ec"),
16706            link_name = "llvm.aarch64.neon.fminp.v4f32"
16707        )]
16708        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16709    }
16710    unsafe { _vpminq_f32(a, b) }
16711}
16712#[doc = "Folding minimum of adjacent pairs"]
16713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
16714#[inline]
16715#[target_feature(enable = "neon")]
16716#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16717#[cfg_attr(test, assert_instr(fminp))]
16718pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16719    unsafe extern "unadjusted" {
16720        #[cfg_attr(
16721            any(target_arch = "aarch64", target_arch = "arm64ec"),
16722            link_name = "llvm.aarch64.neon.fminp.v2f64"
16723        )]
16724        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16725    }
16726    unsafe { _vpminq_f64(a, b) }
16727}
16728#[doc = "Folding minimum of adjacent pairs"]
16729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
16730#[inline]
16731#[target_feature(enable = "neon")]
16732#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16733#[cfg_attr(test, assert_instr(sminp))]
16734pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
16735    unsafe extern "unadjusted" {
16736        #[cfg_attr(
16737            any(target_arch = "aarch64", target_arch = "arm64ec"),
16738            link_name = "llvm.aarch64.neon.sminp.v16i8"
16739        )]
16740        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
16741    }
16742    unsafe { _vpminq_s8(a, b) }
16743}
16744#[doc = "Folding minimum of adjacent pairs"]
16745#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
16746#[inline]
16747#[target_feature(enable = "neon")]
16748#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16749#[cfg_attr(test, assert_instr(sminp))]
16750pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
16751    unsafe extern "unadjusted" {
16752        #[cfg_attr(
16753            any(target_arch = "aarch64", target_arch = "arm64ec"),
16754            link_name = "llvm.aarch64.neon.sminp.v8i16"
16755        )]
16756        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
16757    }
16758    unsafe { _vpminq_s16(a, b) }
16759}
16760#[doc = "Folding minimum of adjacent pairs"]
16761#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
16762#[inline]
16763#[target_feature(enable = "neon")]
16764#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16765#[cfg_attr(test, assert_instr(sminp))]
16766pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
16767    unsafe extern "unadjusted" {
16768        #[cfg_attr(
16769            any(target_arch = "aarch64", target_arch = "arm64ec"),
16770            link_name = "llvm.aarch64.neon.sminp.v4i32"
16771        )]
16772        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
16773    }
16774    unsafe { _vpminq_s32(a, b) }
16775}
16776#[doc = "Folding minimum of adjacent pairs"]
16777#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
16778#[inline]
16779#[target_feature(enable = "neon")]
16780#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16781#[cfg_attr(test, assert_instr(uminp))]
16782pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16783    unsafe extern "unadjusted" {
16784        #[cfg_attr(
16785            any(target_arch = "aarch64", target_arch = "arm64ec"),
16786            link_name = "llvm.aarch64.neon.uminp.v16i8"
16787        )]
16788        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
16789    }
16790    unsafe { _vpminq_u8(a, b) }
16791}
16792#[doc = "Folding minimum of adjacent pairs"]
16793#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
16794#[inline]
16795#[target_feature(enable = "neon")]
16796#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16797#[cfg_attr(test, assert_instr(uminp))]
16798pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16799    unsafe extern "unadjusted" {
16800        #[cfg_attr(
16801            any(target_arch = "aarch64", target_arch = "arm64ec"),
16802            link_name = "llvm.aarch64.neon.uminp.v8i16"
16803        )]
16804        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
16805    }
16806    unsafe { _vpminq_u16(a, b) }
16807}
16808#[doc = "Folding minimum of adjacent pairs"]
16809#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
16810#[inline]
16811#[target_feature(enable = "neon")]
16812#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16813#[cfg_attr(test, assert_instr(uminp))]
16814pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16815    unsafe extern "unadjusted" {
16816        #[cfg_attr(
16817            any(target_arch = "aarch64", target_arch = "arm64ec"),
16818            link_name = "llvm.aarch64.neon.uminp.v4i32"
16819        )]
16820        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
16821    }
16822    unsafe { _vpminq_u32(a, b) }
16823}
16824#[doc = "Floating-point minimum pairwise"]
16825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
16826#[inline]
16827#[target_feature(enable = "neon")]
16828#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16829#[cfg_attr(test, assert_instr(fminp))]
16830pub fn vpminqd_f64(a: float64x2_t) -> f64 {
16831    unsafe extern "unadjusted" {
16832        #[cfg_attr(
16833            any(target_arch = "aarch64", target_arch = "arm64ec"),
16834            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
16835        )]
16836        fn _vpminqd_f64(a: float64x2_t) -> f64;
16837    }
16838    unsafe { _vpminqd_f64(a) }
16839}
16840#[doc = "Floating-point minimum pairwise"]
16841#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
16842#[inline]
16843#[target_feature(enable = "neon")]
16844#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16845#[cfg_attr(test, assert_instr(fminp))]
16846pub fn vpmins_f32(a: float32x2_t) -> f32 {
16847    unsafe extern "unadjusted" {
16848        #[cfg_attr(
16849            any(target_arch = "aarch64", target_arch = "arm64ec"),
16850            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
16851        )]
16852        fn _vpmins_f32(a: float32x2_t) -> f32;
16853    }
16854    unsafe { _vpmins_f32(a) }
16855}
16856#[doc = "Signed saturating Absolute value"]
16857#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
16858#[inline]
16859#[target_feature(enable = "neon")]
16860#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16861#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16862pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
16863    unsafe extern "unadjusted" {
16864        #[cfg_attr(
16865            any(target_arch = "aarch64", target_arch = "arm64ec"),
16866            link_name = "llvm.aarch64.neon.sqabs.v1i64"
16867        )]
16868        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
16869    }
16870    unsafe { _vqabs_s64(a) }
16871}
16872#[doc = "Signed saturating Absolute value"]
16873#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
16874#[inline]
16875#[target_feature(enable = "neon")]
16876#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16877#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16878pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
16879    unsafe extern "unadjusted" {
16880        #[cfg_attr(
16881            any(target_arch = "aarch64", target_arch = "arm64ec"),
16882            link_name = "llvm.aarch64.neon.sqabs.v2i64"
16883        )]
16884        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
16885    }
16886    unsafe { _vqabsq_s64(a) }
16887}
16888#[doc = "Signed saturating absolute value"]
16889#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
16890#[inline]
16891#[target_feature(enable = "neon")]
16892#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16893#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16894pub fn vqabsb_s8(a: i8) -> i8 {
16895    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
16896}
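// An illustrative sketch (editorial addition, not generator output): SQABS
// saturates instead of wrapping, so the absolute value of i8::MIN clamps to
// i8::MAX rather than overflowing back to i8::MIN.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqabsb_s8() {
    assert_eq!(vqabsb_s8(-5), 5);
    assert_eq!(vqabsb_s8(i8::MIN), i8::MAX);
}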
16897#[doc = "Signed saturating absolute value"]
16898#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
16899#[inline]
16900#[target_feature(enable = "neon")]
16901#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16902#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16903pub fn vqabsh_s16(a: i16) -> i16 {
16904    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
16905}
16906#[doc = "Signed saturating absolute value"]
16907#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
16908#[inline]
16909#[target_feature(enable = "neon")]
16910#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16911#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16912pub fn vqabss_s32(a: i32) -> i32 {
16913    unsafe extern "unadjusted" {
16914        #[cfg_attr(
16915            any(target_arch = "aarch64", target_arch = "arm64ec"),
16916            link_name = "llvm.aarch64.neon.sqabs.i32"
16917        )]
16918        fn _vqabss_s32(a: i32) -> i32;
16919    }
16920    unsafe { _vqabss_s32(a) }
16921}
16922#[doc = "Signed saturating absolute value"]
16923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
16924#[inline]
16925#[target_feature(enable = "neon")]
16926#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16927#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16928pub fn vqabsd_s64(a: i64) -> i64 {
16929    unsafe extern "unadjusted" {
16930        #[cfg_attr(
16931            any(target_arch = "aarch64", target_arch = "arm64ec"),
16932            link_name = "llvm.aarch64.neon.sqabs.i64"
16933        )]
16934        fn _vqabsd_s64(a: i64) -> i64;
16935    }
16936    unsafe { _vqabsd_s64(a) }
16937}
16938#[doc = "Saturating add"]
16939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
16940#[inline]
16941#[target_feature(enable = "neon")]
16942#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16943#[cfg_attr(test, assert_instr(sqadd))]
16944pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
16945    let a: int8x8_t = vdup_n_s8(a);
16946    let b: int8x8_t = vdup_n_s8(b);
16947    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
16948}
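// An illustrative sketch (editorial addition, not generator output): the
// scalar saturating add clamps at the type bounds instead of wrapping.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqaddb_s8() {
    assert_eq!(vqaddb_s8(100, 27), 127);
    // Saturates at i8::MAX rather than wrapping to a negative value.
    assert_eq!(vqaddb_s8(100, 100), 127);
    assert_eq!(vqaddb_s8(-100, -100), i8::MIN);
}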
16949#[doc = "Saturating add"]
16950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
16951#[inline]
16952#[target_feature(enable = "neon")]
16953#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16954#[cfg_attr(test, assert_instr(sqadd))]
16955pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
16956    let a: int16x4_t = vdup_n_s16(a);
16957    let b: int16x4_t = vdup_n_s16(b);
16958    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
16959}
16960#[doc = "Saturating add"]
16961#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
16962#[inline]
16963#[target_feature(enable = "neon")]
16964#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16965#[cfg_attr(test, assert_instr(uqadd))]
16966pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
16967    let a: uint8x8_t = vdup_n_u8(a);
16968    let b: uint8x8_t = vdup_n_u8(b);
16969    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
16970}
16971#[doc = "Saturating add"]
16972#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
16973#[inline]
16974#[target_feature(enable = "neon")]
16975#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16976#[cfg_attr(test, assert_instr(uqadd))]
16977pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
16978    let a: uint16x4_t = vdup_n_u16(a);
16979    let b: uint16x4_t = vdup_n_u16(b);
16980    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
16981}
16982#[doc = "Saturating add"]
16983#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
16984#[inline]
16985#[target_feature(enable = "neon")]
16986#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16987#[cfg_attr(test, assert_instr(sqadd))]
16988pub fn vqadds_s32(a: i32, b: i32) -> i32 {
16989    unsafe extern "unadjusted" {
16990        #[cfg_attr(
16991            any(target_arch = "aarch64", target_arch = "arm64ec"),
16992            link_name = "llvm.aarch64.neon.sqadd.i32"
16993        )]
16994        fn _vqadds_s32(a: i32, b: i32) -> i32;
16995    }
16996    unsafe { _vqadds_s32(a, b) }
16997}
16998#[doc = "Saturating add"]
16999#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
17000#[inline]
17001#[target_feature(enable = "neon")]
17002#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17003#[cfg_attr(test, assert_instr(sqadd))]
17004pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
17005    unsafe extern "unadjusted" {
17006        #[cfg_attr(
17007            any(target_arch = "aarch64", target_arch = "arm64ec"),
17008            link_name = "llvm.aarch64.neon.sqadd.i64"
17009        )]
17010        fn _vqaddd_s64(a: i64, b: i64) -> i64;
17011    }
17012    unsafe { _vqaddd_s64(a, b) }
17013}
17014#[doc = "Saturating add"]
17015#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
17016#[inline]
17017#[target_feature(enable = "neon")]
17018#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17019#[cfg_attr(test, assert_instr(uqadd))]
17020pub fn vqadds_u32(a: u32, b: u32) -> u32 {
17021    unsafe extern "unadjusted" {
17022        #[cfg_attr(
17023            any(target_arch = "aarch64", target_arch = "arm64ec"),
17024            link_name = "llvm.aarch64.neon.uqadd.i32"
17025        )]
17026        fn _vqadds_u32(a: u32, b: u32) -> u32;
17027    }
17028    unsafe { _vqadds_u32(a, b) }
17029}
17030#[doc = "Saturating add"]
17031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
17032#[inline]
17033#[target_feature(enable = "neon")]
17034#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17035#[cfg_attr(test, assert_instr(uqadd))]
17036pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
17037    unsafe extern "unadjusted" {
17038        #[cfg_attr(
17039            any(target_arch = "aarch64", target_arch = "arm64ec"),
17040            link_name = "llvm.aarch64.neon.uqadd.i64"
17041        )]
17042        fn _vqaddd_u64(a: u64, b: u64) -> u64;
17043    }
17044    unsafe { _vqaddd_u64(a, b) }
17045}
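// --- Illustrative example (hand-written sketch, not generator output) ---
// The scalar saturating adds clamp at the type bounds rather than wrapping,
// for both the signed (sqadd) and unsigned (uqadd) forms.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqadd_scalar_saturation() {
    assert_eq!(vqaddb_s8(120, 100), i8::MAX); // 220 clamps to 127
    assert_eq!(vqaddb_u8(200, 100), u8::MAX); // 300 clamps to 255
    assert_eq!(vqaddd_s64(i64::MIN, -1), i64::MIN); // clamps at the minimum
}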
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
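// --- Illustrative example (hand-written sketch, not generator output) ---
// The `_high` multiply-add-long forms widen only the upper half of each
// 128-bit input: for the s16 variant, lane i of the result is
// a[i] saturating-plus 2 * b[i + 4] * c[i + 4]. With every lane equal, the
// arithmetic is easy to check.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqdmlal_high_s16() {
    let a = vdupq_n_s32(1);
    let b = vdupq_n_s16(3);
    let c = vdupq_n_s16(4);
    let r = vqdmlal_high_s16(a, b, c);
    assert_eq!(vgetq_lane_s32::<0>(r), 1 + 2 * 3 * 4); // 25 in every lane
}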
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
    vqaddd_s64(a, vqdmulls_s32(b, c))
}
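// --- Illustrative example (hand-written sketch, not generator output) ---
// The scalar multiply-add-long computes a + 2*b*c with the product widened to
// the next element size; the doubling multiply and the accumulate saturate
// independently.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqdmlalh_s16() {
    assert_eq!(vqdmlalh_s16(10, 3, 4), 10 + 2 * 3 * 4); // 34
    // 2 * i16::MIN * i16::MIN == 2^31 overflows i32, so the product
    // saturates to i32::MAX before the accumulate.
    assert_eq!(vqdmlalh_s16(0, i16::MIN, i16::MIN), i32::MAX);
}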
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    vqsubd_s64(a, vqdmulls_s32(b, c))
}
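// --- Illustrative example (hand-written sketch, not generator output) ---
// The subtract-long forms mirror the add-long ones: a - 2*b*c, with both the
// doubling multiply and the subtraction saturating.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqdmlslh_s16() {
    assert_eq!(vqdmlslh_s16(100, 3, 4), 100 - 2 * 3 * 4); // 76
    // The product saturates to i32::MAX, then i32::MIN - i32::MAX saturates
    // again, pinning the result at i32::MIN.
    assert_eq!(vqdmlslh_s16(i32::MIN, i16::MIN, i16::MIN), i32::MIN);
}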
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
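// --- Illustrative example (hand-written sketch, not generator output) ---
// `sqdmulh` keeps only the high half of the doubled product, i.e. roughly
// (2*a*b) >> 16 for i16 operands, which is exactly a Q15 fixed-point
// multiply. The only saturating case is i16::MIN * i16::MIN, whose doubled
// product (2^31) does not fit in i32.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqdmulhh_s16() {
    let a: i16 = 0x4000; // 0.5 in Q15
    let b: i16 = 0x2000; // 0.25 in Q15
    assert_eq!(vqdmulhh_s16(a, b), 0x1000); // 0.125 in Q15
    assert_eq!(vqdmulhh_s16(i16::MIN, i16::MIN), i16::MAX); // 1.0 not representable
}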
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
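// --- Illustrative example (hand-written sketch, not generator output) ---
// The `_high` long multiplies read the upper halves of their 128-bit inputs,
// so for the s32 variant lane i of the result is 2 * a[i + 2] * b[i + 2]
// widened to 64 bits; the lower halves do not participate at all.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqdmull_high_s32() {
    let a = vcombine_s32(vdup_n_s32(7), vdup_n_s32(100_000));
    let b = vcombine_s32(vdup_n_s32(9), vdup_n_s32(100_000));
    let r = vqdmull_high_s32(a, b);
    assert_eq!(vgetq_lane_s64::<0>(r), 2 * 100_000i64 * 100_000); // not 2*7*9
}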
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    unsafe { _vqdmulls_s32(a, b) }
}
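// --- Illustrative example (hand-written sketch, not generator output) ---
// The scalar long multiply widens before doubling, so only one input pair can
// saturate: i32::MIN * i32::MIN, whose doubled product is 2^63, one past
// i64::MAX.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqdmulls_s32() {
    assert_eq!(vqdmulls_s32(3, 4), 24); // 2 * 3 * 4 widened to i64
    assert_eq!(vqdmulls_s32(i32::MIN, i32::MIN), i64::MAX); // saturates
}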
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_u16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
}
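// --- Illustrative example (hand-written sketch, not generator output) ---
// The `_high` narrowing forms keep `a` as the low half of the result and
// append the saturated, narrowed lanes of `b` as the high half.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqmovn_high_s32() {
    let low = vdup_n_s16(1);
    let wide = vdupq_n_s32(100_000); // exceeds i16::MAX in every lane
    let r = vqmovn_high_s32(low, wide);
    assert_eq!(vgetq_lane_s16::<0>(r), 1); // `a` passed through
    assert_eq!(vgetq_lane_s16::<7>(r), i16::MAX); // 100_000 clamped
}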
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
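// --- Illustrative example (hand-written sketch, not generator output) ---
// The scalar narrowing forms clamp out-of-range values to the bounds of the
// destination type instead of truncating bits.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqmovn_scalar() {
    assert_eq!(vqmovnh_s16(300), i8::MAX); // 300 > 127
    assert_eq!(vqmovnh_s16(-300), i8::MIN);
    assert_eq!(vqmovns_u32(70_000), u16::MAX); // 70_000 > 65_535
}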
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
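// --- Illustrative example (hand-written sketch, not generator output) ---
// The signed-to-unsigned narrowing forms clamp negative inputs to zero and
// too-large inputs to the unsigned maximum of the destination type.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqmovun_scalar() {
    assert_eq!(vqmovunh_s16(-5), 0); // negative clamps to 0
    assert_eq!(vqmovunh_s16(300), u8::MAX); // 300 > 255
    assert_eq!(vqmovund_s64(i64::MAX), u32::MAX);
}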
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
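// --- Illustrative example (hand-written sketch, not generator output) ---
// Saturating negation differs from plain negation only at the type minimum,
// where -MIN is not representable and clamps to MAX.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqneg_scalar() {
    assert_eq!(vqnegb_s8(1), -1);
    assert_eq!(vqnegb_s8(i8::MIN), i8::MAX); // wrapping_neg would give -128
    assert_eq!(vqnegd_s64(i64::MIN), i64::MAX);
}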
17985#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17986#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
17987#[inline]
17988#[target_feature(enable = "rdm")]
17989#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17990#[rustc_legacy_const_generics(3)]
17991#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17992pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
17993    static_assert_uimm_bits!(LANE, 2);
17994    unsafe {
17995        let c: int16x4_t =
17996            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
17997        vqrdmlah_s16(a, b, c)
17998    }
17999}
18000#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
18001#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
18002#[inline]
18003#[target_feature(enable = "rdm")]
18004#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
18005#[rustc_legacy_const_generics(3)]
18006#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18007pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
18008    static_assert_uimm_bits!(LANE, 1);
18009    unsafe {
18010        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
18011        vqrdmlah_s32(a, b, c)
18012    }
18013}
18014#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
18015#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
18016#[inline]
18017#[target_feature(enable = "rdm")]
18018#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
18019#[rustc_legacy_const_generics(3)]
18020#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
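// Illustrative sketch (not part of the generated output): the `_lane_` /
// `_laneq_` suffix selects one element of `c` via the LANE const generic and
// broadcasts it before the multiply, which is how a Q15 kernel reuses a
// single coefficient. Assumes a crate built for aarch64 with `rdm` enabled.
#[cfg(all(test, target_arch = "aarch64", target_feature = "rdm"))]
mod vqrdmlah_lane_sketch {
    use super::*;
    #[test]
    fn broadcasts_one_coefficient() {
        let c = vdup_n_s16(0x4000); // 0.5 in Q15, in every lane
        // LANE = 1 picks c[1]: (0 + 2*0x2000*0x4000 + 0x8000) >> 16 = 0x1000,
        // i.e. 0.25 * 0.5 = 0.125 in Q15.
        assert_eq!(vqrdmlahh_lane_s16::<1>(0, 0x2000, c), 0x1000);
    }
}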
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
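// Illustrative sketch (not part of the generated output): one 16-bit lane of
// SQRDMLAH written as plain integer arithmetic, following the Arm pseudocode
// sat(((acc << 16) + 2*b*c + (1 << 15)) >> 16). A host-side reference like
// this is handy when sanity-checking the intrinsics above.
#[cfg(test)]
mod sqrdmlah_reference_sketch {
    fn sqrdmlah_ref_i16(acc: i16, b: i16, c: i16) -> i16 {
        let wide = ((acc as i64) << 16) + 2 * (b as i64) * (c as i64) + (1 << 15);
        (wide >> 16).clamp(i16::MIN as i64, i16::MAX as i64) as i16
    }
    #[test]
    fn model_matches_expected_values() {
        assert_eq!(sqrdmlah_ref_i16(0, 0x4000, 0x4000), 0x2000);
        assert_eq!(sqrdmlah_ref_i16(i16::MAX, i16::MAX, i16::MAX), i16::MAX);
    }
}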
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
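// Illustrative sketch (not part of the generated output): the scalar forms
// behave like a single lane of the vector ops. Assumes `rdm` is enabled for
// the whole crate (e.g. `-C target-feature=+rdm`), which makes the calls safe.
#[cfg(all(test, target_arch = "aarch64", target_feature = "rdm"))]
mod vqrdmlahh_s16_sketch {
    use super::*;
    #[test]
    fn accumulates_and_saturates() {
        // ((1 << 16) + 2*0x4000*0x4000 + 0x8000) >> 16 = 0x2001.
        assert_eq!(vqrdmlahh_s16(1, 0x4000, 0x4000), 0x2001);
        // Past the i16 range the result clamps instead of wrapping.
        assert_eq!(vqrdmlahh_s16(i16::MAX, i16::MAX, i16::MAX), i16::MAX);
    }
}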
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
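// Illustrative sketch (not part of the generated output): vqrdmlsh* mirrors
// vqrdmlah* with the doubled product subtracted from the accumulator, i.e.
// sat(((acc << 16) - 2*b*c + (1 << 15)) >> 16) per 16-bit lane. Assumes a
// crate built for aarch64 with `rdm` enabled.
#[cfg(all(test, target_arch = "aarch64", target_feature = "rdm"))]
mod vqrdmlshh_s16_sketch {
    use super::*;
    #[test]
    fn subtracts_and_saturates() {
        // (0 - 2*0x4000*0x4000 + 0x8000) >> 16 = -0x2000.
        assert_eq!(vqrdmlshh_s16(0, 0x4000, 0x4000), -0x2000);
        // Below the i16 range the result clamps at i16::MIN.
        assert_eq!(vqrdmlshh_s16(i16::MIN, i16::MAX, i16::MAX), i16::MIN);
    }
}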
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
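// Illustrative sketch (not part of the generated output): vqrdmulh* is the
// multiply-only form, the classic Q15/Q31 fixed-point product. Only `neon`
// is required here, which aarch64 targets always provide.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrdmulhh_s16_sketch {
    use super::*;
    #[test]
    fn q15_product() {
        // 0.5 * 0.5 in Q15: (2*0x4000*0x4000 + 0x8000) >> 16 = 0x2000.
        assert_eq!(vqrdmulhh_s16(0x4000, 0x4000), 0x2000);
        // -1.0 * -1.0 overflows Q15 and saturates to the largest value.
        assert_eq!(vqrdmulhh_s16(i16::MIN, i16::MIN), i16::MAX);
    }
}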
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqrshld_u64(a, b) }
}
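// Illustrative sketch (not part of the generated output): for the vqrshl*
// family the sign of `b` picks the direction; negative amounts perform a
// rounding shift right, positive ones a saturating shift left.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrshl_scalar_sketch {
    use super::*;
    #[test]
    fn rounds_right_and_saturates_left() {
        // Rounding shift right by one: (3 + 1) >> 1 = 2.
        assert_eq!(vqrshld_s64(3, -1), 2);
        // 0x40 << 2 = 0x100 does not fit in i8, so it saturates.
        assert_eq!(vqrshlb_s8(0x40, 2), i8::MAX);
    }
}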
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
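// Illustrative sketch (not part of the generated output): the `_high_` forms
// narrow `b` into the upper half of the result while passing `a` through as
// the lower half (the SQRSHRN2 pattern). `vgetq_lane_s8` is assumed to be in
// scope via the surrounding module's imports.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrshrn_high_sketch {
    use super::*;
    #[test]
    fn keeps_low_half_and_narrows_into_top() {
        let low = vdup_n_s8(7);
        let wide = vdupq_n_s16(0x7fff);
        // (0x7fff + 0x80) >> 8 = 0x80, which saturates to i8::MAX.
        let r = vqrshrn_high_n_s16::<8>(low, wide);
        assert_eq!(vgetq_lane_s8::<0>(r), 7); // low half untouched
        assert_eq!(vgetq_lane_s8::<15>(r), i8::MAX); // narrowed into the top
    }
}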
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
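// Illustrative sketch (not part of the generated output): the scalar narrows
// shift right with rounding, then saturate into the half-width type.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrshrn_scalar_sketch {
    use super::*;
    #[test]
    fn rounds_then_narrows() {
        // (0x1234_8000 + 0x8000) >> 16 = 0x1235, still within i16 range.
        assert_eq!(vqrshrns_n_s32::<16>(0x1234_8000), 0x1235);
        // A value still too large for i32 after the shift saturates.
        assert_eq!(vqrshrnd_n_s64::<1>(i64::MAX), i32::MAX);
    }
}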
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
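// Illustrative sketch (not part of the generated output): the `un` variants
// narrow a signed input to an unsigned result, so negatives clamp to zero.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrshrun_scalar_sketch {
    use super::*;
    #[test]
    fn clamps_negatives_to_zero() {
        assert_eq!(vqrshrunh_n_s16::<4>(-100), 0);
        // (0x7fff + 8) >> 4 = 0x800 exceeds u8, saturating to 255.
        assert_eq!(vqrshrunh_n_s16::<4>(0x7fff), u8::MAX);
    }
}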
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
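// Illustrative sketch (not part of the generated output): with an immediate
// shift the destination's signedness decides where saturation kicks in.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqshl_n_scalar_sketch {
    use super::*;
    #[test]
    fn signedness_decides_saturation() {
        // 0x10 << 3 = 0x80 overflows i8 but fits in u8.
        assert_eq!(vqshlb_n_s8::<3>(0x10), i8::MAX);
        assert_eq!(vqshlb_n_u8::<3>(0x10), 0x80);
    }
}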
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
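// Illustrative sketch (not part of the generated output): the register form
// reads the shift amount from `b`; negative amounts shift right without
// rounding, unlike the vqrshl* family above.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqshl_register_sketch {
    use super::*;
    #[test]
    fn variable_shift() {
        assert_eq!(vqshld_s64(5, -1), 2); // plain truncating right shift
        assert_eq!(vqshlh_s16(0x4000, 2), i16::MAX); // left shift saturates
    }
}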
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
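// Illustrative sketch (not part of the generated output): SQSHLU takes a
// signed input but produces an unsigned result, so negatives clamp to zero
// while positive values gain one extra bit of headroom.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqshlu_scalar_sketch {
    use super::*;
    #[test]
    fn signed_in_unsigned_out() {
        assert_eq!(vqshlub_n_s8::<1>(-1), 0);
        assert_eq!(vqshlub_n_s8::<1>(0x7f), 0xfe); // 254 fits in u8
    }
}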
19152#[doc = "Signed saturating shift right narrow"]
19153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
19154#[inline]
19155#[target_feature(enable = "neon")]
19156#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
19157#[rustc_legacy_const_generics(2)]
19158#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19159pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
19160    static_assert!(N >= 1 && N <= 8);
19161    unsafe {
19162        simd_shuffle!(
19163            a,
19164            vqshrn_n_s16::<N>(b),
19165            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
19166        )
19167    }
19168}
19169#[doc = "Signed saturating shift right narrow"]
19170#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
19171#[inline]
19172#[target_feature(enable = "neon")]
19173#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
19174#[rustc_legacy_const_generics(2)]
19175#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19176pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
19177    static_assert!(N >= 1 && N <= 16);
19178    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
19179}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    unsafe { _vqshrnd_n_s64(a, N) }
}
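// Editorial addition, not produced by stdarch-gen-arm: an illustrative sketch
// of the scalar form above (hypothetical helper name). The shift truncates,
// and results outside the i32 range saturate instead of wrapping.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshrnd_n_s64() {
    // 4096 >> 4 == 256 fits in i32, so the value narrows exactly.
    assert_eq!(vqshrnd_n_s64::<4>(4096), 256);
    // i64::MAX >> 1 still exceeds i32::MAX, so the result saturates.
    assert_eq!(vqshrnd_n_s64::<1>(i64::MAX), i32::MAX);
}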
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
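// Editorial addition, not produced by stdarch-gen-arm: a sketch (hypothetical
// helper name) of what distinguishes the `un` variants above: they narrow a
// *signed* input to an *unsigned* result, so negative inputs clamp to zero.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshruns_n_s32() {
    // A positive value in range narrows exactly: 1000 >> 2 == 250.
    assert_eq!(vqshruns_n_s32::<2>(1000), 250u16);
    // A negative input saturates to the unsigned minimum, 0.
    assert_eq!(vqshruns_n_s32::<2>(-1000), 0u16);
}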
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
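// Editorial addition, not produced by stdarch-gen-arm: an illustrative sketch
// (hypothetical helper name) of the saturating behaviour of the scalar
// subtractions above: results clamp at the type bounds instead of wrapping.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqsub_scalar() {
    // 3 - 10 would wrap for a plain u8 subtraction; the saturating form
    // clamps to 0.
    assert_eq!(vqsubb_u8(3, 10), 0);
    // The signed form clamps at i8::MIN rather than overflowing.
    assert_eq!(vqsubb_s8(i8::MIN, 1), i8::MIN);
}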
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqsubd_u64(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    vqtbl2q(a.0, a.1, b)
}
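// Editorial addition, not produced by stdarch-gen-arm: a sketch (hypothetical
// helper name) showing how the two-table variants above extend the index
// range: indices 0..=15 select from the first table and 16..=31 from the
// second, as if the two registers formed one 32-byte table.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqtbl2q_u8() {
    let tables: uint8x16x2_t = uint8x16x2_t(vdupq_n_u8(1), vdupq_n_u8(2));
    // Lane 0 indexes the first table; lane 1 (index 16) indexes the second.
    let idx: uint8x16_t = vsetq_lane_u8::<1>(16, vdupq_n_u8(0));
    let r: uint8x16_t = vqtbl2q_u8(tables, idx);
    assert_eq!(vgetq_lane_u8::<0>(r), 1);
    assert_eq!(vgetq_lane_u8::<1>(r), 2);
}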
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
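// Editorial addition, not produced by stdarch-gen-arm: a sketch (hypothetical
// helper name) of the widest lookup form. Four tables give a 64-byte index
// range, so index 48 falls into the fourth table, `a.3`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqtbl4q_s8() {
    let tables: int8x16x4_t = int8x16x4_t(
        vdupq_n_s8(10),
        vdupq_n_s8(20),
        vdupq_n_s8(30),
        vdupq_n_s8(40),
    );
    // Every lane looks up index 48, which selects from the fourth table.
    let r: int8x16_t = vqtbl4q_s8(tables, vdupq_n_u8(48));
    assert_eq!(vgetq_lane_s8::<0>(r), 40);
}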
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    vqtbx1q(a, b, c)
}
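// Editorial addition, not produced by stdarch-gen-arm: a sketch (hypothetical
// helper name) of how TBX differs from TBL. For an out-of-range index, TBL
// writes 0, whereas the extended TBX form leaves the corresponding lane of
// the destination `a` unchanged.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqtbx1q_u8() {
    let fallback: uint8x16_t = vdupq_n_u8(9);
    let table: uint8x16_t = vdupq_n_u8(42);
    // Lane 0 uses a valid index; lane 1 is out of range and keeps `fallback`.
    let idx: uint8x16_t = vsetq_lane_u8::<1>(255, vdupq_n_u8(0));
    let r: uint8x16_t = vqtbx1q_u8(fallback, table, idx);
    assert_eq!(vgetq_lane_u8::<0>(r), 42);
    assert_eq!(vgetq_lane_u8::<1>(r), 9);
}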
20442#[doc = "Extended table look-up"]
20443#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
20444#[inline]
20445#[target_feature(enable = "neon")]
20446#[cfg_attr(test, assert_instr(tbx))]
20447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20448pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
20449    unsafe {
20450        let x = transmute(vqtbx1(transmute(a), transmute(b), c));
20451        x
20452    }
20453}
20454#[doc = "Extended table look-up"]
20455#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
20456#[inline]
20457#[target_feature(enable = "neon")]
20458#[cfg_attr(test, assert_instr(tbx))]
20459#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20460pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
20461    unsafe {
20462        let x = transmute(vqtbx1q(transmute(a), transmute(b), c));
20463        x
20464    }
20465}
20466#[doc = "Extended table look-up"]
20467#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
20468#[inline]
20469#[target_feature(enable = "neon")]
20470#[cfg_attr(test, assert_instr(tbx))]
20471#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20472pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
20473    unsafe {
20474        let x = transmute(vqtbx1(transmute(a), transmute(b), c));
20475        x
20476    }
20477}
20478#[doc = "Extended table look-up"]
20479#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
20480#[inline]
20481#[target_feature(enable = "neon")]
20482#[cfg_attr(test, assert_instr(tbx))]
20483#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20484pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
20485    unsafe {
20486        let x = transmute(vqtbx1q(transmute(a), transmute(b), c));
20487        x
20488    }
20489}
20490#[doc = "Extended table look-up"]
20491#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
20492#[inline]
20493#[target_feature(enable = "neon")]
20494#[cfg_attr(test, assert_instr(tbx))]
20495#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20496fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
20497    unsafe extern "unadjusted" {
20498        #[cfg_attr(
20499            any(target_arch = "aarch64", target_arch = "arm64ec"),
20500            link_name = "llvm.aarch64.neon.tbx2.v8i8"
20501        )]
20502        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
20503    }
20504    unsafe { _vqtbx2(a, b, c, d) }
20505}
20506#[doc = "Extended table look-up"]
20507#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
20508#[inline]
20509#[target_feature(enable = "neon")]
20510#[cfg_attr(test, assert_instr(tbx))]
20511#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20512fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
20513    unsafe extern "unadjusted" {
20514        #[cfg_attr(
20515            any(target_arch = "aarch64", target_arch = "arm64ec"),
20516            link_name = "llvm.aarch64.neon.tbx2.v16i8"
20517        )]
20518        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
20519    }
20520    unsafe { _vqtbx2q(a, b, c, d) }
20521}
20522#[doc = "Extended table look-up"]
20523#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
20524#[inline]
20525#[target_feature(enable = "neon")]
20526#[cfg_attr(test, assert_instr(tbx))]
20527#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20528pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
20529    vqtbx2(a, b.0, b.1, c)
20530}
20531#[doc = "Extended table look-up"]
20532#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
20533#[inline]
20534#[target_feature(enable = "neon")]
20535#[cfg_attr(test, assert_instr(tbx))]
20536#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20537pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
20538    vqtbx2q(a, b.0, b.1, c)
20539}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x2_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x2_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x3_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x3_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
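// --- Editorial usage sketch (not part of the generated bindings). ---
// With four 16-byte registers, the `vqtbx4*` variants index a full
// 64-byte table; indices 64..=255 again fall through to `a`. A sketch
// with a hypothetical helper name:
#[cfg(all(test, target_arch = "aarch64", target_endian = "little"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vqtbx4_s8_usage_sketch() {
    // 64-byte table in which entry i holds 63 - i (a reversed ramp).
    let mut table = [0i8; 64];
    let mut i = 0;
    while i < 64 {
        table[i] = 63 - i as i8;
        i += 1;
    }
    let idx: [u8; 8] = [0, 63, 64, 7, 8, 15, 16, 31];
    unsafe {
        let t = int8x16x4_t(
            vld1q_s8(table.as_ptr()),
            vld1q_s8(table.as_ptr().add(16)),
            vld1q_s8(table.as_ptr().add(32)),
            vld1q_s8(table.as_ptr().add(48)),
        );
        let r = vqtbx4_s8(vdup_n_s8(-128), t, vld1_u8(idx.as_ptr()));
        assert_eq!(vget_lane_s8::<0>(r), 63);
        assert_eq!(vget_lane_s8::<1>(r), 0);
        assert_eq!(vget_lane_s8::<2>(r), -128); // index 64 is out of range
    }
}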
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x4_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x4_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vrax1q_u64(a, b) }
}
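// --- Editorial usage sketch (not part of the generated bindings). ---
// RAX1 computes, independently in each 64-bit lane, `a ^ rotate_left(b, 1)`;
// it exists to accelerate the theta step of SHA-3/Keccak. A scalar
// cross-check with a hypothetical helper name:
#[cfg(all(test, target_arch = "aarch64", target_endian = "little"))]
#[target_feature(enable = "neon,sha3")]
#[allow(dead_code)]
fn vrax1q_u64_usage_sketch() {
    let a: u64 = 0x0123_4567_89ab_cdef;
    let b: u64 = 0x8000_0000_0000_0001;
    let r = vrax1q_u64(vdupq_n_u64(a), vdupq_n_u64(b));
    assert_eq!(vgetq_lane_u64::<0>(r), a ^ b.rotate_left(1));
}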
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.rbit.v8i8"
        )]
        fn _vrbit_s8(a: int8x8_t) -> int8x8_t;
    }
    unsafe { _vrbit_s8(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.rbit.v16i8"
        )]
        fn _vrbitq_s8(a: int8x16_t) -> int8x16_t;
    }
    unsafe { _vrbitq_s8(a) }
}
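// --- Editorial usage sketch (not part of the generated bindings). ---
// RBIT reverses the bit order within each element independently (for
// these intrinsics, within each byte); it does not reorder the bytes
// themselves. A sketch with a hypothetical helper name:
#[cfg(all(test, target_arch = "aarch64", target_endian = "little"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vrbit_s8_usage_sketch() {
    let r = vrbit_s8(vdup_n_s8(0b0000_0001));
    // 0b0000_0001 bit-reversed is 0b1000_0000, which is -128 as an i8.
    assert_eq!(vget_lane_s8::<0>(r), -128);
}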
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpeh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrecpsh_f16(a, b) }
}
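// --- Editorial usage sketch (not part of the generated bindings). ---
// FRECPE alone yields only a low-precision (roughly 8-bit) estimate of
// 1/a. FRECPS computes `2.0 - a * b`, which is exactly the correction
// factor of a Newton-Raphson step, so each iteration of
// `x = x * vrecpss_f32(a, x)` roughly doubles the number of accurate
// bits. A scalar sketch with a hypothetical helper name:
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn reciprocal_refinement_sketch() {
    let a: f32 = 3.0;
    let mut x = vrecpes_f32(a); // rough initial estimate of 1/3
    x *= vrecpss_f32(a, x); // first Newton-Raphson refinement
    x *= vrecpss_f32(a, x); // second refinement, near full f32 precision
    let target = 1.0_f32 / 3.0;
    let err = if x > target { x - target } else { target - x };
    assert!(err < 1.0e-5);
}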
21711#[doc = "Floating-point reciprocal exponent"]
21712#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
21713#[inline]
21714#[target_feature(enable = "neon")]
21715#[cfg_attr(test, assert_instr(frecpx))]
21716#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21717pub fn vrecpxd_f64(a: f64) -> f64 {
21718    unsafe extern "unadjusted" {
21719        #[cfg_attr(
21720            any(target_arch = "aarch64", target_arch = "arm64ec"),
21721            link_name = "llvm.aarch64.neon.frecpx.f64"
21722        )]
21723        fn _vrecpxd_f64(a: f64) -> f64;
21724    }
21725    unsafe { _vrecpxd_f64(a) }
21726}
21727#[doc = "Floating-point reciprocal exponent"]
21728#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
21729#[inline]
21730#[target_feature(enable = "neon")]
21731#[cfg_attr(test, assert_instr(frecpx))]
21732#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21733pub fn vrecpxs_f32(a: f32) -> f32 {
21734    unsafe extern "unadjusted" {
21735        #[cfg_attr(
21736            any(target_arch = "aarch64", target_arch = "arm64ec"),
21737            link_name = "llvm.aarch64.neon.frecpx.f32"
21738        )]
21739        fn _vrecpxs_f32(a: f32) -> f32;
21740    }
21741    unsafe { _vrecpxs_f32(a) }
21742}
21743#[doc = "Floating-point reciprocal exponent"]
21744#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
21745#[inline]
21746#[cfg_attr(test, assert_instr(frecpx))]
21747#[target_feature(enable = "neon,fp16")]
21748#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21749pub fn vrecpxh_f16(a: f16) -> f16 {
21750    unsafe extern "unadjusted" {
21751        #[cfg_attr(
21752            any(target_arch = "aarch64", target_arch = "arm64ec"),
21753            link_name = "llvm.aarch64.neon.frecpx.f16"
21754        )]
21755        fn _vrecpxh_f16(a: f16) -> f16;
21756    }
21757    unsafe { _vrecpxh_f16(a) }
21758}
21759#[doc = "Vector reinterpret cast operation"]
21760#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
21761#[inline]
21762#[cfg(target_endian = "little")]
21763#[target_feature(enable = "neon,fp16")]
21764#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21765#[cfg_attr(test, assert_instr(nop))]
21766pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
21767    unsafe { transmute(a) }
21768}
21769#[doc = "Vector reinterpret cast operation"]
21770#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
21771#[inline]
21772#[cfg(target_endian = "big")]
21773#[target_feature(enable = "neon,fp16")]
21774#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21775#[cfg_attr(test, assert_instr(nop))]
21776pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
21777    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
21778    unsafe { transmute(a) }
21779}
21780#[doc = "Vector reinterpret cast operation"]
21781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
21782#[inline]
21783#[cfg(target_endian = "little")]
21784#[target_feature(enable = "neon,fp16")]
21785#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21786#[cfg_attr(test, assert_instr(nop))]
21787pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
21788    unsafe { transmute(a) }
21789}
21790#[doc = "Vector reinterpret cast operation"]
21791#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
21792#[inline]
21793#[cfg(target_endian = "big")]
21794#[target_feature(enable = "neon,fp16")]
21795#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21796#[cfg_attr(test, assert_instr(nop))]
21797pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
21798    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
21799    unsafe {
21800        let ret_val: float64x2_t = transmute(a);
21801        simd_shuffle!(ret_val, ret_val, [1, 0])
21802    }
21803}
21804#[doc = "Vector reinterpret cast operation"]
21805#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
21806#[inline]
21807#[cfg(target_endian = "little")]
21808#[target_feature(enable = "neon,fp16")]
21809#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21810#[cfg_attr(test, assert_instr(nop))]
21811pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
21812    unsafe { transmute(a) }
21813}
21814#[doc = "Vector reinterpret cast operation"]
21815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
21816#[inline]
21817#[cfg(target_endian = "big")]
21818#[target_feature(enable = "neon,fp16")]
21819#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21820#[cfg_attr(test, assert_instr(nop))]
21821pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
21822    unsafe {
21823        let ret_val: float16x4_t = transmute(a);
21824        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
21825    }
21826}
21827#[doc = "Vector reinterpret cast operation"]
21828#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
21829#[inline]
21830#[cfg(target_endian = "little")]
21831#[target_feature(enable = "neon,fp16")]
21832#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21833#[cfg_attr(test, assert_instr(nop))]
21834pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
21835    unsafe { transmute(a) }
21836}
21837#[doc = "Vector reinterpret cast operation"]
21838#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
21839#[inline]
21840#[cfg(target_endian = "big")]
21841#[target_feature(enable = "neon,fp16")]
21842#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
21843#[cfg_attr(test, assert_instr(nop))]
21844pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
21845    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21846    unsafe {
21847        let ret_val: float16x8_t = transmute(a);
21848        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
21849    }
21850}
21851#[doc = "Vector reinterpret cast operation"]
21852#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
21853#[inline]
21854#[cfg(target_endian = "little")]
21855#[target_feature(enable = "neon")]
21856#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21857#[cfg_attr(test, assert_instr(nop))]
21858pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
21859    unsafe { transmute(a) }
21860}
21861#[doc = "Vector reinterpret cast operation"]
21862#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
21863#[inline]
21864#[cfg(target_endian = "big")]
21865#[target_feature(enable = "neon")]
21866#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21867#[cfg_attr(test, assert_instr(nop))]
21868pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
21869    unsafe {
21870        let ret_val: float64x2_t = transmute(a);
21871        simd_shuffle!(ret_val, ret_val, [1, 0])
21872    }
21873}
21874#[doc = "Vector reinterpret cast operation"]
21875#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
21876#[inline]
21877#[cfg(target_endian = "little")]
21878#[target_feature(enable = "neon")]
21879#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21880#[cfg_attr(test, assert_instr(nop))]
21881pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
21882    unsafe { transmute(a) }
21883}
21884#[doc = "Vector reinterpret cast operation"]
21885#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
21886#[inline]
21887#[cfg(target_endian = "big")]
21888#[target_feature(enable = "neon")]
21889#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21890#[cfg_attr(test, assert_instr(nop))]
21891pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
21892    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21893    unsafe { transmute(a) }
21894}
21895#[doc = "Vector reinterpret cast operation"]
21896#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
21897#[inline]
21898#[cfg(target_endian = "little")]
21899#[target_feature(enable = "neon")]
21900#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21901#[cfg_attr(test, assert_instr(nop))]
21902pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
21903    unsafe { transmute(a) }
21904}
21905#[doc = "Vector reinterpret cast operation"]
21906#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
21907#[inline]
21908#[cfg(target_endian = "big")]
21909#[target_feature(enable = "neon")]
21910#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21911#[cfg_attr(test, assert_instr(nop))]
21912pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
21913    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21914    unsafe { transmute(a) }
21915}
21916#[doc = "Vector reinterpret cast operation"]
21917#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
21918#[inline]
21919#[cfg(target_endian = "little")]
21920#[target_feature(enable = "neon")]
21921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21922#[cfg_attr(test, assert_instr(nop))]
21923pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
21924    unsafe { transmute(a) }
21925}
21926#[doc = "Vector reinterpret cast operation"]
21927#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
21928#[inline]
21929#[cfg(target_endian = "big")]
21930#[target_feature(enable = "neon")]
21931#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21932#[cfg_attr(test, assert_instr(nop))]
21933pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
21934    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
21935    unsafe {
21936        let ret_val: float64x2_t = transmute(a);
21937        simd_shuffle!(ret_val, ret_val, [1, 0])
21938    }
21939}
21940#[doc = "Vector reinterpret cast operation"]
21941#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
21942#[inline]
21943#[cfg(target_endian = "little")]
21944#[target_feature(enable = "neon")]
21945#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21946#[cfg_attr(test, assert_instr(nop))]
21947pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
21948    unsafe { transmute(a) }
21949}
21950#[doc = "Vector reinterpret cast operation"]
21951#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
21952#[inline]
21953#[cfg(target_endian = "big")]
21954#[target_feature(enable = "neon")]
21955#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21956#[cfg_attr(test, assert_instr(nop))]
21957pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
21958    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
21959    unsafe {
21960        let ret_val: poly64x2_t = transmute(a);
21961        simd_shuffle!(ret_val, ret_val, [1, 0])
21962    }
21963}
21964#[doc = "Vector reinterpret cast operation"]
21965#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
21966#[inline]
21967#[cfg(target_endian = "little")]
21968#[target_feature(enable = "neon")]
21969#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21970#[cfg_attr(test, assert_instr(nop))]
21971pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
21972    unsafe { transmute(a) }
21973}
21974#[doc = "Vector reinterpret cast operation"]
21975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
21976#[inline]
21977#[cfg(target_endian = "big")]
21978#[target_feature(enable = "neon")]
21979#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21980#[cfg_attr(test, assert_instr(nop))]
21981pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
21982    unsafe {
21983        let ret_val: float32x2_t = transmute(a);
21984        simd_shuffle!(ret_val, ret_val, [1, 0])
21985    }
21986}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
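// Illustrative sketch (hand-written, not generated): one-lane-to-one-lane
// casts such as `vreinterpret_s64_f64` need no `cfg(target_endian)` split
// at all; with a single element on both sides there is no lane order to
// fix up, only the raw 64 bits. The sketch assumes `vdup_n_f64` and
// `vget_lane_s64` from this crate.
#[cfg(all(test, target_arch = "aarch64"))]
fn _vreinterpret_s64_f64_bits_sketch() {
    unsafe {
        let v: float64x1_t = vdup_n_f64(-2.0);
        let bits: int64x1_t = vreinterpret_s64_f64(v);
        // The cast matches the scalar bit pattern exactly.
        assert_eq!(vget_lane_s64::<0>(bits), (-2.0f64).to_bits() as i64);
    }
}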
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
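// Illustrative sketch (hand-written, not generated): `p128` is a plain
// `u128` scalar, so the big-endian variant above only normalizes the
// *input* vector; the scalar result has no lanes to reverse. The sketch
// assumes `vdupq_n_f64` from this crate and is pinned to little-endian
// purely for the bit-position claim in the comment.
#[cfg(all(test, target_arch = "aarch64", target_endian = "little"))]
fn _vreinterpretq_p128_f64_sketch() {
    unsafe {
        let v: float64x2_t = vdupq_n_f64(1.0);
        let bits: p128 = vreinterpretq_p128_f64(v);
        // Two copies of 1.0f64 (0x3FF0_0000_0000_0000) side by side.
        let one = 0x3FF0_0000_0000_0000u128;
        assert_eq!(bits, (one << 64) | one);
    }
}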
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
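// Illustrative sketch (hand-written, not generated): in the reverse
// direction the pattern mirrors itself; `vreinterpret_f64_s8` produces a
// one-lane result, so the big-endian variant only normalizes the eight
// input lanes and returns the transmuted value as-is. The sketch assumes
// `vdup_n_s8` and `vget_lane_f64` from this crate.
#[cfg(all(test, target_arch = "aarch64"))]
fn _vreinterpret_f64_s8_sketch() {
    unsafe {
        // Eight identical 0x3F bytes give the f64 bit pattern
        // 0x3F3F_3F3F_3F3F_3F3F regardless of byte order.
        let v: int8x8_t = vdup_n_s8(0x3F);
        let d: float64x1_t = vreinterpret_f64_s8(v);
        assert_eq!(vget_lane_f64::<0>(d).to_bits(), 0x3F3F_3F3F_3F3F_3F3Fu64);
    }
}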
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
23224#[doc = "Floating-point round to 32-bit integer toward zero"]
23225#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
23226#[inline]
23227#[target_feature(enable = "neon,frintts")]
23228#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23229#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
23230pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
23231    unsafe extern "unadjusted" {
23232        #[cfg_attr(
23233            any(target_arch = "aarch64", target_arch = "arm64ec"),
23234            link_name = "llvm.aarch64.neon.frint32z.v2f32"
23235        )]
23236        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
23237    }
23238    unsafe { _vrnd32z_f32(a) }
23239}
23240#[doc = "Floating-point round to 32-bit integer toward zero"]
23241#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
23242#[inline]
23243#[target_feature(enable = "neon,frintts")]
23244#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23245#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
23246pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
23247    unsafe extern "unadjusted" {
23248        #[cfg_attr(
23249            any(target_arch = "aarch64", target_arch = "arm64ec"),
23250            link_name = "llvm.aarch64.neon.frint32z.v4f32"
23251        )]
23252        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
23253    }
23254    unsafe { _vrnd32zq_f32(a) }
23255}
23256#[doc = "Floating-point round to 32-bit integer toward zero"]
23257#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
23258#[inline]
23259#[target_feature(enable = "neon,frintts")]
23260#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23261#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
23262pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
23263    unsafe extern "unadjusted" {
23264        #[cfg_attr(
23265            any(target_arch = "aarch64", target_arch = "arm64ec"),
23266            link_name = "llvm.aarch64.neon.frint32z.v2f64"
23267        )]
23268        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
23269    }
23270    unsafe { _vrnd32zq_f64(a) }
23271}
23272#[doc = "Floating-point round to 32-bit integer toward zero"]
23273#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
23274#[inline]
23275#[target_feature(enable = "neon,frintts")]
23276#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23277#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
23278pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
23279    unsafe extern "unadjusted" {
23280        #[cfg_attr(
23281            any(target_arch = "aarch64", target_arch = "arm64ec"),
23282            link_name = "llvm.aarch64.frint32z.f64"
23283        )]
23284        fn _vrnd32z_f64(a: f64) -> f64;
23285    }
23286    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
23287}
23288#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
23289#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
23290#[inline]
23291#[target_feature(enable = "neon,frintts")]
23292#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23293#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
23294pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
23295    unsafe extern "unadjusted" {
23296        #[cfg_attr(
23297            any(target_arch = "aarch64", target_arch = "arm64ec"),
23298            link_name = "llvm.aarch64.neon.frint64x.v2f32"
23299        )]
23300        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
23301    }
23302    unsafe { _vrnd64x_f32(a) }
23303}
23304#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
23305#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
23306#[inline]
23307#[target_feature(enable = "neon,frintts")]
23308#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23309#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
23310pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
23311    unsafe extern "unadjusted" {
23312        #[cfg_attr(
23313            any(target_arch = "aarch64", target_arch = "arm64ec"),
23314            link_name = "llvm.aarch64.neon.frint64x.v4f32"
23315        )]
23316        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
23317    }
23318    unsafe { _vrnd64xq_f32(a) }
23319}
23320#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
23321#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
23322#[inline]
23323#[target_feature(enable = "neon,frintts")]
23324#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23325#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
23326pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
23327    unsafe extern "unadjusted" {
23328        #[cfg_attr(
23329            any(target_arch = "aarch64", target_arch = "arm64ec"),
23330            link_name = "llvm.aarch64.neon.frint64x.v2f64"
23331        )]
23332        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
23333    }
23334    unsafe { _vrnd64xq_f64(a) }
23335}
23336#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
23337#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
23338#[inline]
23339#[target_feature(enable = "neon,frintts")]
23340#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23341#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
23342pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
23343    unsafe extern "unadjusted" {
23344        #[cfg_attr(
23345            any(target_arch = "aarch64", target_arch = "arm64ec"),
23346            link_name = "llvm.aarch64.frint64x.f64"
23347        )]
23348        fn _vrnd64x_f64(a: f64) -> f64;
23349    }
23350    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
23351}
23352#[doc = "Floating-point round to 64-bit integer toward zero"]
23353#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
23354#[inline]
23355#[target_feature(enable = "neon,frintts")]
23356#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23357#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23358pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
23359    unsafe extern "unadjusted" {
23360        #[cfg_attr(
23361            any(target_arch = "aarch64", target_arch = "arm64ec"),
23362            link_name = "llvm.aarch64.neon.frint64z.v2f32"
23363        )]
23364        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
23365    }
23366    unsafe { _vrnd64z_f32(a) }
23367}
23368#[doc = "Floating-point round to 64-bit integer toward zero"]
23369#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
23370#[inline]
23371#[target_feature(enable = "neon,frintts")]
23372#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23373#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23374pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
23375    unsafe extern "unadjusted" {
23376        #[cfg_attr(
23377            any(target_arch = "aarch64", target_arch = "arm64ec"),
23378            link_name = "llvm.aarch64.neon.frint64z.v4f32"
23379        )]
23380        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
23381    }
23382    unsafe { _vrnd64zq_f32(a) }
23383}
23384#[doc = "Floating-point round to 64-bit integer toward zero"]
23385#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
23386#[inline]
23387#[target_feature(enable = "neon,frintts")]
23388#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23389#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23390pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
23391    unsafe extern "unadjusted" {
23392        #[cfg_attr(
23393            any(target_arch = "aarch64", target_arch = "arm64ec"),
23394            link_name = "llvm.aarch64.neon.frint64z.v2f64"
23395        )]
23396        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
23397    }
23398    unsafe { _vrnd64zq_f64(a) }
23399}
23400#[doc = "Floating-point round to 64-bit integer toward zero"]
23401#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
23402#[inline]
23403#[target_feature(enable = "neon,frintts")]
23404#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23405#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23406pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
23407    unsafe extern "unadjusted" {
23408        #[cfg_attr(
23409            any(target_arch = "aarch64", target_arch = "arm64ec"),
23410            link_name = "llvm.aarch64.frint64z.f64"
23411        )]
23412        fn _vrnd64z_f64(a: f64) -> f64;
23413    }
23414    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
23415}
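// Editorial note (an interpretation of the FRINT32*/FRINT64* instruction
// semantics, not generator output): these intrinsics round each lane to a
// value that is also representable as an i32/i64. Out-of-range lanes, NaNs
// and infinities become the most negative representable integer instead of
// trapping, e.g., hypothetically:
//
//     let v = vdupq_n_f32(3.0e10);      // exceeds i32::MAX
//     let r = vrnd32xq_f32(v);          // every lane becomes -2147483648.0
//
// The `*x` variants use the current FPCR rounding mode; the `*z` variants
// always round toward zero.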
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.trunc.v4f16"
        )]
        fn _vrnd_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrnd_f16(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.trunc.v8f16"
        )]
        fn _vrndq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndq_f16(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.trunc.v2f32"
        )]
        fn _vrnd_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd_f32(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.trunc.v4f32"
        )]
        fn _vrndq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndq_f32(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.trunc.v1f64"
        )]
        fn _vrnd_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrnd_f64(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.trunc.v2f64"
        )]
        fn _vrndq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndq_f64(a) }
}
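// Editorial sketch: `vrnd*` lowers to `llvm.trunc.*` (FRINTZ), i.e. the
// fractional part is discarded regardless of sign. Assuming NEON is enabled:
//
//     let r = vrndq_f32(vdupq_n_f32(-1.7)); // every lane becomes -1.0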
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.round.v4f16"
        )]
        fn _vrnda_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrnda_f16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.round.v8f16"
        )]
        fn _vrndaq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndaq_f16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.round.v2f32"
        )]
        fn _vrnda_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnda_f32(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.round.v4f32"
        )]
        fn _vrndaq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndaq_f32(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.round.v1f64"
        )]
        fn _vrnda_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrnda_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.round.v2f64"
        )]
        fn _vrndaq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndaq_f64(a) }
}
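// Editorial sketch: `vrnda*` lowers to `llvm.round.*` (FRINTA), which rounds
// half-way cases away from zero, unlike the ties-to-even `vrndn*` family:
//
//     let a = vrndaq_f32(vdupq_n_f32(0.5)); // lanes become 1.0
//     let n = vrndnq_f32(vdupq_n_f32(0.5)); // lanes become 0.0 (tie to even)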
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.round.f16"
        )]
        fn _vrndah_f16(a: f16) -> f16;
    }
    unsafe { _vrndah_f16(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.trunc.f16"
        )]
        fn _vrndh_f16(a: f16) -> f16;
    }
    unsafe { _vrndh_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    unsafe { _vrndih_f16(a) }
}
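// Editorial sketch: `vrndi*` lowers to `llvm.nearbyint.*` (FRINTI), rounding
// with whatever mode FPCR currently selects and without signalling inexact.
// Under the default round-to-nearest-even mode:
//
//     let r = vrndiq_f32(vdupq_n_f32(2.5)); // lanes become 2.0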
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.floor.v4f16"
        )]
        fn _vrndm_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndm_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.floor.v8f16"
        )]
        fn _vrndmq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndmq_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.floor.v2f32"
        )]
        fn _vrndm_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndm_f32(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.floor.v4f32"
        )]
        fn _vrndmq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndmq_f32(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.floor.v1f64"
        )]
        fn _vrndm_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndm_f64(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.floor.v2f64"
        )]
        fn _vrndmq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndmq_f64(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.floor.f16"
        )]
        fn _vrndmh_f16(a: f16) -> f16;
    }
    unsafe { _vrndmh_f16(a) }
}
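// Editorial sketch: `vrndm*` lowers to `llvm.floor.*` (FRINTM), rounding
// toward minus infinity, so small negative fractions still move down:
//
//     let r = vrndmq_f32(vdupq_n_f32(-0.1)); // lanes become -1.0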
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frintn.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frintn.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndnq_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f16"
        )]
        fn _vrndnh_f16(a: f16) -> f16;
    }
    unsafe { _vrndnh_f16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    unsafe { _vrndns_f32(a) }
}
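// Editorial sketch: the scalar `vrndns_f32` rounds to nearest with ties to
// even (FRINTN), independent of the current rounding mode:
//
//     vrndns_f32(1.5) == 2.0
//     vrndns_f32(2.5) == 2.0 // the tie goes to the even neighbour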
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.ceil.v4f16"
        )]
        fn _vrndp_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndp_f16(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.ceil.v8f16"
        )]
        fn _vrndpq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndpq_f16(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.ceil.v2f32"
        )]
        fn _vrndp_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndp_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.ceil.v4f32"
        )]
        fn _vrndpq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndpq_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.ceil.v1f64"
        )]
        fn _vrndp_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndp_f64(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.ceil.v2f64"
        )]
        fn _vrndpq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndpq_f64(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.ceil.f16"
        )]
        fn _vrndph_f16(a: f16) -> f16;
    }
    unsafe { _vrndph_f16(a) }
}
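// Editorial sketch: `vrndp*` lowers to `llvm.ceil.*` (FRINTP), rounding toward
// plus infinity:
//
//     let r = vrndp_f64(vdup_n_f64(0.1)); // the lane becomes 1.0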
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v4f16"
        )]
        fn _vrndx_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndx_f16(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v8f16"
        )]
        fn _vrndxq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndxq_f16(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v2f32"
        )]
        fn _vrndx_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndx_f32(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v4f32"
        )]
        fn _vrndxq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndxq_f32(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v1f64"
        )]
        fn _vrndx_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndx_f64(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v2f64"
        )]
        fn _vrndxq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndxq_f64(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.f16"
        )]
        fn _vrndxh_f16(a: f16) -> f16;
    }
    unsafe { _vrndxh_f16(a) }
}
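// Editorial sketch: `vrndx*` lowers to `llvm.rint.*` (FRINTX). Like `vrndi*`
// it follows the current rounding mode, but it additionally signals the
// floating-point inexact exception whenever the result differs from the input:
//
//     let r = vrndx_f32(vdup_n_f32(1.25)); // 1.0 under the default mode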
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_u64(a, -N as i64)
}
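// Editorial note: `vrshrd_n_*` is expressed above as a rounding shift left by
// `-N`. The effect is a shift right by N that rounds to nearest (computing
// `(a + (1 << (N - 1))) >> N` in a wide enough intermediate), with ties
// rounding toward plus infinity. Hypothetical values:
//
//     vrshrd_n_s64::<2>(7)  == 2  // 7/4 = 1.75 -> 2
//     vrshrd_n_s64::<2>(6)  == 2  // 6/4 = 1.5  -> 2 (tie rounds up)
//     vrshrd_n_s64::<2>(-7) == -2 // -1.75      -> -2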
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
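// Editorial sketch: the `vrshrn_high_n_*` helpers narrow `b` with a rounding
// shift right by N and concatenate, with the existing narrow vector `a`
// supplying the low half of the result; the `simd_shuffle!` index lists above
// encode exactly that concatenation. Hypothetically:
//
//     let lo = vdup_n_s16(1);
//     let wide = vdupq_n_s32(100);
//     let r = vrshrn_high_n_s32::<4>(lo, wide); // [1, 1, 1, 1, 6, 6, 6, 6]
//                                               // since (100 + 8) >> 4 == 6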
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
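// Illustrative usage sketch (editorial, not generated): FRSQRTE only gives a
// rough 1/sqrt(x) estimate, and FRSQRTS computes the Newton-Raphson correction
// factor (3 - a*b)/2, so the two intrinsics are meant to be chained. A minimal
// single-iteration sketch; the helper name and the one-step accuracy assumption
// are ours, not Arm's.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn rsqrt_one_step_f32(x: float32x2_t) -> float32x2_t {
    // y0 ~ 1/sqrt(x), accurate to roughly 8 bits.
    let y0 = vrsqrte_f32(x);
    // y1 = y0 * (3 - x*y0*y0)/2, roughly doubling the number of correct bits.
    vmul_f32(y0, vrsqrts_f32(vmul_f32(x, y0), y0))
}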
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
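// Illustrative check (editorial, not generated): "rounding shift right and
// accumulate" means a + ((b + (1 << (N - 1))) >> N), i.e. a 0.5 rounding bias
// is added before the shift. A tiny sketch under that definition:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn rsra_example() {
    // Plain 10 >> 2 truncates to 2; rounding first gives (10 + 2) >> 2 = 3.
    assert_eq!(vrsrad_n_s64::<2>(100, 10), 103);
}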
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
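// Illustrative note (editorial, not generated): the `_high` narrowing variants
// exist so a full 128-bit narrow can be built in two steps; the plain intrinsic
// produces the low half and the `_high` form appends the next batch. A minimal
// sketch, with the helper name our own:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn rsubhn_full_s16(lo_a: int16x8_t, lo_b: int16x8_t, hi_a: int16x8_t, hi_b: int16x8_t) -> int8x16_t {
    // Low 8 lanes from the first pair of operands...
    let low: int8x8_t = vrsubhn_s16(lo_a, lo_b);
    // ...then widen to 16 lanes with the second pair appended on top.
    vrsubhn_high_s16(low, hi_a, hi_b)
}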
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
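// Illustrative usage (editorial, not generated): LANE is a const generic that
// is range-checked at compile time, so inserting into lane 1 of a float64x2_t
// looks like this. The helper name and the constant are placeholders.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn set_high_lane(v: float64x2_t) -> float64x2_t {
    // Overwrite lane 1 with pi, leaving lane 0 untouched.
    vsetq_lane_f64::<1>(core::f64::consts::PI, v)
}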
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
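// Illustrative note (editorial, not generated): SSHL/USHL take the shift
// amount as a signed value in a register, so a negative `b` turns vshld into a
// right shift. A small sketch of both directions:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn shld_example() {
    assert_eq!(vshld_s64(3, 4), 48); // 3 << 4
    assert_eq!(vshld_s64(48, -4), 3); // negative count shifts right instead
}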
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
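// Illustrative note (editorial, not generated): the `_high` widening shifts
// read only the upper half of the input, so one 16-lane vector widens to two
// 8-lane vectors in two calls. A minimal sketch, helper name ours:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn widen_u8_by_2(a: uint8x16_t) -> (uint16x8_t, uint16x8_t) {
    // Low half via vshll_n_u8 on vget_low_u8, high half via the `_high` form.
    (vshll_n_u8::<2>(vget_low_u8(a)), vshll_high_n_u8::<2>(a))
}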
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
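// Illustrative note (editorial, not generated): like the other `_high`
// narrows, vshrn_high_n_* appends a second narrowed batch to an existing
// half-vector, so a full 128-bit "shift right and narrow" takes two calls.
// Sketch with our own helper name:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn shrn_full_u16(lo: uint16x8_t, hi: uint16x8_t) -> uint8x16_t {
    // Keep the high byte of every 16-bit lane.
    let low: uint8x8_t = vshrn_n_u16::<8>(lo);
    vshrn_high_n_u16::<8>(low, hi)
}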
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
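// Illustrative note (editorial, not generated): SLI keeps the low N bits of
// the destination and ORs in the source shifted left by N, i.e.
// (b << N) | (a & ((1 << N) - 1)), which makes it a cheap bit-field insert.
// A scalar sketch with placeholder values:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn sli_example() {
    // Keep the low 8 bits of `a`, splice `b` in above them.
    let a: u64 = 0xAB;
    let b: u64 = 0xCD;
    assert_eq!(vslid_n_u64::<8>(a, b), 0xCDAB);
}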
#[doc = "SM3PARTW1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw1"
        )]
        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm3partw1q_u32(a, b, c) }
}
#[doc = "SM3PARTW2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw2"
        )]
        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm3partw2q_u32(a, b, c) }
}
#[doc = "SM3SS1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3ss1"
        )]
        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm3ss1q_u32(a, b, c) }
}
#[doc = "SM3TT1A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT1B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM4 key"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4ekey"
        )]
        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm4ekeyq_u32(a, b) }
}
#[doc = "SM4 encode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4e"
        )]
        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm4eq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i8"
        )]
        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
    }
    unsafe { _vsqadd_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v16i8"
        )]
        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
    }
    unsafe { _vsqaddq_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i16"
        )]
        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
    }
    unsafe { _vsqadd_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i16"
        )]
        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
    }
    unsafe { _vsqaddq_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i32"
        )]
        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
    }
    unsafe { _vsqadd_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i32"
        )]
        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
    }
    unsafe { _vsqaddq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v1i64"
        )]
        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
    }
    unsafe { _vsqadd_u64(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i64"
        )]
        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
    }
    unsafe { _vsqaddq_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i64"
        )]
        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vsqaddd_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i32"
        )]
        fn _vsqadds_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vsqadds_u32(a, b) }
}
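// Illustrative note (editorial, not generated): USQADD adds a *signed* value
// into an *unsigned* accumulator, saturating at both ends of the unsigned
// range. A scalar sketch of the two clamping cases:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn usqadd_example() {
    assert_eq!(vsqaddb_u8(250, 10), 255); // clamps at u8::MAX
    assert_eq!(vsqaddb_u8(5, -10), 0); // clamps at zero
}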
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_fsqrt(a) }
}
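// Illustrative usage (editorial, not generated): a common pattern for the
// lane-wise square root is computing Euclidean norms; here the length of a
// 2-D vector held in a float32x2_t, with all names our own.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn norm2(v: float32x2_t) -> f32 {
    let sq = vmul_f32(v, v); // (x*x, y*y)
    let sum = vpadd_f32(sq, sq); // both lanes now hold x*x + y*y
    vget_lane_f32::<0>(vsqrt_f32(sum))
}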
#[doc = "Floating-point square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn vsqrth_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.sqrt.f16"
        )]
        fn _vsqrth_f16(a: f16) -> f16;
    }
    unsafe { _vsqrth_f16(a) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i8"
        )]
        fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    unsafe { _vsri_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v16i8"
        )]
        fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    unsafe { _vsriq_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i16"
        )]
        fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    unsafe { _vsri_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i16"
        )]
        fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    unsafe { _vsriq_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i32"
        )]
        fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    unsafe { _vsri_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i32"
        )]
        fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    unsafe { _vsriq_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v1i64"
        )]
        fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vsri_n_s64(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i64"
        )]
        fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vsriq_n_s64(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
25670#[doc = "Shift Right and Insert (immediate)"]
25671#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
25672#[inline]
25673#[target_feature(enable = "neon")]
25674#[cfg_attr(test, assert_instr(sri, N = 1))]
25675#[rustc_legacy_const_generics(2)]
25676#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25677pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
25678    static_assert!(N >= 1 && N <= 8);
25679    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25680}
25681#[doc = "Shift Right and Insert (immediate)"]
25682#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
25683#[inline]
25684#[target_feature(enable = "neon")]
25685#[cfg_attr(test, assert_instr(sri, N = 1))]
25686#[rustc_legacy_const_generics(2)]
25687#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25688pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
25689    static_assert!(N >= 1 && N <= 16);
25690    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25691}
25692#[doc = "Shift Right and Insert (immediate)"]
25693#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
25694#[inline]
25695#[target_feature(enable = "neon")]
25696#[cfg_attr(test, assert_instr(sri, N = 1))]
25697#[rustc_legacy_const_generics(2)]
25698#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25699pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
25700    static_assert!(N >= 1 && N <= 16);
25701    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25702}
25703#[doc = "Shift Right and Insert (immediate)"]
25704#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
25705#[inline]
25706#[target_feature(enable = "neon")]
25707#[cfg_attr(test, assert_instr(sri, N = 1))]
25708#[rustc_legacy_const_generics(2)]
25709#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25710pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
25711    static_assert!(N >= 1 && N <= 32);
25712    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
25713}
25714#[doc = "Shift Right and Insert (immediate)"]
25715#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
25716#[inline]
25717#[target_feature(enable = "neon")]
25718#[cfg_attr(test, assert_instr(sri, N = 1))]
25719#[rustc_legacy_const_generics(2)]
25720#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25721pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
25722    static_assert!(N >= 1 && N <= 32);
25723    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
25724}
25725#[doc = "Shift Right and Insert (immediate)"]
25726#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
25727#[inline]
25728#[target_feature(enable = "neon")]
25729#[cfg_attr(test, assert_instr(sri, N = 1))]
25730#[rustc_legacy_const_generics(2)]
25731#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25732pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
25733    static_assert!(N >= 1 && N <= 64);
25734    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25735}
25736#[doc = "Shift Right and Insert (immediate)"]
25737#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
25738#[inline]
25739#[target_feature(enable = "neon")]
25740#[cfg_attr(test, assert_instr(sri, N = 1))]
25741#[rustc_legacy_const_generics(2)]
25742#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25743pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
25744    static_assert!(N >= 1 && N <= 64);
25745    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25746}
25747#[doc = "Shift Right and Insert (immediate)"]
25748#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
25749#[inline]
25750#[target_feature(enable = "neon")]
25751#[cfg_attr(test, assert_instr(sri, N = 1))]
25752#[rustc_legacy_const_generics(2)]
25753#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25754pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
25755    static_assert!(N >= 1 && N <= 8);
25756    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
25757}
25758#[doc = "Shift Right and Insert (immediate)"]
25759#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
25760#[inline]
25761#[target_feature(enable = "neon")]
25762#[cfg_attr(test, assert_instr(sri, N = 1))]
25763#[rustc_legacy_const_generics(2)]
25764#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25765pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
25766    static_assert!(N >= 1 && N <= 8);
25767    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25768}
25769#[doc = "Shift Right and Insert (immediate)"]
25770#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
25771#[inline]
25772#[target_feature(enable = "neon")]
25773#[cfg_attr(test, assert_instr(sri, N = 1))]
25774#[rustc_legacy_const_generics(2)]
25775#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25776pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
25777    static_assert!(N >= 1 && N <= 16);
25778    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25779}
25780#[doc = "Shift Right and Insert (immediate)"]
25781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
25782#[inline]
25783#[target_feature(enable = "neon")]
25784#[cfg_attr(test, assert_instr(sri, N = 1))]
25785#[rustc_legacy_const_generics(2)]
25786#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25787pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
25788    static_assert!(N >= 1 && N <= 16);
25789    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25790}
25791#[doc = "Shift Right and Insert (immediate)"]
25792#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
25793#[inline]
25794#[target_feature(enable = "neon,aes")]
25795#[cfg_attr(test, assert_instr(sri, N = 1))]
25796#[rustc_legacy_const_generics(2)]
25797#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25798pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
25799    static_assert!(N >= 1 && N <= 64);
25800    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25801}
25802#[doc = "Shift Right and Insert (immediate)"]
25803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
25804#[inline]
25805#[target_feature(enable = "neon,aes")]
25806#[cfg_attr(test, assert_instr(sri, N = 1))]
25807#[rustc_legacy_const_generics(2)]
25808#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25809pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
25810    static_assert!(N >= 1 && N <= 64);
25811    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25812}
25813#[doc = "Shift right and insert"]
25814#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
25815#[inline]
25816#[target_feature(enable = "neon")]
25817#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25818#[rustc_legacy_const_generics(2)]
25819#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
25820pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
25821    static_assert!(N >= 1 && N <= 64);
25822    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25823}
25824#[doc = "Shift right and insert"]
25825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
25826#[inline]
25827#[target_feature(enable = "neon")]
25828#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25829#[rustc_legacy_const_generics(2)]
25830#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
25831pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
25832    static_assert!(N >= 1 && N <= 64);
25833    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
25834}
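// --- Illustrative example (editorial, not generated) ---
// A minimal sketch of the SRI semantics, assuming a NEON-capable aarch64
// target: for each lane, `vsri_n_s8::<N>` keeps the top N bits of `a` and
// fills the remaining low bits with `b` logically shifted right by N. The
// helper name `sri_n4_demo` is ours, not part of the API.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn sri_n4_demo(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // With N = 4, each result lane is (a_lane & 0xF0) | ((b_lane as u8) >> 4).
    vsri_n_s8::<4>(a, b)
}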
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
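// --- Illustrative example (editorial, not generated) ---
// A minimal sketch of a `vst1` store, assuming NEON: as the bodies above show,
// these intrinsics lower to a plain unaligned store, so writing a full vector
// into an ordinary byte buffer needs no special alignment. The helper name
// `store_demo_u8` is ours, not part of the API.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn store_demo_u8(v: uint8x16_t) -> [u8; 16] {
    let mut out = [0u8; 16];
    // Caller must guarantee the destination is valid for 16 bytes.
    vst1q_u8(out.as_mut_ptr(), v);
    out
}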
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0f64"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0f64"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0f64"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0f64"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0f64"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0f64"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
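// --- Illustrative example (editorial, not generated) ---
// A minimal sketch of the `_x2` layout, assuming NEON: `vst1q_f64_x2` stores
// its two vectors back to back with no interleaving, i.e. `b.0` to `a[0..2]`
// and `b.1` to `a[2..4]`. The helper name `store_x2_demo` is ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn store_x2_demo(b: float64x2x2_t) -> [f64; 4] {
    let mut out = [0.0f64; 4];
    // Equivalent to two consecutive vst1q_f64 stores at out[0] and out[2].
    vst1q_f64_x2(out.as_mut_ptr(), b);
    out
}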
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    static_assert!(LANE == 0);
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    *a = simd_extract!(b, LANE as u32);
}
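// --- Illustrative example (editorial, not generated) ---
// A minimal sketch of a lane store, assuming NEON: `vst1q_lane_f64::<LANE>`
// writes exactly one element of the vector to the destination. The helper
// name `store_lane_demo` is ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn store_lane_demo(v: float64x2_t) -> f64 {
    let mut out = 0.0f64;
    // Writes only lane 1 (the high element) of `v` to `out`.
    vst1q_lane_f64::<1>(&mut out, v);
    out
}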
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st1))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v1f64.p0i8"
        )]
        fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    _vst2_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2f64.p0i8"
        )]
        fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    _vst2q_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2i64.p0i8"
        )]
        fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    _vst2q_s64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    vst2q_s64(transmute(a), transmute(b))
}
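// --- Illustrative example (editorial, not generated) ---
// A minimal sketch of the st2 interleaving, assuming NEON: unlike the `_x2`
// stores above, `vst2q_s64` interleaves its two registers, writing the
// element pairs (b.0[0], b.1[0], b.0[1], b.1[1]) to memory. The helper name
// `store_st2_demo` is ours.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn store_st2_demo(b: int64x2x2_t) -> [i64; 4] {
    let mut out = [0i64; 4];
    // out = [b.0[0], b.1[0], b.0[1], b.1[1]]
    vst2q_s64(out.as_mut_ptr(), b);
    out
}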
26571#[doc = "Store multiple 3-element structures from three registers"]
26572#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
26573#[doc = "## Safety"]
26574#[doc = "  * Neon instrinsic unsafe"]
26575#[inline]
26576#[target_feature(enable = "neon")]
26577#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26578#[cfg_attr(test, assert_instr(nop))]
26579pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
26580    unsafe extern "unadjusted" {
26581        #[cfg_attr(
26582            any(target_arch = "aarch64", target_arch = "arm64ec"),
26583            link_name = "llvm.aarch64.neon.st3.v1f64.p0i8"
26584        )]
26585        fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
26586    }
26587    _vst3_f64(b.0, b.1, b.2, a as _)
26588}
26589#[doc = "Store multiple 3-element structures from three registers"]
26590#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
26591#[doc = "## Safety"]
26592#[doc = "  * Neon instrinsic unsafe"]
26593#[inline]
26594#[target_feature(enable = "neon")]
26595#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26596#[rustc_legacy_const_generics(2)]
26597#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26598pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
26599    static_assert!(LANE == 0);
26600    unsafe extern "unadjusted" {
26601        #[cfg_attr(
26602            any(target_arch = "aarch64", target_arch = "arm64ec"),
26603            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8"
26604        )]
26605        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
26606    }
26607    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
26608}
26609#[doc = "Store multiple 3-element structures from three registers"]
26610#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
26611#[doc = "## Safety"]
26612#[doc = "  * Neon instrinsic unsafe"]
26613#[inline]
26614#[target_feature(enable = "neon")]
26615#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26616#[rustc_legacy_const_generics(2)]
26617#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26618pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
26619    static_assert!(LANE == 0);
26620    unsafe extern "unadjusted" {
26621        #[cfg_attr(
26622            any(target_arch = "aarch64", target_arch = "arm64ec"),
26623            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8"
26624        )]
26625        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
26626    }
26627    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
26628}
26629#[doc = "Store multiple 3-element structures from three registers"]
26630#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
26631#[doc = "## Safety"]
26632#[doc = "  * Neon instrinsic unsafe"]
26633#[inline]
26634#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26635#[target_feature(enable = "neon,aes")]
26636#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26637#[rustc_legacy_const_generics(2)]
26638pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
26639    static_assert!(LANE == 0);
26640    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
26641}
26642#[doc = "Store multiple 3-element structures from three registers"]
26643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
26644#[doc = "## Safety"]
26645#[doc = "  * Neon instrinsic unsafe"]
26646#[inline]
26647#[target_feature(enable = "neon")]
26648#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26649#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26650#[rustc_legacy_const_generics(2)]
26651pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
26652    static_assert!(LANE == 0);
26653    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
26654}
26655#[doc = "Store multiple 3-element structures from three registers"]
26656#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
26657#[doc = "## Safety"]
26658#[doc = "  * Neon instrinsic unsafe"]
26659#[inline]
26660#[target_feature(enable = "neon")]
26661#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26662#[cfg_attr(test, assert_instr(st3))]
26663pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
26664    unsafe extern "unadjusted" {
26665        #[cfg_attr(
26666            any(target_arch = "aarch64", target_arch = "arm64ec"),
26667            link_name = "llvm.aarch64.neon.st3.v2f64.p0i8"
26668        )]
26669        fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
26670    }
26671    _vst3q_f64(b.0, b.1, b.2, a as _)
26672}
26673#[doc = "Store multiple 3-element structures from three registers"]
26674#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
26675#[doc = "## Safety"]
26676#[doc = "  * Neon instrinsic unsafe"]
26677#[inline]
26678#[target_feature(enable = "neon")]
26679#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26680#[cfg_attr(test, assert_instr(st3))]
26681pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
26682    unsafe extern "unadjusted" {
26683        #[cfg_attr(
26684            any(target_arch = "aarch64", target_arch = "arm64ec"),
26685            link_name = "llvm.aarch64.neon.st3.v2i64.p0i8"
26686        )]
26687        fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
26688    }
26689    _vst3q_s64(b.0, b.1, b.2, a as _)
26690}
26691#[doc = "Store multiple 3-element structures from three registers"]
26692#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
26693#[doc = "## Safety"]
26694#[doc = "  * Neon instrinsic unsafe"]
26695#[inline]
26696#[target_feature(enable = "neon")]
26697#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26698#[rustc_legacy_const_generics(2)]
26699#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26700pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
26701    static_assert_uimm_bits!(LANE, 1);
26702    unsafe extern "unadjusted" {
26703        #[cfg_attr(
26704            any(target_arch = "aarch64", target_arch = "arm64ec"),
26705            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8"
26706        )]
26707        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
26708    }
26709    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
26710}
26711#[doc = "Store multiple 3-element structures from three registers"]
26712#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
26713#[doc = "## Safety"]
26714#[doc = "  * Neon instrinsic unsafe"]
26715#[inline]
26716#[target_feature(enable = "neon")]
26717#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26718#[rustc_legacy_const_generics(2)]
26719#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26720pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
26721    static_assert_uimm_bits!(LANE, 4);
26722    unsafe extern "unadjusted" {
26723        #[cfg_attr(
26724            any(target_arch = "aarch64", target_arch = "arm64ec"),
26725            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8"
26726        )]
26727        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
26728    }
26729    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
26730}
26731#[doc = "Store multiple 3-element structures from three registers"]
26732#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
26733#[doc = "## Safety"]
26734#[doc = "  * Neon instrinsic unsafe"]
26735#[inline]
26736#[target_feature(enable = "neon")]
26737#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26738#[rustc_legacy_const_generics(2)]
26739#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26740pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
26741    static_assert_uimm_bits!(LANE, 1);
26742    unsafe extern "unadjusted" {
26743        #[cfg_attr(
26744            any(target_arch = "aarch64", target_arch = "arm64ec"),
26745            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8"
26746        )]
26747        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
26748    }
26749    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
26750}
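// Editorial sketch (not produced by stdarch-gen-arm): the `_lane` forms store
// only lane `LANE` of each of the three registers, i.e. three contiguous
// elements rather than a full interleave. Assuming NEON on aarch64, with an
// illustrative `demo` wrapper:
//
// ```
// #[target_feature(enable = "neon")]
// unsafe fn demo() {
//     use core::arch::aarch64::*;
//     let v = int64x2x3_t(vdupq_n_s64(1), vdupq_n_s64(2), vdupq_n_s64(3));
//     let mut out = [0_i64; 3];
//     vst3q_lane_s64::<1>(out.as_mut_ptr(), v);
//     assert_eq!(out, [1, 2, 3]); // lane 1 of each register, in register order
// }
// ```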
26751#[doc = "Store multiple 3-element structures from three registers"]
26752#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
26753#[doc = "## Safety"]
26754#[doc = "  * Neon instrinsic unsafe"]
26755#[inline]
26756#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26757#[target_feature(enable = "neon,aes")]
26758#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26759#[rustc_legacy_const_generics(2)]
26760pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
26761    static_assert_uimm_bits!(LANE, 1);
26762    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
26763}
26764#[doc = "Store multiple 3-element structures from three registers"]
26765#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
26766#[doc = "## Safety"]
26767#[doc = "  * Neon instrinsic unsafe"]
26768#[inline]
26769#[target_feature(enable = "neon")]
26770#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26771#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26772#[rustc_legacy_const_generics(2)]
26773pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
26774    static_assert_uimm_bits!(LANE, 4);
26775    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
26776}
26777#[doc = "Store multiple 3-element structures from three registers"]
26778#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
26779#[doc = "## Safety"]
26780#[doc = "  * Neon instrinsic unsafe"]
26781#[inline]
26782#[target_feature(enable = "neon")]
26783#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26784#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26785#[rustc_legacy_const_generics(2)]
26786pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
26787    static_assert_uimm_bits!(LANE, 1);
26788    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
26789}
26790#[doc = "Store multiple 3-element structures from three registers"]
26791#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
26792#[doc = "## Safety"]
26793#[doc = "  * Neon instrinsic unsafe"]
26794#[inline]
26795#[target_feature(enable = "neon")]
26796#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26797#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26798#[rustc_legacy_const_generics(2)]
26799pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
26800    static_assert_uimm_bits!(LANE, 4);
26801    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
26802}
26803#[doc = "Store multiple 3-element structures from three registers"]
26804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
26805#[doc = "## Safety"]
26806#[doc = "  * Neon instrinsic unsafe"]
26807#[inline]
26808#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26809#[target_feature(enable = "neon,aes")]
26810#[cfg_attr(test, assert_instr(st3))]
26811pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
26812    vst3q_s64(transmute(a), transmute(b))
26813}
26814#[doc = "Store multiple 3-element structures from three registers"]
26815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
26816#[doc = "## Safety"]
26817#[doc = "  * Neon instrinsic unsafe"]
26818#[inline]
26819#[target_feature(enable = "neon")]
26820#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26821#[cfg_attr(test, assert_instr(st3))]
26822pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
26823    vst3q_s64(transmute(a), transmute(b))
26824}
26825#[doc = "Store multiple 4-element structures from four registers"]
26826#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
26827#[doc = "## Safety"]
26828#[doc = "  * Neon instrinsic unsafe"]
26829#[inline]
26830#[target_feature(enable = "neon")]
26831#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26832#[cfg_attr(test, assert_instr(nop))]
26833pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
26834    unsafe extern "unadjusted" {
26835        #[cfg_attr(
26836            any(target_arch = "aarch64", target_arch = "arm64ec"),
26837            link_name = "llvm.aarch64.neon.st4.v1f64.p0i8"
26838        )]
26839        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
26840    }
26841    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
26842}
26843#[doc = "Store multiple 4-element structures from four registers"]
26844#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
26845#[doc = "## Safety"]
26846#[doc = "  * Neon instrinsic unsafe"]
26847#[inline]
26848#[target_feature(enable = "neon")]
26849#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26850#[rustc_legacy_const_generics(2)]
26851#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26852pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
26853    static_assert!(LANE == 0);
26854    unsafe extern "unadjusted" {
26855        #[cfg_attr(
26856            any(target_arch = "aarch64", target_arch = "arm64ec"),
26857            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8"
26858        )]
26859        fn _vst4_lane_f64(
26860            a: float64x1_t,
26861            b: float64x1_t,
26862            c: float64x1_t,
26863            d: float64x1_t,
26864            n: i64,
26865            ptr: *mut i8,
26866        );
26867    }
26868    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
26869}
26870#[doc = "Store multiple 4-element structures from four registers"]
26871#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
26872#[doc = "## Safety"]
26873#[doc = "  * Neon instrinsic unsafe"]
26874#[inline]
26875#[target_feature(enable = "neon")]
26876#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26877#[rustc_legacy_const_generics(2)]
26878#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26879pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
26880    static_assert!(LANE == 0);
26881    unsafe extern "unadjusted" {
26882        #[cfg_attr(
26883            any(target_arch = "aarch64", target_arch = "arm64ec"),
26884            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8"
26885        )]
26886        fn _vst4_lane_s64(
26887            a: int64x1_t,
26888            b: int64x1_t,
26889            c: int64x1_t,
26890            d: int64x1_t,
26891            n: i64,
26892            ptr: *mut i8,
26893        );
26894    }
26895    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
26896}
26897#[doc = "Store multiple 4-element structures from four registers"]
26898#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
26899#[doc = "## Safety"]
26900#[doc = "  * Neon instrinsic unsafe"]
26901#[inline]
26902#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26903#[target_feature(enable = "neon,aes")]
26904#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26905#[rustc_legacy_const_generics(2)]
26906pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
26907    static_assert!(LANE == 0);
26908    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
26909}
26910#[doc = "Store multiple 4-element structures from four registers"]
26911#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
26912#[doc = "## Safety"]
26913#[doc = "  * Neon instrinsic unsafe"]
26914#[inline]
26915#[target_feature(enable = "neon")]
26916#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26917#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26918#[rustc_legacy_const_generics(2)]
26919pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
26920    static_assert!(LANE == 0);
26921    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
26922}
26923#[doc = "Store multiple 4-element structures from four registers"]
26924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
26925#[doc = "## Safety"]
26926#[doc = "  * Neon instrinsic unsafe"]
26927#[inline]
26928#[target_feature(enable = "neon")]
26929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26930#[cfg_attr(test, assert_instr(st4))]
26931pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
26932    unsafe extern "unadjusted" {
26933        #[cfg_attr(
26934            any(target_arch = "aarch64", target_arch = "arm64ec"),
26935            link_name = "llvm.aarch64.neon.st4.v2f64.p0i8"
26936        )]
26937        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
26938    }
26939    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
26940}
26941#[doc = "Store multiple 4-element structures from four registers"]
26942#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
26943#[doc = "## Safety"]
26944#[doc = "  * Neon instrinsic unsafe"]
26945#[inline]
26946#[target_feature(enable = "neon")]
26947#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26948#[cfg_attr(test, assert_instr(st4))]
26949pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
26950    unsafe extern "unadjusted" {
26951        #[cfg_attr(
26952            any(target_arch = "aarch64", target_arch = "arm64ec"),
26953            link_name = "llvm.aarch64.neon.st4.v2i64.p0i8"
26954        )]
26955        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
26956    }
26957    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
26958}
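// Editorial sketch (not produced by stdarch-gen-arm): vst4q_s64 interleaves
// four registers into memory, element by element. Assuming NEON on aarch64,
// with an illustrative `demo` wrapper:
//
// ```
// #[target_feature(enable = "neon")]
// unsafe fn demo() {
//     use core::arch::aarch64::*;
//     let v = int64x2x4_t(
//         vdupq_n_s64(1),
//         vdupq_n_s64(2),
//         vdupq_n_s64(3),
//         vdupq_n_s64(4),
//     );
//     let mut out = [0_i64; 8];
//     vst4q_s64(out.as_mut_ptr(), v);
//     assert_eq!(out, [1, 2, 3, 4, 1, 2, 3, 4]); // a0 b0 c0 d0 a1 b1 c1 d1
// }
// ```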
26959#[doc = "Store multiple 4-element structures from four registers"]
26960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
26961#[doc = "## Safety"]
26962#[doc = "  * Neon instrinsic unsafe"]
26963#[inline]
26964#[target_feature(enable = "neon")]
26965#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26966#[rustc_legacy_const_generics(2)]
26967#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26968pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
26969    static_assert_uimm_bits!(LANE, 1);
26970    unsafe extern "unadjusted" {
26971        #[cfg_attr(
26972            any(target_arch = "aarch64", target_arch = "arm64ec"),
26973            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8"
26974        )]
26975        fn _vst4q_lane_f64(
26976            a: float64x2_t,
26977            b: float64x2_t,
26978            c: float64x2_t,
26979            d: float64x2_t,
26980            n: i64,
26981            ptr: *mut i8,
26982        );
26983    }
26984    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
26985}
26986#[doc = "Store multiple 4-element structures from four registers"]
26987#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
26988#[doc = "## Safety"]
26989#[doc = "  * Neon instrinsic unsafe"]
26990#[inline]
26991#[target_feature(enable = "neon")]
26992#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26993#[rustc_legacy_const_generics(2)]
26994#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26995pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
26996    static_assert_uimm_bits!(LANE, 4);
26997    unsafe extern "unadjusted" {
26998        #[cfg_attr(
26999            any(target_arch = "aarch64", target_arch = "arm64ec"),
27000            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8"
27001        )]
27002        fn _vst4q_lane_s8(
27003            a: int8x16_t,
27004            b: int8x16_t,
27005            c: int8x16_t,
27006            d: int8x16_t,
27007            n: i64,
27008            ptr: *mut i8,
27009        );
27010    }
27011    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
27012}
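// Editorial sketch (not produced by stdarch-gen-arm): the `_lane` form writes
// byte `LANE` of each of the four registers, i.e. four contiguous bytes.
// Assuming NEON on aarch64, with an illustrative `demo` wrapper:
//
// ```
// #[target_feature(enable = "neon")]
// unsafe fn demo() {
//     use core::arch::aarch64::*;
//     let v = int8x16x4_t(
//         vdupq_n_s8(1),
//         vdupq_n_s8(2),
//         vdupq_n_s8(3),
//         vdupq_n_s8(4),
//     );
//     let mut out = [0_i8; 4];
//     vst4q_lane_s8::<5>(out.as_mut_ptr(), v);
//     assert_eq!(out, [1, 2, 3, 4]); // byte 5 of each register
// }
// ```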
27013#[doc = "Store multiple 4-element structures from four registers"]
27014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
27015#[doc = "## Safety"]
27016#[doc = "  * Neon instrinsic unsafe"]
27017#[inline]
27018#[target_feature(enable = "neon")]
27019#[cfg_attr(test, assert_instr(st4, LANE = 0))]
27020#[rustc_legacy_const_generics(2)]
27021#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27022pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
27023    static_assert_uimm_bits!(LANE, 1);
27024    unsafe extern "unadjusted" {
27025        #[cfg_attr(
27026            any(target_arch = "aarch64", target_arch = "arm64ec"),
27027            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8"
27028        )]
27029        fn _vst4q_lane_s64(
27030            a: int64x2_t,
27031            b: int64x2_t,
27032            c: int64x2_t,
27033            d: int64x2_t,
27034            n: i64,
27035            ptr: *mut i8,
27036        );
27037    }
27038    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
27039}
27040#[doc = "Store multiple 4-element structures from four registers"]
27041#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
27042#[doc = "## Safety"]
27043#[doc = "  * Neon instrinsic unsafe"]
27044#[inline]
27045#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27046#[target_feature(enable = "neon,aes")]
27047#[cfg_attr(test, assert_instr(st4, LANE = 0))]
27048#[rustc_legacy_const_generics(2)]
27049pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
27050    static_assert_uimm_bits!(LANE, 1);
27051    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
27052}
27053#[doc = "Store multiple 4-element structures from four registers"]
27054#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
27055#[doc = "## Safety"]
27056#[doc = "  * Neon instrinsic unsafe"]
27057#[inline]
27058#[target_feature(enable = "neon")]
27059#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27060#[cfg_attr(test, assert_instr(st4, LANE = 0))]
27061#[rustc_legacy_const_generics(2)]
27062pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
27063    static_assert_uimm_bits!(LANE, 4);
27064    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
27065}
27066#[doc = "Store multiple 4-element structures from four registers"]
27067#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
27068#[doc = "## Safety"]
27069#[doc = "  * Neon instrinsic unsafe"]
27070#[inline]
27071#[target_feature(enable = "neon")]
27072#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27073#[cfg_attr(test, assert_instr(st4, LANE = 0))]
27074#[rustc_legacy_const_generics(2)]
27075pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
27076    static_assert_uimm_bits!(LANE, 1);
27077    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
27078}
27079#[doc = "Store multiple 4-element structures from four registers"]
27080#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
27081#[doc = "## Safety"]
27082#[doc = "  * Neon instrinsic unsafe"]
27083#[inline]
27084#[target_feature(enable = "neon")]
27085#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27086#[cfg_attr(test, assert_instr(st4, LANE = 0))]
27087#[rustc_legacy_const_generics(2)]
27088pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
27089    static_assert_uimm_bits!(LANE, 4);
27090    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
27091}
27092#[doc = "Store multiple 4-element structures from four registers"]
27093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
27094#[doc = "## Safety"]
27095#[doc = "  * Neon instrinsic unsafe"]
27096#[inline]
27097#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27098#[target_feature(enable = "neon,aes")]
27099#[cfg_attr(test, assert_instr(st4))]
27100pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
27101    vst4q_s64(transmute(a), transmute(b))
27102}
27103#[doc = "Store multiple 4-element structures from four registers"]
27104#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
27105#[doc = "## Safety"]
27106#[doc = "  * Neon instrinsic unsafe"]
27107#[inline]
27108#[target_feature(enable = "neon")]
27109#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27110#[cfg_attr(test, assert_instr(st4))]
27111pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
27112    vst4q_s64(transmute(a), transmute(b))
27113}
27114#[doc = "Subtract"]
27115#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
27116#[inline]
27117#[target_feature(enable = "neon")]
27118#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27119#[cfg_attr(test, assert_instr(fsub))]
27120pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
27121    unsafe { simd_sub(a, b) }
27122}
27123#[doc = "Subtract"]
27124#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
27125#[inline]
27126#[target_feature(enable = "neon")]
27127#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27128#[cfg_attr(test, assert_instr(fsub))]
27129pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
27130    unsafe { simd_sub(a, b) }
27131}
27132#[doc = "Subtract"]
27133#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
27134#[inline]
27135#[target_feature(enable = "neon")]
27136#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27137#[cfg_attr(test, assert_instr(nop))]
27138pub fn vsubd_s64(a: i64, b: i64) -> i64 {
27139    a.wrapping_sub(b)
27140}
27141#[doc = "Subtract"]
27142#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
27143#[inline]
27144#[target_feature(enable = "neon")]
27145#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27146#[cfg_attr(test, assert_instr(nop))]
27147pub fn vsubd_u64(a: u64, b: u64) -> u64 {
27148    a.wrapping_sub(b)
27149}
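// Editorial sketch (not produced by stdarch-gen-arm): vsubd_s64/vsubd_u64 are
// plain scalar subtractions that wrap on overflow, matching A64 SUB semantics
// (hence the `nop` instruction assertion above). A minimal sketch:
//
// ```
// #[target_feature(enable = "neon")]
// fn demo() {
//     use core::arch::aarch64::*;
//     assert_eq!(vsubd_s64(i64::MIN, 1), i64::MAX); // wraps, never panics
//     assert_eq!(vsubd_u64(0, 1), u64::MAX);
// }
// ```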
27150#[doc = "Subtract"]
27151#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
27152#[inline]
27153#[target_feature(enable = "neon,fp16")]
27154#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
27155#[cfg_attr(test, assert_instr(nop))]
27156pub fn vsubh_f16(a: f16, b: f16) -> f16 {
27157    a - b
27158}
27159#[doc = "Signed Subtract Long"]
27160#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
27161#[inline]
27162#[target_feature(enable = "neon")]
27163#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27164#[cfg_attr(test, assert_instr(ssubl))]
27165pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
27166    unsafe {
27167        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
27168        let d: int16x8_t = simd_cast(c);
27169        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
27170        let f: int16x8_t = simd_cast(e);
27171        simd_sub(d, f)
27172    }
27173}
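// Editorial sketch (not produced by stdarch-gen-arm): the `_high` widening
// forms take the upper eight i8 lanes of each input, sign-extend them to i16,
// and subtract, so the result cannot overflow. Assuming NEON on aarch64:
//
// ```
// #[target_feature(enable = "neon")]
// fn demo() {
//     use core::arch::aarch64::*;
//     let a = vdupq_n_s8(100);
//     let b = vdupq_n_s8(-100);
//     let wide: int16x8_t = vsubl_high_s8(a, b);
//     // 100 - (-100) = 200, which only fits after widening to i16.
//     assert_eq!(vgetq_lane_s16::<0>(wide), 200);
// }
// ```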
27174#[doc = "Signed Subtract Long"]
27175#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
27176#[inline]
27177#[target_feature(enable = "neon")]
27178#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27179#[cfg_attr(test, assert_instr(ssubl))]
27180pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
27181    unsafe {
27182        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
27183        let d: int32x4_t = simd_cast(c);
27184        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
27185        let f: int32x4_t = simd_cast(e);
27186        simd_sub(d, f)
27187    }
27188}
27189#[doc = "Signed Subtract Long"]
27190#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
27191#[inline]
27192#[target_feature(enable = "neon")]
27193#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27194#[cfg_attr(test, assert_instr(ssubl))]
27195pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
27196    unsafe {
27197        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
27198        let d: int64x2_t = simd_cast(c);
27199        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
27200        let f: int64x2_t = simd_cast(e);
27201        simd_sub(d, f)
27202    }
27203}
27204#[doc = "Unsigned Subtract Long"]
27205#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
27206#[inline]
27207#[target_feature(enable = "neon")]
27208#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27209#[cfg_attr(test, assert_instr(usubl))]
27210pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
27211    unsafe {
27212        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
27213        let d: uint16x8_t = simd_cast(c);
27214        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
27215        let f: uint16x8_t = simd_cast(e);
27216        simd_sub(d, f)
27217    }
27218}
27219#[doc = "Unsigned Subtract Long"]
27220#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
27221#[inline]
27222#[target_feature(enable = "neon")]
27223#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27224#[cfg_attr(test, assert_instr(usubl))]
27225pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
27226    unsafe {
27227        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
27228        let d: uint32x4_t = simd_cast(c);
27229        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
27230        let f: uint32x4_t = simd_cast(e);
27231        simd_sub(d, f)
27232    }
27233}
27234#[doc = "Unsigned Subtract Long"]
27235#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
27236#[inline]
27237#[target_feature(enable = "neon")]
27238#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27239#[cfg_attr(test, assert_instr(usubl))]
27240pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
27241    unsafe {
27242        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
27243        let d: uint64x2_t = simd_cast(c);
27244        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
27245        let f: uint64x2_t = simd_cast(e);
27246        simd_sub(d, f)
27247    }
27248}
27249#[doc = "Signed Subtract Wide"]
27250#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
27251#[inline]
27252#[target_feature(enable = "neon")]
27253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27254#[cfg_attr(test, assert_instr(ssubw))]
27255pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
27256    unsafe {
27257        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
27258        simd_sub(a, simd_cast(c))
27259    }
27260}
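// Editorial sketch (not produced by stdarch-gen-arm): the wide (`w`) forms
// subtract the widened upper half of `b` from an already-wide accumulator
// `a`, unlike the long (`l`) forms above which widen both operands:
//
// ```
// #[target_feature(enable = "neon")]
// fn demo() {
//     use core::arch::aarch64::*;
//     let acc = vdupq_n_s16(1000);
//     let b = vdupq_n_s8(25);
//     let r = vsubw_high_s8(acc, b); // each lane: 1000 - 25
//     assert_eq!(vgetq_lane_s16::<0>(r), 975);
// }
// ```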
27261#[doc = "Signed Subtract Wide"]
27262#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
27263#[inline]
27264#[target_feature(enable = "neon")]
27265#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27266#[cfg_attr(test, assert_instr(ssubw))]
27267pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
27268    unsafe {
27269        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
27270        simd_sub(a, simd_cast(c))
27271    }
27272}
27273#[doc = "Signed Subtract Wide"]
27274#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
27275#[inline]
27276#[target_feature(enable = "neon")]
27277#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27278#[cfg_attr(test, assert_instr(ssubw))]
27279pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
27280    unsafe {
27281        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
27282        simd_sub(a, simd_cast(c))
27283    }
27284}
27285#[doc = "Unsigned Subtract Wide"]
27286#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
27287#[inline]
27288#[target_feature(enable = "neon")]
27289#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27290#[cfg_attr(test, assert_instr(usubw))]
27291pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
27292    unsafe {
27293        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
27294        simd_sub(a, simd_cast(c))
27295    }
27296}
27297#[doc = "Unsigned Subtract Wide"]
27298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
27299#[inline]
27300#[target_feature(enable = "neon")]
27301#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27302#[cfg_attr(test, assert_instr(usubw))]
27303pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
27304    unsafe {
27305        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
27306        simd_sub(a, simd_cast(c))
27307    }
27308}
27309#[doc = "Unsigned Subtract Wide"]
27310#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
27311#[inline]
27312#[target_feature(enable = "neon")]
27313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27314#[cfg_attr(test, assert_instr(usubw))]
27315pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
27316    unsafe {
27317        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
27318        simd_sub(a, simd_cast(c))
27319    }
27320}
27321#[doc = "Dot product index form with signed and unsigned integers"]
27322#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"]
27323#[inline]
27324#[target_feature(enable = "neon,i8mm")]
27325#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
27326#[rustc_legacy_const_generics(3)]
27327#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
27328pub fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t {
27329    static_assert_uimm_bits!(LANE, 2);
27330    unsafe {
27331        let c: uint32x4_t = transmute(c);
27332        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
27333        vusdot_s32(a, transmute(c), b)
27334    }
27335}
27336#[doc = "Dot product index form with signed and unsigned integers"]
27337#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"]
27338#[inline]
27339#[target_feature(enable = "neon,i8mm")]
27340#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
27341#[rustc_legacy_const_generics(3)]
27342#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
27343pub fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
27344    static_assert_uimm_bits!(LANE, 2);
27345    unsafe {
27346        let c: uint32x4_t = transmute(c);
27347        let c: uint32x4_t =
27348            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
27349        vusdotq_s32(a, transmute(c), b)
27350    }
27351}
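// Editorial sketch (not produced by stdarch-gen-arm): the sudot forms are
// nightly-only (`stdarch_neon_i8mm`) and require the `i8mm` target feature.
// Each i32 lane accumulates the dot product of four signed bytes of `b` with
// the four unsigned bytes of `c` selected by `LANE`:
//
// ```
// #[target_feature(enable = "neon,i8mm")]
// fn demo() {
//     use core::arch::aarch64::*;
//     let acc = vdupq_n_s32(0);
//     let b = vdupq_n_s8(-1);
//     let c = vdupq_n_u8(2);
//     // Every lane: 0 + 4 * (-1 * 2) = -8; LANE is irrelevant here since c is uniform.
//     let r = vsudotq_laneq_s32::<0>(acc, b, c);
//     assert_eq!(vgetq_lane_s32::<0>(r), -8);
// }
// ```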
27352#[doc = "Table look-up"]
27353#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
27354#[inline]
27355#[target_feature(enable = "neon")]
27356#[cfg_attr(test, assert_instr(tbl))]
27357#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27358pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
27359    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
27360        transmute(b)
27361    })
27364}
27365#[doc = "Table look-up"]
27366#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
27367#[inline]
27368#[target_feature(enable = "neon")]
27369#[cfg_attr(test, assert_instr(tbl))]
27370#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27371pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
27372    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
27373}
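// Editorial sketch (not produced by stdarch-gen-arm): vtbl1 treats `a` as an
// 8-byte table and `b` as per-lane indices; indices outside 0..=7 yield 0.
// Reversing a vector, assuming NEON on aarch64:
//
// ```
// #[target_feature(enable = "neon")]
// fn demo() {
//     use core::arch::aarch64::*;
//     let table = vcreate_u8(0x0706_0504_0302_0100); // lanes 0..=7 hold 0..=7
//     let idx = vcreate_u8(0x0001_0203_0405_0607);   // indices 7, 6, ..., 0
//     let r = vtbl1_u8(table, idx);
//     assert_eq!(vget_lane_u8::<0>(r), 7); // first output byte is table[7]
// }
// ```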
27374#[doc = "Table look-up"]
27375#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
27376#[inline]
27377#[target_feature(enable = "neon")]
27378#[cfg_attr(test, assert_instr(tbl))]
27379#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27380pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
27381    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
27382}
27383#[doc = "Table look-up"]
27384#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
27385#[inline]
27386#[target_feature(enable = "neon")]
27387#[cfg_attr(test, assert_instr(tbl))]
27388#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27389pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
27390    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
27391}
27392#[doc = "Table look-up"]
27393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
27394#[inline]
27395#[cfg(target_endian = "little")]
27396#[target_feature(enable = "neon")]
27397#[cfg_attr(test, assert_instr(tbl))]
27398#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27399pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
27400    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
27401}
27402#[doc = "Table look-up"]
27403#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
27404#[inline]
27405#[cfg(target_endian = "big")]
27406#[target_feature(enable = "neon")]
27407#[cfg_attr(test, assert_instr(tbl))]
27408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27409pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
27410    let mut a: uint8x8x2_t = a;
27411    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27412    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27413    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
27414    unsafe {
27415        let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b));
27416        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27417    }
27418}
27419#[doc = "Table look-up"]
27420#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
27421#[inline]
27422#[cfg(target_endian = "little")]
27423#[target_feature(enable = "neon")]
27424#[cfg_attr(test, assert_instr(tbl))]
27425#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27426pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
27427    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
27428}
27429#[doc = "Table look-up"]
27430#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
27431#[inline]
27432#[cfg(target_endian = "big")]
27433#[target_feature(enable = "neon")]
27434#[cfg_attr(test, assert_instr(tbl))]
27435#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27436pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
27437    let mut a: poly8x8x2_t = a;
27438    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27439    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27440    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
27441    unsafe {
27442        let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b));
27443        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27444    }
27445}
27446#[doc = "Table look-up"]
27447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
27448#[inline]
27449#[target_feature(enable = "neon")]
27450#[cfg_attr(test, assert_instr(tbl))]
27451#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27452pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
27453    let x = int8x16x2_t(
27454        vcombine_s8(a.0, a.1),
27455        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
27456    );
27457    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
27458}
27459#[doc = "Table look-up"]
27460#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
27461#[inline]
27462#[cfg(target_endian = "little")]
27463#[target_feature(enable = "neon")]
27464#[cfg_attr(test, assert_instr(tbl))]
27465#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27466pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
27467    let x = uint8x16x2_t(
27468        vcombine_u8(a.0, a.1),
27469        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
27470    );
27471    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
27472}
27473#[doc = "Table look-up"]
27474#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
27475#[inline]
27476#[cfg(target_endian = "big")]
27477#[target_feature(enable = "neon")]
27478#[cfg_attr(test, assert_instr(tbl))]
27479#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27480pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
27481    let mut a: uint8x8x3_t = a;
27482    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27483    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27484    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27485    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
27486    let x = uint8x16x2_t(
27487        vcombine_u8(a.0, a.1),
27488        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
27489    );
27490    unsafe {
27491        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
27492        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27493    }
27494}
27495#[doc = "Table look-up"]
27496#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
27497#[inline]
27498#[cfg(target_endian = "little")]
27499#[target_feature(enable = "neon")]
27500#[cfg_attr(test, assert_instr(tbl))]
27501#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27502pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
27503    let x = poly8x16x2_t(
27504        vcombine_p8(a.0, a.1),
27505        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
27506    );
27507    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
27508}
27509#[doc = "Table look-up"]
27510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
27511#[inline]
27512#[cfg(target_endian = "big")]
27513#[target_feature(enable = "neon")]
27514#[cfg_attr(test, assert_instr(tbl))]
27515#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27516pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
27517    let mut a: poly8x8x3_t = a;
27518    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27519    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27520    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27521    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
27522    let x = poly8x16x2_t(
27523        vcombine_p8(a.0, a.1),
27524        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
27525    );
27526    unsafe {
27527        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
27528        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27529    }
27530}
27531#[doc = "Table look-up"]
27532#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
27533#[inline]
27534#[target_feature(enable = "neon")]
27535#[cfg_attr(test, assert_instr(tbl))]
27536#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27537pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
27538    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
27539    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
27540}
27541#[doc = "Table look-up"]
27542#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
27543#[inline]
27544#[cfg(target_endian = "little")]
27545#[target_feature(enable = "neon")]
27546#[cfg_attr(test, assert_instr(tbl))]
27547#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27548pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
27549    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
27550    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
27551}
27552#[doc = "Table look-up"]
27553#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
27554#[inline]
27555#[cfg(target_endian = "big")]
27556#[target_feature(enable = "neon")]
27557#[cfg_attr(test, assert_instr(tbl))]
27558#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27559pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
27560    let mut a: uint8x8x4_t = a;
27561    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27562    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27563    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27564    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
27565    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
27566    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
27567    unsafe {
27568        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
27569        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27570    }
27571}
27572#[doc = "Table look-up"]
27573#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
27574#[inline]
27575#[cfg(target_endian = "little")]
27576#[target_feature(enable = "neon")]
27577#[cfg_attr(test, assert_instr(tbl))]
27578#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27579pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
27580    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
27581    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
27582}
27583#[doc = "Table look-up"]
27584#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
27585#[inline]
27586#[cfg(target_endian = "big")]
27587#[target_feature(enable = "neon")]
27588#[cfg_attr(test, assert_instr(tbl))]
27589#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27590pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
27591    let mut a: poly8x8x4_t = a;
27592    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27593    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27594    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27595    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
27596    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
27597    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
27598    unsafe {
27599        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
27600        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27601    }
27602}
27603#[doc = "Extended table look-up"]
27604#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
27605#[inline]
27606#[target_feature(enable = "neon")]
27607#[cfg_attr(test, assert_instr(tbx))]
27608#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27609pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
27610    unsafe {
27611        simd_select(
27612            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
27613            transmute(vqtbx1(
27614                transmute(a),
27615                transmute(vcombine_s8(b, crate::mem::zeroed())),
27616                transmute(c),
27617            )),
27618            a,
27619        )
27620    }
27621}
27622#[doc = "Extended table look-up"]
27623#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
27624#[inline]
27625#[target_feature(enable = "neon")]
27626#[cfg_attr(test, assert_instr(tbx))]
27627#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27628pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
27629    unsafe {
27630        simd_select(
27631            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
27632            transmute(vqtbx1(
27633                transmute(a),
27634                transmute(vcombine_u8(b, crate::mem::zeroed())),
27635                c,
27636            )),
27637            a,
27638        )
27639    }
27640}
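// Editorial sketch (not produced by stdarch-gen-arm): the tbx forms differ
// from tbl in their out-of-range behavior; a lane whose index is >= 8 keeps
// the corresponding lane of `a` instead of becoming 0, as the simd_select
// above implements:
//
// ```
// #[target_feature(enable = "neon")]
// fn demo() {
//     use core::arch::aarch64::*;
//     let fallback = vdup_n_u8(0xAA);
//     let table = vcreate_u8(0x0706_0504_0302_0100);
//     let idx = vcreate_u8(0x0000_0000_0000_FF01); // lane 0 = 1, lane 1 = 0xFF
//     let r = vtbx1_u8(fallback, table, idx);
//     assert_eq!(vget_lane_u8::<0>(r), 1);    // in range: table[1]
//     assert_eq!(vget_lane_u8::<1>(r), 0xAA); // out of range: fallback lane
// }
// ```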
27641#[doc = "Extended table look-up"]
27642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
27643#[inline]
27644#[target_feature(enable = "neon")]
27645#[cfg_attr(test, assert_instr(tbx))]
27646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27647pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
27648    unsafe {
27649        simd_select(
27650            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
27651            transmute(vqtbx1(
27652                transmute(a),
27653                transmute(vcombine_p8(b, crate::mem::zeroed())),
27654                c,
27655            )),
27656            a,
27657        )
27658    }
27659}
27660#[doc = "Extended table look-up"]
27661#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
27662#[inline]
27663#[target_feature(enable = "neon")]
27664#[cfg_attr(test, assert_instr(tbx))]
27665#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27666pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
27667    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
27668}
27669#[doc = "Extended table look-up"]
27670#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
27671#[inline]
27672#[cfg(target_endian = "little")]
27673#[target_feature(enable = "neon")]
27674#[cfg_attr(test, assert_instr(tbx))]
27675#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27676pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
27677    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
27678}
27679#[doc = "Extended table look-up"]
27680#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
27681#[inline]
27682#[cfg(target_endian = "big")]
27683#[target_feature(enable = "neon")]
27684#[cfg_attr(test, assert_instr(tbx))]
27685#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27686pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
27687    let mut b: uint8x8x2_t = b;
27688    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
27689    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27690    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27691    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
27692    unsafe {
27693        let ret_val: uint8x8_t =
27694            transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c));
27695        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27696    }
27697}
27698#[doc = "Extended table look-up"]
27699#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
27700#[inline]
27701#[cfg(target_endian = "little")]
27702#[target_feature(enable = "neon")]
27703#[cfg_attr(test, assert_instr(tbx))]
27704#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27705pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
27706    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
27707}
27708#[doc = "Extended table look-up"]
27709#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
27710#[inline]
27711#[cfg(target_endian = "big")]
27712#[target_feature(enable = "neon")]
27713#[cfg_attr(test, assert_instr(tbx))]
27714#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27715pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
27716    let mut b: poly8x8x2_t = b;
27717    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
27718    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27719    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27720    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
27721    unsafe {
27722        let ret_val: poly8x8_t =
27723            transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c));
27724        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27725    }
27726}
27727#[doc = "Extended table look-up"]
27728#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
27729#[inline]
27730#[target_feature(enable = "neon")]
27731#[cfg_attr(test, assert_instr(tbx))]
27732#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27733pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
27734    let x = int8x16x2_t(
27735        vcombine_s8(b.0, b.1),
27736        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
27737    );
27738    unsafe {
27739        transmute(simd_select(
27740            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
27741            transmute(vqtbx2(
27742                transmute(a),
27743                transmute(x.0),
27744                transmute(x.1),
27745                transmute(c),
27746            )),
27747            a,
27748        ))
27749    }
27750}
27751#[doc = "Extended table look-up"]
27752#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
27753#[inline]
27754#[cfg(target_endian = "little")]
27755#[target_feature(enable = "neon")]
27756#[cfg_attr(test, assert_instr(tbx))]
27757#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27758pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
27759    let x = uint8x16x2_t(
27760        vcombine_u8(b.0, b.1),
27761        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
27762    );
27763    unsafe {
27764        transmute(simd_select(
27765            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
27766            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
27767            a,
27768        ))
27769    }
27770}
27771#[doc = "Extended table look-up"]
27772#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
27773#[inline]
27774#[cfg(target_endian = "big")]
27775#[target_feature(enable = "neon")]
27776#[cfg_attr(test, assert_instr(tbx))]
27777#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27778pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
27779    let mut b: uint8x8x3_t = b;
27780    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
27781    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27782    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27783    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27784    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
27785    let x = uint8x16x2_t(
27786        vcombine_u8(b.0, b.1),
27787        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
27788    );
27789    unsafe {
27790        let ret_val: uint8x8_t = transmute(simd_select(
27791            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
27792            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
27793            a,
27794        ));
27795        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27796    }
27797}
27798#[doc = "Extended table look-up"]
27799#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
27800#[inline]
27801#[cfg(target_endian = "little")]
27802#[target_feature(enable = "neon")]
27803#[cfg_attr(test, assert_instr(tbx))]
27804#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27805pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
27806    let x = poly8x16x2_t(
27807        vcombine_p8(b.0, b.1),
27808        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
27809    );
27810    unsafe {
27811        transmute(simd_select(
27812            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
27813            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
27814            a,
27815        ))
27816    }
27817}
27818#[doc = "Extended table look-up"]
27819#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
27820#[inline]
27821#[cfg(target_endian = "big")]
27822#[target_feature(enable = "neon")]
27823#[cfg_attr(test, assert_instr(tbx))]
27824#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27825pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
27826    let mut b: poly8x8x3_t = b;
27827    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
27828    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27829    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27830    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27831    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
27832    let x = poly8x16x2_t(
27833        vcombine_p8(b.0, b.1),
27834        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
27835    );
27836    unsafe {
27837        let ret_val: poly8x8_t = transmute(simd_select(
27838            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
27839            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
27840            a,
27841        ));
27842        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27843    }
27844}
27845#[doc = "Extended table look-up"]
27846#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
27847#[inline]
27848#[target_feature(enable = "neon")]
27849#[cfg_attr(test, assert_instr(tbx))]
27850#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27851pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
27852    unsafe {
27853        vqtbx2(
27854            transmute(a),
27855            transmute(vcombine_s8(b.0, b.1)),
27856            transmute(vcombine_s8(b.2, b.3)),
27857            transmute(c),
27858        )
27859    }
27860}
27861#[doc = "Extended table look-up"]
27862#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
27863#[inline]
27864#[cfg(target_endian = "little")]
27865#[target_feature(enable = "neon")]
27866#[cfg_attr(test, assert_instr(tbx))]
27867#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27868pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
27869    unsafe {
27870        transmute(vqtbx2(
27871            transmute(a),
27872            transmute(vcombine_u8(b.0, b.1)),
27873            transmute(vcombine_u8(b.2, b.3)),
27874            c,
27875        ))
27876    }
27877}
27878#[doc = "Extended table look-up"]
27879#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
27880#[inline]
27881#[cfg(target_endian = "big")]
27882#[target_feature(enable = "neon")]
27883#[cfg_attr(test, assert_instr(tbx))]
27884#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27885pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
27886    let mut b: uint8x8x4_t = b;
27887    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
27888    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27889    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27890    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27891    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
27892    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
27893    unsafe {
27894        let ret_val: uint8x8_t = transmute(vqtbx2(
27895            transmute(a),
27896            transmute(vcombine_u8(b.0, b.1)),
27897            transmute(vcombine_u8(b.2, b.3)),
27898            c,
27899        ));
27900        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27901    }
27902}
27903#[doc = "Extended table look-up"]
27904#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
27905#[inline]
27906#[cfg(target_endian = "little")]
27907#[target_feature(enable = "neon")]
27908#[cfg_attr(test, assert_instr(tbx))]
27909#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27910pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
27911    unsafe {
27912        transmute(vqtbx2(
27913            transmute(a),
27914            transmute(vcombine_p8(b.0, b.1)),
27915            transmute(vcombine_p8(b.2, b.3)),
27916            c,
27917        ))
27918    }
27919}
27920#[doc = "Extended table look-up"]
27921#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
27922#[inline]
27923#[cfg(target_endian = "big")]
27924#[target_feature(enable = "neon")]
27925#[cfg_attr(test, assert_instr(tbx))]
27926#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27927pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
27928    let mut b: poly8x8x4_t = b;
27929    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
27930    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
27931    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
27932    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
27933    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
27934    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
27935    unsafe {
27936        let ret_val: poly8x8_t = transmute(vqtbx2(
27937            transmute(a),
27938            transmute(vcombine_p8(b.0, b.1)),
27939            transmute(vcombine_p8(b.2, b.3)),
27940            c,
27941        ));
27942        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
27943    }
27944}
27945#[doc = "Transpose vectors"]
27946#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
27947#[inline]
27948#[target_feature(enable = "neon,fp16")]
27949#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
27950#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
27951pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
27952    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
27953}
27954#[doc = "Transpose vectors"]
27955#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
27956#[inline]
27957#[target_feature(enable = "neon,fp16")]
27958#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
27959#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
27960pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
27961    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
27962}
27963#[doc = "Transpose vectors"]
27964#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
27965#[inline]
27966#[target_feature(enable = "neon")]
27967#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27968#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27969pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
27970    unsafe { simd_shuffle!(a, b, [0, 2]) }
27971}
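// Editor's note: for two-lane vectors (and the 64-bit-lane `q` forms that
// follow) "interleave the even-numbered lanes" coincides with "take lane 0
// of each input", so these variants legitimately lower to `zip1` instead
// of `trn1`; the `assert_instr(zip1)` attributes record that equivalence
// rather than a bug.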
27972#[doc = "Transpose vectors"]
27973#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
27974#[inline]
27975#[target_feature(enable = "neon")]
27976#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27977#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27978pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
27979    unsafe { simd_shuffle!(a, b, [0, 2]) }
27980}
27981#[doc = "Transpose vectors"]
27982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
27983#[inline]
27984#[target_feature(enable = "neon")]
27985#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27986#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27987pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
27988    unsafe { simd_shuffle!(a, b, [0, 2]) }
27989}
27990#[doc = "Transpose vectors"]
27991#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
27992#[inline]
27993#[target_feature(enable = "neon")]
27994#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27995#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
27996pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
27997    unsafe { simd_shuffle!(a, b, [0, 2]) }
27998}
27999#[doc = "Transpose vectors"]
28000#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
28001#[inline]
28002#[target_feature(enable = "neon")]
28003#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28004#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28005pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
28006    unsafe { simd_shuffle!(a, b, [0, 2]) }
28007}
28008#[doc = "Transpose vectors"]
28009#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
28010#[inline]
28011#[target_feature(enable = "neon")]
28012#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28013#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28014pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
28015    unsafe { simd_shuffle!(a, b, [0, 2]) }
28016}
28017#[doc = "Transpose vectors"]
28018#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
28019#[inline]
28020#[target_feature(enable = "neon")]
28021#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28022#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28023pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
28024    unsafe { simd_shuffle!(a, b, [0, 2]) }
28025}
28026#[doc = "Transpose vectors"]
28027#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
28028#[inline]
28029#[target_feature(enable = "neon")]
28030#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28031#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28032pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
28033    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
28034}
28035#[doc = "Transpose vectors"]
28036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
28037#[inline]
28038#[target_feature(enable = "neon")]
28039#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28040#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28041pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
28042    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
28043}
28044#[doc = "Transpose vectors"]
28045#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
28046#[inline]
28047#[target_feature(enable = "neon")]
28048#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28049#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28050pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
28051    unsafe {
28052        simd_shuffle!(
28053            a,
28054            b,
28055            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
28056        )
28057    }
28058}
28059#[doc = "Transpose vectors"]
28060#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
28061#[inline]
28062#[target_feature(enable = "neon")]
28063#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28064#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28065pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
28066    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
28067}
28068#[doc = "Transpose vectors"]
28069#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
28070#[inline]
28071#[target_feature(enable = "neon")]
28072#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28073#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28074pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
28075    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
28076}
28077#[doc = "Transpose vectors"]
28078#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
28079#[inline]
28080#[target_feature(enable = "neon")]
28081#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28082#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28083pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
28084    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
28085}
28086#[doc = "Transpose vectors"]
28087#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
28088#[inline]
28089#[target_feature(enable = "neon")]
28090#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28091#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28092pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
28093    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
28094}
28095#[doc = "Transpose vectors"]
28096#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
28097#[inline]
28098#[target_feature(enable = "neon")]
28099#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28100#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28101pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
28102    unsafe {
28103        simd_shuffle!(
28104            a,
28105            b,
28106            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
28107        )
28108    }
28109}
28110#[doc = "Transpose vectors"]
28111#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
28112#[inline]
28113#[target_feature(enable = "neon")]
28114#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28115#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28116pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
28117    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
28118}
28119#[doc = "Transpose vectors"]
28120#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
28121#[inline]
28122#[target_feature(enable = "neon")]
28123#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28124#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28125pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
28126    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
28127}
28128#[doc = "Transpose vectors"]
28129#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
28130#[inline]
28131#[target_feature(enable = "neon")]
28132#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28133#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28134pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
28135    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
28136}
28137#[doc = "Transpose vectors"]
28138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
28139#[inline]
28140#[target_feature(enable = "neon")]
28141#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28142#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28143pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
28144    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
28145}
28146#[doc = "Transpose vectors"]
28147#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
28148#[inline]
28149#[target_feature(enable = "neon")]
28150#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28151#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28152pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
28153    unsafe {
28154        simd_shuffle!(
28155            a,
28156            b,
28157            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
28158        )
28159    }
28160}
28161#[doc = "Transpose vectors"]
28162#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
28163#[inline]
28164#[target_feature(enable = "neon")]
28165#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28166#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28167pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
28168    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
28169}
28170#[doc = "Transpose vectors"]
28171#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
28172#[inline]
28173#[target_feature(enable = "neon")]
28174#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28175#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28176pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
28177    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
28178}
28179#[doc = "Transpose vectors"]
28180#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
28181#[inline]
28182#[target_feature(enable = "neon,fp16")]
28183#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
28184#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28185pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
28186    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28187}
28188#[doc = "Transpose vectors"]
28189#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
28190#[inline]
28191#[target_feature(enable = "neon,fp16")]
28192#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
28193#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28194pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
28195    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28196}
28197#[doc = "Transpose vectors"]
28198#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
28199#[inline]
28200#[target_feature(enable = "neon")]
28201#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28202#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28203pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
28204    unsafe { simd_shuffle!(a, b, [1, 3]) }
28205}
28206#[doc = "Transpose vectors"]
28207#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
28208#[inline]
28209#[target_feature(enable = "neon")]
28210#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28211#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28212pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
28213    unsafe { simd_shuffle!(a, b, [1, 3]) }
28214}
28215#[doc = "Transpose vectors"]
28216#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
28217#[inline]
28218#[target_feature(enable = "neon")]
28219#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28220#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28221pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
28222    unsafe { simd_shuffle!(a, b, [1, 3]) }
28223}
28224#[doc = "Transpose vectors"]
28225#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
28226#[inline]
28227#[target_feature(enable = "neon")]
28228#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28229#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28230pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
28231    unsafe { simd_shuffle!(a, b, [1, 3]) }
28232}
28233#[doc = "Transpose vectors"]
28234#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
28235#[inline]
28236#[target_feature(enable = "neon")]
28237#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28238#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28239pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
28240    unsafe { simd_shuffle!(a, b, [1, 3]) }
28241}
28242#[doc = "Transpose vectors"]
28243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
28244#[inline]
28245#[target_feature(enable = "neon")]
28246#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28247#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28248pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
28249    unsafe { simd_shuffle!(a, b, [1, 3]) }
28250}
28251#[doc = "Transpose vectors"]
28252#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
28253#[inline]
28254#[target_feature(enable = "neon")]
28255#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28256#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28257pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
28258    unsafe { simd_shuffle!(a, b, [1, 3]) }
28259}
28260#[doc = "Transpose vectors"]
28261#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
28262#[inline]
28263#[target_feature(enable = "neon")]
28264#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28265#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28266pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
28267    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28268}
28269#[doc = "Transpose vectors"]
28270#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
28271#[inline]
28272#[target_feature(enable = "neon")]
28273#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28274#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28275pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
28276    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28277}
28278#[doc = "Transpose vectors"]
28279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
28280#[inline]
28281#[target_feature(enable = "neon")]
28282#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28283#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28284pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
28285    unsafe {
28286        simd_shuffle!(
28287            a,
28288            b,
28289            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
28290        )
28291    }
28292}
28293#[doc = "Transpose vectors"]
28294#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
28295#[inline]
28296#[target_feature(enable = "neon")]
28297#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28298#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28299pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
28300    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28301}
28302#[doc = "Transpose vectors"]
28303#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
28304#[inline]
28305#[target_feature(enable = "neon")]
28306#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28307#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28308pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
28309    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28310}
28311#[doc = "Transpose vectors"]
28312#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
28313#[inline]
28314#[target_feature(enable = "neon")]
28315#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28316#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28317pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
28318    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28319}
28320#[doc = "Transpose vectors"]
28321#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
28322#[inline]
28323#[target_feature(enable = "neon")]
28324#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28325#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28326pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
28327    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28328}
28329#[doc = "Transpose vectors"]
28330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
28331#[inline]
28332#[target_feature(enable = "neon")]
28333#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28334#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28335pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
28336    unsafe {
28337        simd_shuffle!(
28338            a,
28339            b,
28340            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
28341        )
28342    }
28343}
28344#[doc = "Transpose vectors"]
28345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
28346#[inline]
28347#[target_feature(enable = "neon")]
28348#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28349#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28350pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
28351    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28352}
28353#[doc = "Transpose vectors"]
28354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
28355#[inline]
28356#[target_feature(enable = "neon")]
28357#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28358#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28359pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
28360    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28361}
28362#[doc = "Transpose vectors"]
28363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
28364#[inline]
28365#[target_feature(enable = "neon")]
28366#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28367#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28368pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
28369    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28370}
28371#[doc = "Transpose vectors"]
28372#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
28373#[inline]
28374#[target_feature(enable = "neon")]
28375#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28376#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28377pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
28378    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28379}
28380#[doc = "Transpose vectors"]
28381#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
28382#[inline]
28383#[target_feature(enable = "neon")]
28384#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28385#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28386pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
28387    unsafe {
28388        simd_shuffle!(
28389            a,
28390            b,
28391            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
28392        )
28393    }
28394}
28395#[doc = "Transpose vectors"]
28396#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
28397#[inline]
28398#[target_feature(enable = "neon")]
28399#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28400#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28401pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
28402    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28403}
28404#[doc = "Transpose vectors"]
28405#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
28406#[inline]
28407#[target_feature(enable = "neon")]
28408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28409#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28410pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
28411    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28412}
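// Editor's note: a hedged sketch, not part of the generated API
// (`transpose2x2` is a hypothetical name). `vtrn1q_*` and `vtrn2q_*` are
// the two halves of the AArch32-style `vtrn` pair: applied to the same
// inputs they transpose each 2x2 block of a two-row matrix:
//
// ```
// use core::arch::aarch64::*;
//
// #[target_feature(enable = "neon")]
// fn transpose2x2(a: int32x4_t, b: int32x4_t) -> (int32x4_t, int32x4_t) {
//     // [a0 a1 a2 a3] and [b0 b1 b2 b3] become
//     // [a0 b0 a2 b2] and [a1 b1 a3 b3].
//     (vtrn1q_s32(a, b), vtrn2q_s32(a, b))
// }
// ```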
28413#[doc = "Signed compare bitwise Test bits nonzero"]
28414#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
28415#[inline]
28416#[target_feature(enable = "neon")]
28417#[cfg_attr(test, assert_instr(cmtst))]
28418#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28419pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
28420    unsafe {
28421        let c: int64x1_t = simd_and(a, b);
28422        let d: i64x1 = i64x1::new(0);
28423        simd_ne(c, transmute(d))
28424    }
28425}
28426#[doc = "Signed compare bitwise Test bits nonzero"]
28427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
28428#[inline]
28429#[target_feature(enable = "neon")]
28430#[cfg_attr(test, assert_instr(cmtst))]
28431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28432pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
28433    unsafe {
28434        let c: int64x2_t = simd_and(a, b);
28435        let d: i64x2 = i64x2::new(0, 0);
28436        simd_ne(c, transmute(d))
28437    }
28438}
28439#[doc = "Compare bitwise Test bits nonzero"]
28440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
28441#[inline]
28442#[target_feature(enable = "neon")]
28443#[cfg_attr(test, assert_instr(cmtst))]
28444#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28445pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
28446    unsafe {
28447        let c: poly64x1_t = simd_and(a, b);
28448        let d: i64x1 = i64x1::new(0);
28449        simd_ne(c, transmute(d))
28450    }
28451}
28452#[doc = "Compare bitwise Test bits nonzero"]
28453#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
28454#[inline]
28455#[target_feature(enable = "neon")]
28456#[cfg_attr(test, assert_instr(cmtst))]
28457#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28458pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
28459    unsafe {
28460        let c: poly64x2_t = simd_and(a, b);
28461        let d: i64x2 = i64x2::new(0, 0);
28462        simd_ne(c, transmute(d))
28463    }
28464}
28465#[doc = "Unsigned compare bitwise Test bits nonzero"]
28466#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
28467#[inline]
28468#[target_feature(enable = "neon")]
28469#[cfg_attr(test, assert_instr(cmtst))]
28470#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28471pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
28472    unsafe {
28473        let c: uint64x1_t = simd_and(a, b);
28474        let d: u64x1 = u64x1::new(0);
28475        simd_ne(c, transmute(d))
28476    }
28477}
28478#[doc = "Unsigned compare bitwise Test bits nonzero"]
28479#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
28480#[inline]
28481#[target_feature(enable = "neon")]
28482#[cfg_attr(test, assert_instr(cmtst))]
28483#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28484pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
28485    unsafe {
28486        let c: uint64x2_t = simd_and(a, b);
28487        let d: u64x2 = u64x2::new(0, 0);
28488        simd_ne(c, transmute(d))
28489    }
28490}
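// Editor's note: a hedged sketch, not part of the generated API
// (`tst_demo` is a hypothetical name). Every `vtst*` lane is a mask:
// all ones where `a & b` is nonzero in that lane, all zeros elsewhere:
//
// ```
// use core::arch::aarch64::*;
//
// #[target_feature(enable = "neon")]
// fn tst_demo() {
//     let a = vdupq_n_u64(0b1010);
//     let b = vdupq_n_u64(0b0101);
//     // Disjoint bit patterns: a & b == 0, so the mask lane is 0.
//     assert_eq!(vgetq_lane_u64::<0>(vtstq_u64(a, b)), 0);
//     // a & a != 0, so the mask lane is all ones.
//     assert_eq!(vgetq_lane_u64::<0>(vtstq_u64(a, a)), u64::MAX);
// }
// ```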
28491#[doc = "Compare bitwise test bits nonzero"]
28492#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
28493#[inline]
28494#[target_feature(enable = "neon")]
28495#[cfg_attr(test, assert_instr(tst))]
28496#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28497pub fn vtstd_s64(a: i64, b: i64) -> u64 {
28498    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
28499}
28500#[doc = "Compare bitwise test bits nonzero"]
28501#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
28502#[inline]
28503#[target_feature(enable = "neon")]
28504#[cfg_attr(test, assert_instr(tst))]
28505#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28506pub fn vtstd_u64(a: u64, b: u64) -> u64 {
28507    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
28508}
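// Editor's note: the scalar `vtstd_*` forms above reuse the one-lane
// vector versions through `transmute`, so they return `u64::MAX` or `0`,
// the same all-ones/all-zeros mask convention as the vector forms.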
28509#[doc = "Signed saturating Accumulate of Unsigned value."]
28510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
28511#[inline]
28512#[target_feature(enable = "neon")]
28513#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28514#[cfg_attr(test, assert_instr(suqadd))]
28515pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
28516    unsafe extern "unadjusted" {
28517        #[cfg_attr(
28518            any(target_arch = "aarch64", target_arch = "arm64ec"),
28519            link_name = "llvm.aarch64.neon.suqadd.v8i8"
28520        )]
28521        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
28522    }
28523    unsafe { _vuqadd_s8(a, b) }
28524}
28525#[doc = "Signed saturating Accumulate of Unsigned value."]
28526#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
28527#[inline]
28528#[target_feature(enable = "neon")]
28529#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28530#[cfg_attr(test, assert_instr(suqadd))]
28531pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
28532    unsafe extern "unadjusted" {
28533        #[cfg_attr(
28534            any(target_arch = "aarch64", target_arch = "arm64ec"),
28535            link_name = "llvm.aarch64.neon.suqadd.v16i8"
28536        )]
28537        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
28538    }
28539    unsafe { _vuqaddq_s8(a, b) }
28540}
28541#[doc = "Signed saturating Accumulate of Unsigned value."]
28542#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
28543#[inline]
28544#[target_feature(enable = "neon")]
28545#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28546#[cfg_attr(test, assert_instr(suqadd))]
28547pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
28548    unsafe extern "unadjusted" {
28549        #[cfg_attr(
28550            any(target_arch = "aarch64", target_arch = "arm64ec"),
28551            link_name = "llvm.aarch64.neon.suqadd.v4i16"
28552        )]
28553        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
28554    }
28555    unsafe { _vuqadd_s16(a, b) }
28556}
28557#[doc = "Signed saturating Accumulate of Unsigned value."]
28558#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
28559#[inline]
28560#[target_feature(enable = "neon")]
28561#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28562#[cfg_attr(test, assert_instr(suqadd))]
28563pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
28564    unsafe extern "unadjusted" {
28565        #[cfg_attr(
28566            any(target_arch = "aarch64", target_arch = "arm64ec"),
28567            link_name = "llvm.aarch64.neon.suqadd.v8i16"
28568        )]
28569        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
28570    }
28571    unsafe { _vuqaddq_s16(a, b) }
28572}
28573#[doc = "Signed saturating Accumulate of Unsigned value."]
28574#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
28575#[inline]
28576#[target_feature(enable = "neon")]
28577#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28578#[cfg_attr(test, assert_instr(suqadd))]
28579pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
28580    unsafe extern "unadjusted" {
28581        #[cfg_attr(
28582            any(target_arch = "aarch64", target_arch = "arm64ec"),
28583            link_name = "llvm.aarch64.neon.suqadd.v2i32"
28584        )]
28585        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
28586    }
28587    unsafe { _vuqadd_s32(a, b) }
28588}
28589#[doc = "Signed saturating Accumulate of Unsigned value."]
28590#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
28591#[inline]
28592#[target_feature(enable = "neon")]
28593#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28594#[cfg_attr(test, assert_instr(suqadd))]
28595pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
28596    unsafe extern "unadjusted" {
28597        #[cfg_attr(
28598            any(target_arch = "aarch64", target_arch = "arm64ec"),
28599            link_name = "llvm.aarch64.neon.suqadd.v4i32"
28600        )]
28601        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
28602    }
28603    unsafe { _vuqaddq_s32(a, b) }
28604}
28605#[doc = "Signed saturating Accumulate of Unsigned value."]
28606#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
28607#[inline]
28608#[target_feature(enable = "neon")]
28609#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28610#[cfg_attr(test, assert_instr(suqadd))]
28611pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
28612    unsafe extern "unadjusted" {
28613        #[cfg_attr(
28614            any(target_arch = "aarch64", target_arch = "arm64ec"),
28615            link_name = "llvm.aarch64.neon.suqadd.v1i64"
28616        )]
28617        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
28618    }
28619    unsafe { _vuqadd_s64(a, b) }
28620}
28621#[doc = "Signed saturating Accumulate of Unsigned value."]
28622#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
28623#[inline]
28624#[target_feature(enable = "neon")]
28625#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28626#[cfg_attr(test, assert_instr(suqadd))]
28627pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
28628    unsafe extern "unadjusted" {
28629        #[cfg_attr(
28630            any(target_arch = "aarch64", target_arch = "arm64ec"),
28631            link_name = "llvm.aarch64.neon.suqadd.v2i64"
28632        )]
28633        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
28634    }
28635    unsafe { _vuqaddq_s64(a, b) }
28636}
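// Editor's note: a hedged sketch, not part of the generated API
// (`suqadd_demo` is a hypothetical name). `vuqadd*` adds an unsigned
// vector into a signed accumulator with signed saturation, which a plain
// wrapping `simd_add` cannot express:
//
// ```
// use core::arch::aarch64::*;
//
// #[target_feature(enable = "neon")]
// fn suqadd_demo() {
//     let acc = vdup_n_s8(100);
//     let inc = vdup_n_u8(200);
//     // 100 + 200 overflows i8, so every lane saturates to i8::MAX (127).
//     assert_eq!(vget_lane_s8::<0>(vuqadd_s8(acc, inc)), i8::MAX);
// }
// ```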
28637#[doc = "Signed saturating accumulate of unsigned value"]
28638#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
28639#[inline]
28640#[target_feature(enable = "neon")]
28641#[cfg_attr(test, assert_instr(suqadd))]
28642#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28643pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
28644    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
28645}
28646#[doc = "Signed saturating accumulate of unsigned value"]
28647#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
28648#[inline]
28649#[target_feature(enable = "neon")]
28650#[cfg_attr(test, assert_instr(suqadd))]
28651#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28652pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
28653    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
28654}
28655#[doc = "Signed saturating accumulate of unsigned value"]
28656#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
28657#[inline]
28658#[target_feature(enable = "neon")]
28659#[cfg_attr(test, assert_instr(suqadd))]
28660#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28661pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
28662    unsafe extern "unadjusted" {
28663        #[cfg_attr(
28664            any(target_arch = "aarch64", target_arch = "arm64ec"),
28665            link_name = "llvm.aarch64.neon.suqadd.i64"
28666        )]
28667        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
28668    }
28669    unsafe { _vuqaddd_s64(a, b) }
28670}
28671#[doc = "Signed saturating accumulate of unsigned value"]
28672#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
28673#[inline]
28674#[target_feature(enable = "neon")]
28675#[cfg_attr(test, assert_instr(suqadd))]
28676#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28677pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
28678    unsafe extern "unadjusted" {
28679        #[cfg_attr(
28680            any(target_arch = "aarch64", target_arch = "arm64ec"),
28681            link_name = "llvm.aarch64.neon.suqadd.i32"
28682        )]
28683        fn _vuqadds_s32(a: i32, b: u32) -> i32;
28684    }
28685    unsafe { _vuqadds_s32(a, b) }
28686}
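// Editor's note: the 8- and 16-bit scalar forms above splat the operands
// into one 64-bit vector, run the vector `suqadd`, and extract lane 0,
// because LLVM only exposes dedicated scalar `llvm.aarch64.neon.suqadd`
// intrinsics at 32 and 64 bits; `vuqadds_s32` and `vuqaddd_s64` call those
// directly.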
28687#[doc = "Dot product index form with unsigned and signed integers"]
28688#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"]
28689#[inline]
28690#[target_feature(enable = "neon,i8mm")]
28691#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
28692#[rustc_legacy_const_generics(3)]
28693#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
28694pub fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
28695    static_assert_uimm_bits!(LANE, 2);
28696    unsafe {
28697        let c: int32x4_t = transmute(c);
28698        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
28699        vusdot_s32(a, b, transmute(c))
28700    }
28701}
28702#[doc = "Dot product index form with unsigned and signed integers"]
28703#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"]
28704#[inline]
28705#[target_feature(enable = "neon,i8mm")]
28706#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
28707#[rustc_legacy_const_generics(3)]
28708#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
28709pub fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
28710    static_assert_uimm_bits!(LANE, 2);
28711    unsafe {
28712        let c: int32x4_t = transmute(c);
28713        let c: int32x4_t =
28714            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
28715        vusdotq_s32(a, b, transmute(c))
28716    }
28717}
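// Editor's note: a hedged sketch, not part of the generated API
// (`usdot_lane_demo` is a hypothetical name; it also assumes the unstable
// `stdarch_neon_i8mm` feature). `LANE` picks one 32-bit group of four
// signed bytes from `c`; that group is splatted across the lanes and fed
// to the non-indexed `usdot`, so every `i32` lane of `a` accumulates a
// four-way dot product against the same four signed bytes:
//
// ```
// use core::arch::aarch64::*;
//
// #[target_feature(enable = "neon,i8mm")]
// fn usdot_lane_demo(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
//     // Use the signed bytes c[12..16] (group 3) for both output lanes.
//     vusdot_laneq_s32::<3>(a, b, c)
// }
// ```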
28718#[doc = "Unzip vectors"]
28719#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
28720#[inline]
28721#[target_feature(enable = "neon,fp16")]
28722#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
28723#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28724pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
28725    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28726}
28727#[doc = "Unzip vectors"]
28728#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
28729#[inline]
28730#[target_feature(enable = "neon,fp16")]
28731#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
28732#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28733pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
28734    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28735}
28736#[doc = "Unzip vectors"]
28737#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
28738#[inline]
28739#[target_feature(enable = "neon")]
28740#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28741#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28742pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
28743    unsafe { simd_shuffle!(a, b, [0, 2]) }
28744}
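// Editor's note: as with `vtrn1`, the two-lane and 64-bit-lane `vuzp1`
// variants lower to `zip1`: taking the even-indexed elements of a
// two-element concatenation is the same as taking lane 0 of each input,
// so the `assert_instr(zip1)` attributes here and below are expected.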
28745#[doc = "Unzip vectors"]
28746#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
28747#[inline]
28748#[target_feature(enable = "neon")]
28749#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28750#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28751pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
28752    unsafe { simd_shuffle!(a, b, [0, 2]) }
28753}
28754#[doc = "Unzip vectors"]
28755#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
28756#[inline]
28757#[target_feature(enable = "neon")]
28758#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28759#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28760pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
28761    unsafe { simd_shuffle!(a, b, [0, 2]) }
28762}
28763#[doc = "Unzip vectors"]
28764#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
28765#[inline]
28766#[target_feature(enable = "neon")]
28767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28768#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28769pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
28770    unsafe { simd_shuffle!(a, b, [0, 2]) }
28771}
28772#[doc = "Unzip vectors"]
28773#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
28774#[inline]
28775#[target_feature(enable = "neon")]
28776#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28777#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28778pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
28779    unsafe { simd_shuffle!(a, b, [0, 2]) }
28780}
28781#[doc = "Unzip vectors"]
28782#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
28783#[inline]
28784#[target_feature(enable = "neon")]
28785#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28786#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28787pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
28788    unsafe { simd_shuffle!(a, b, [0, 2]) }
28789}
28790#[doc = "Unzip vectors"]
28791#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
28792#[inline]
28793#[target_feature(enable = "neon")]
28794#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28795#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28796pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
28797    unsafe { simd_shuffle!(a, b, [0, 2]) }
28798}
28799#[doc = "Unzip vectors"]
28800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
28801#[inline]
28802#[target_feature(enable = "neon")]
28803#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28804#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28805pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
28806    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28807}
28808#[doc = "Unzip vectors"]
28809#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
28810#[inline]
28811#[target_feature(enable = "neon")]
28812#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28813#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28814pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
28815    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28816}
28817#[doc = "Unzip vectors"]
28818#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
28819#[inline]
28820#[target_feature(enable = "neon")]
28821#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28822#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28823pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
28824    unsafe {
28825        simd_shuffle!(
28826            a,
28827            b,
28828            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
28829        )
28830    }
28831}
28832#[doc = "Unzip vectors"]
28833#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
28834#[inline]
28835#[target_feature(enable = "neon")]
28836#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28837#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28838pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
28839    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28840}
28841#[doc = "Unzip vectors"]
28842#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
28843#[inline]
28844#[target_feature(enable = "neon")]
28845#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28846#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28847pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
28848    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28849}
28850#[doc = "Unzip vectors"]
28851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
28852#[inline]
28853#[target_feature(enable = "neon")]
28854#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28855#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28856pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
28857    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28858}
28859#[doc = "Unzip vectors"]
28860#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
28861#[inline]
28862#[target_feature(enable = "neon")]
28863#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28864#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28865pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
28866    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28867}
28868#[doc = "Unzip vectors"]
28869#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
28870#[inline]
28871#[target_feature(enable = "neon")]
28872#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28873#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28874pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
28875    unsafe {
28876        simd_shuffle!(
28877            a,
28878            b,
28879            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
28880        )
28881    }
28882}
28883#[doc = "Unzip vectors"]
28884#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
28885#[inline]
28886#[target_feature(enable = "neon")]
28887#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28888#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28889pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
28890    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28891}
28892#[doc = "Unzip vectors"]
28893#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
28894#[inline]
28895#[target_feature(enable = "neon")]
28896#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28897#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28898pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
28899    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28900}
28901#[doc = "Unzip vectors"]
28902#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
28903#[inline]
28904#[target_feature(enable = "neon")]
28905#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28906#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28907pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
28908    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28909}
28910#[doc = "Unzip vectors"]
28911#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
28912#[inline]
28913#[target_feature(enable = "neon")]
28914#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28915#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28916pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
28917    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28918}
28919#[doc = "Unzip vectors"]
28920#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
28921#[inline]
28922#[target_feature(enable = "neon")]
28923#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28924#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28925pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
28926    unsafe {
28927        simd_shuffle!(
28928            a,
28929            b,
28930            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
28931        )
28932    }
28933}
28934#[doc = "Unzip vectors"]
28935#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
28936#[inline]
28937#[target_feature(enable = "neon")]
28938#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28939#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28940pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
28941    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
28942}
28943#[doc = "Unzip vectors"]
28944#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
28945#[inline]
28946#[target_feature(enable = "neon")]
28947#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28948#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
28949pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
28950    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
28951}
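// Editor's note: a hedged sketch, not part of the generated API
// (`split_complex` is a hypothetical name). `vuzp1*` keeps the
// even-indexed elements and `vuzp2*` (below) the odd-indexed ones, so the
// pair deinterleaves packed data such as (re, im) pairs:
//
// ```
// use core::arch::aarch64::*;
//
// #[target_feature(enable = "neon")]
// fn split_complex(lo: float32x4_t, hi: float32x4_t) -> (float32x4_t, float32x4_t) {
//     // lo = [re0 im0 re1 im1], hi = [re2 im2 re3 im3]
//     let re = vuzp1q_f32(lo, hi); // [re0 re1 re2 re3]
//     let im = vuzp2q_f32(lo, hi); // [im0 im1 im2 im3]
//     (re, im)
// }
// ```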
28952#[doc = "Unzip vectors"]
28953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
28954#[inline]
28955#[target_feature(enable = "neon,fp16")]
28956#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
28957#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28958pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
28959    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
28960}
28961#[doc = "Unzip vectors"]
28962#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
28963#[inline]
28964#[target_feature(enable = "neon,fp16")]
28965#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
28966#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
28967pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
28968    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
28969}
28970#[doc = "Unzip vectors"]
28971#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
28972#[inline]
28973#[target_feature(enable = "neon")]
28974#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28975#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28976pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
28977    unsafe { simd_shuffle!(a, b, [1, 3]) }
28978}
28979#[doc = "Unzip vectors"]
28980#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
28981#[inline]
28982#[target_feature(enable = "neon")]
28983#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28984#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28985pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
28986    unsafe { simd_shuffle!(a, b, [1, 3]) }
28987}
28988#[doc = "Unzip vectors"]
28989#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
28990#[inline]
28991#[target_feature(enable = "neon")]
28992#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28993#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28994pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
28995    unsafe { simd_shuffle!(a, b, [1, 3]) }
28996}
28997#[doc = "Unzip vectors"]
28998#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
28999#[inline]
29000#[target_feature(enable = "neon")]
29001#[stable(feature = "neon_intrinsics", since = "1.59.0")]
29002#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
29003pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
29004    unsafe { simd_shuffle!(a, b, [1, 3]) }
29005}
29006#[doc = "Unzip vectors"]
29007#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
29008#[inline]
29009#[target_feature(enable = "neon")]
29010#[stable(feature = "neon_intrinsics", since = "1.59.0")]
29011#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
29012pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
29013    unsafe { simd_shuffle!(a, b, [1, 3]) }
29014}
29015#[doc = "Unzip vectors"]
29016#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
29017#[inline]
29018#[target_feature(enable = "neon")]
29019#[stable(feature = "neon_intrinsics", since = "1.59.0")]
29020#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
29021pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
29022    unsafe { simd_shuffle!(a, b, [1, 3]) }
29023}
29024#[doc = "Unzip vectors"]
29025#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
29026#[inline]
29027#[target_feature(enable = "neon")]
29028#[stable(feature = "neon_intrinsics", since = "1.59.0")]
29029#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
29030pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
29031    unsafe { simd_shuffle!(a, b, [1, 3]) }
29032}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
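// Illustrative sketch, not part of the generated output: `uzp2` concatenates
// the odd-numbered lanes of its two inputs, so `vuzp2_s16([a0, a1, a2, a3],
// [b0, b1, b2, b3])` yields `[a1, a3, b1, b3]`. The test assumes it runs on a
// NEON-capable aarch64 target; the module and test names are hypothetical.
#[cfg(test)]
mod uzp2_example {
    use super::*;

    #[test]
    fn vuzp2_s16_gathers_odd_lanes() {
        unsafe {
            let a: int16x4_t = core::mem::transmute([0i16, 1, 2, 3]);
            let b: int16x4_t = core::mem::transmute([4i16, 5, 6, 7]);
            // Odd lanes of `a` first, then odd lanes of `b`.
            let r: [i16; 4] = core::mem::transmute(vuzp2_s16(a, b));
            assert_eq!(r, [1, 3, 5, 7]);
        }
    }
}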
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
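// Illustrative sketch, not part of the generated output: per 64-bit lane, XAR
// XORs the two inputs and rotates the result right by `IMM6`, i.e. it computes
// `(a ^ b).rotate_right(IMM6)`. The test assumes a target with the `sha3`
// feature available at runtime; the module and test names are hypothetical.
#[cfg(test)]
mod xar_example {
    use super::*;

    #[test]
    fn vxarq_u64_xors_then_rotates_right() {
        unsafe {
            let x = [0x0123_4567_89ab_cdef_u64, 0xffff_0000_ffff_0000];
            let y = [0xfedc_ba98_7654_3210_u64, 0x0000_ffff_0000_ffff];
            let a: uint64x2_t = core::mem::transmute(x);
            let b: uint64x2_t = core::mem::transmute(y);
            let r: [u64; 2] = core::mem::transmute(vxarq_u64::<8>(a, b));
            // Each lane is the XOR of the inputs, rotated right by 8 bits.
            assert_eq!(
                r,
                [
                    (x[0] ^ y[0]).rotate_right(8),
                    (x[1] ^ y[1]).rotate_right(8)
                ]
            );
        }
    }
}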
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
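// Illustrative sketch, not part of the generated output: `zip1` interleaves
// the low halves of its inputs, so `vzip1_s16([a0, a1, a2, a3],
// [b0, b1, b2, b3])` yields `[a0, b0, a1, b1]`. The test assumes a
// NEON-capable aarch64 target; the module and test names are hypothetical.
#[cfg(test)]
mod zip1_example {
    use super::*;

    #[test]
    fn vzip1_s16_interleaves_low_halves() {
        unsafe {
            let a: int16x4_t = core::mem::transmute([0i16, 1, 2, 3]);
            let b: int16x4_t = core::mem::transmute([4i16, 5, 6, 7]);
            // Lanes 0..2 of `a` and `b`, alternating.
            let r: [i16; 4] = core::mem::transmute(vzip1_s16(a, b));
            assert_eq!(r, [0, 4, 1, 5]);
        }
    }
}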
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
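// Illustrative sketch, not part of the generated output: `zip2` is the
// complement of `zip1`, interleaving the high halves of its inputs, so
// `vzip2_s16([a0, a1, a2, a3], [b0, b1, b2, b3])` yields `[a2, b2, a3, b3]`;
// together, `zip1` and `zip2` produce the full interleave of two vectors. The
// test assumes a NEON-capable aarch64 target; names below are hypothetical.
#[cfg(test)]
mod zip2_example {
    use super::*;

    #[test]
    fn vzip2_s16_interleaves_high_halves() {
        unsafe {
            let a: int16x4_t = core::mem::transmute([0i16, 1, 2, 3]);
            let b: int16x4_t = core::mem::transmute([4i16, 5, 6, 7]);
            // Lanes 2..4 of `a` and `b`, alternating.
            let r: [i16; 4] = core::mem::transmute(vzip2_s16(a, b));
            assert_eq!(r, [2, 6, 3, 7]);
        }
    }
}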