core/stdarch/crates/core_arch/src/aarch64/neon/generated.rs

// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
//
// ```
// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
// ```
#![allow(improper_ctypes)]

#[cfg(test)]
use stdarch_test::assert_instr;

use super::*;

15#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
16#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
17#[inline]
18#[target_feature(enable = "crc")]
19#[cfg(not(target_arch = "arm"))]
20#[cfg_attr(test, assert_instr(crc32cx))]
21#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
22pub fn __crc32cd(crc: u32, data: u64) -> u32 {
23    unsafe extern "unadjusted" {
24        #[cfg_attr(
25            any(target_arch = "aarch64", target_arch = "arm64ec"),
26            link_name = "llvm.aarch64.crc32cx"
27        )]
28        fn ___crc32cd(crc: u32, data: u64) -> u32;
29    }
30    unsafe { ___crc32cd(crc, data) }
31}
32#[doc = "CRC32 single round checksum for quad words (64 bits)."]
33#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
34#[inline]
35#[target_feature(enable = "crc")]
36#[cfg(not(target_arch = "arm"))]
37#[cfg_attr(test, assert_instr(crc32x))]
38#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
39pub fn __crc32d(crc: u32, data: u64) -> u32 {
40    unsafe extern "unadjusted" {
41        #[cfg_attr(
42            any(target_arch = "aarch64", target_arch = "arm64ec"),
43            link_name = "llvm.aarch64.crc32x"
44        )]
45        fn ___crc32d(crc: u32, data: u64) -> u32;
46    }
47    unsafe { ___crc32d(crc, data) }
48}
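// Illustrative usage sketch (an editor-added example, not generated from the
// spec): folding a stream of 64-bit words into a CRC32-C checksum one
// doubleword per call. The helper name and the all-ones seed/finalize
// convention are assumptions for demonstration; only `__crc32cd` is defined above.
#[cfg(all(test, not(target_arch = "arm")))]
#[allow(dead_code)]
#[target_feature(enable = "crc")]
fn example_crc32c_words(words: &[u64]) -> u32 {
    // Conventional CRC framing: start from !0 and invert the final value.
    let mut crc = !0u32;
    for &w in words {
        crc = __crc32cd(crc, w);
    }
    !crc
}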
49#[doc = "Signed Absolute difference and Accumulate Long"]
50#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
51#[inline]
52#[target_feature(enable = "neon")]
53#[stable(feature = "neon_intrinsics", since = "1.59.0")]
54#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
55pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
56    unsafe {
57        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
58        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
59        let f: int8x8_t = vabd_s8(d, e);
60        let f: uint8x8_t = simd_cast(f);
61        simd_add(a, simd_cast(f))
62    }
63}
64#[doc = "Signed Absolute difference and Accumulate Long"]
65#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
66#[inline]
67#[target_feature(enable = "neon")]
68#[stable(feature = "neon_intrinsics", since = "1.59.0")]
69#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
70pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
71    unsafe {
72        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
73        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
74        let f: int16x4_t = vabd_s16(d, e);
75        let f: uint16x4_t = simd_cast(f);
76        simd_add(a, simd_cast(f))
77    }
78}
79#[doc = "Signed Absolute difference and Accumulate Long"]
80#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
81#[inline]
82#[target_feature(enable = "neon")]
83#[stable(feature = "neon_intrinsics", since = "1.59.0")]
84#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))]
85pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
86    unsafe {
87        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
88        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
89        let f: int32x2_t = vabd_s32(d, e);
90        let f: uint32x2_t = simd_cast(f);
91        simd_add(a, simd_cast(f))
92    }
93}
94#[doc = "Unsigned Absolute difference and Accumulate Long"]
95#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
96#[inline]
97#[target_feature(enable = "neon")]
98#[stable(feature = "neon_intrinsics", since = "1.59.0")]
99#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
100pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
101    unsafe {
102        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
103        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
104        let f: uint8x8_t = vabd_u8(d, e);
105        simd_add(a, simd_cast(f))
106    }
107}
108#[doc = "Unsigned Absolute difference and Accumulate Long"]
109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
110#[inline]
111#[target_feature(enable = "neon")]
112#[stable(feature = "neon_intrinsics", since = "1.59.0")]
113#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
114pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
115    unsafe {
116        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
117        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
118        let f: uint16x4_t = vabd_u16(d, e);
119        simd_add(a, simd_cast(f))
120    }
121}
122#[doc = "Unsigned Absolute difference and Accumulate Long"]
123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
124#[inline]
125#[target_feature(enable = "neon")]
126#[stable(feature = "neon_intrinsics", since = "1.59.0")]
127#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))]
128pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
129    unsafe {
130        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
131        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
132        let f: uint32x2_t = vabd_u32(d, e);
133        simd_add(a, simd_cast(f))
134    }
135}
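// Illustrative usage sketch (an editor-added example, not generated from the
// spec): the `vabal_high_*` family accumulates |b[i] - c[i]| for the *upper*
// halves of the inputs into a widened accumulator, a core step of a
// sum-of-absolute-differences loop. The splat values below are arbitrary
// demonstration inputs.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vabal_high_s8() -> int16x8_t {
    let acc = vdupq_n_s16(0);
    let b = vdupq_n_s8(5);
    let c = vdupq_n_s8(-3);
    // Each output lane is 0 + |5 - (-3)| = 8, widened to i16.
    vabal_high_s8(acc, b, c)
}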
136#[doc = "Absolute difference between the arguments of Floating"]
137#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
138#[inline]
139#[target_feature(enable = "neon")]
140#[stable(feature = "neon_intrinsics", since = "1.59.0")]
141#[cfg_attr(test, assert_instr(fabd))]
142pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
143    unsafe extern "unadjusted" {
144        #[cfg_attr(
145            any(target_arch = "aarch64", target_arch = "arm64ec"),
146            link_name = "llvm.aarch64.neon.fabd.v1f64"
147        )]
148        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
149    }
150    unsafe { _vabd_f64(a, b) }
151}
152#[doc = "Absolute difference between the arguments of Floating"]
153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
154#[inline]
155#[target_feature(enable = "neon")]
156#[stable(feature = "neon_intrinsics", since = "1.59.0")]
157#[cfg_attr(test, assert_instr(fabd))]
158pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
159    unsafe extern "unadjusted" {
160        #[cfg_attr(
161            any(target_arch = "aarch64", target_arch = "arm64ec"),
162            link_name = "llvm.aarch64.neon.fabd.v2f64"
163        )]
164        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
165    }
166    unsafe { _vabdq_f64(a, b) }
167}
168#[doc = "Floating-point absolute difference"]
169#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
170#[inline]
171#[target_feature(enable = "neon")]
172#[stable(feature = "neon_intrinsics", since = "1.59.0")]
173#[cfg_attr(test, assert_instr(fabd))]
174pub fn vabdd_f64(a: f64, b: f64) -> f64 {
175    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
176}
177#[doc = "Floating-point absolute difference"]
178#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
179#[inline]
180#[target_feature(enable = "neon")]
181#[stable(feature = "neon_intrinsics", since = "1.59.0")]
182#[cfg_attr(test, assert_instr(fabd))]
183pub fn vabds_f32(a: f32, b: f32) -> f32 {
184    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
185}
186#[doc = "Floating-point absolute difference"]
187#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
188#[inline]
189#[target_feature(enable = "neon,fp16")]
190#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
191#[cfg_attr(test, assert_instr(fabd))]
192pub fn vabdh_f16(a: f16, b: f16) -> f16 {
193    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
194}
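// Illustrative usage sketch (an editor-added example, not generated from the
// spec): the scalar forms compute |a - b| via a single `fabd`, rather than the
// separate subtract-then-abs a plain `(a - b).abs()` would typically compile to.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vabds_f32() -> f32 {
    // |1.5 - (-2.0)| = 3.5
    vabds_f32(1.5, -2.0)
}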
195#[doc = "Signed Absolute difference Long"]
196#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
197#[inline]
198#[target_feature(enable = "neon")]
199#[stable(feature = "neon_intrinsics", since = "1.59.0")]
200#[cfg_attr(test, assert_instr(sabdl))]
201pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
202    unsafe {
203        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
204        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
205        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
206        simd_cast(e)
207    }
208}
209#[doc = "Signed Absolute difference Long"]
210#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
211#[inline]
212#[target_feature(enable = "neon")]
213#[stable(feature = "neon_intrinsics", since = "1.59.0")]
214#[cfg_attr(test, assert_instr(sabdl))]
215pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
216    unsafe {
217        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
218        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
219        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
220        simd_cast(e)
221    }
222}
223#[doc = "Signed Absolute difference Long"]
224#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
225#[inline]
226#[target_feature(enable = "neon")]
227#[stable(feature = "neon_intrinsics", since = "1.59.0")]
228#[cfg_attr(test, assert_instr(sabdl))]
229pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
230    unsafe {
231        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
232        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
233        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
234        simd_cast(e)
235    }
236}
237#[doc = "Unsigned Absolute difference Long"]
238#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
239#[inline]
240#[target_feature(enable = "neon")]
241#[cfg_attr(test, assert_instr(uabdl))]
242#[stable(feature = "neon_intrinsics", since = "1.59.0")]
243pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
244    unsafe {
245        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
246        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
247        simd_cast(vabd_u8(c, d))
248    }
249}
250#[doc = "Unsigned Absolute difference Long"]
251#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
252#[inline]
253#[target_feature(enable = "neon")]
254#[cfg_attr(test, assert_instr(uabdl))]
255#[stable(feature = "neon_intrinsics", since = "1.59.0")]
256pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
257    unsafe {
258        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
259        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
260        simd_cast(vabd_u16(c, d))
261    }
262}
263#[doc = "Unsigned Absolute difference Long"]
264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
265#[inline]
266#[target_feature(enable = "neon")]
267#[cfg_attr(test, assert_instr(uabdl))]
268#[stable(feature = "neon_intrinsics", since = "1.59.0")]
269pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
270    unsafe {
271        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
272        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
273        simd_cast(vabd_u32(c, d))
274    }
275}
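// Illustrative usage sketch (an editor-added example, not generated from the
// spec): `vabdl_high_*` widens as it differences, so the result cannot
// overflow even for extreme lane values. Demonstration inputs are arbitrary splats.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vabdl_high_u8() -> uint16x8_t {
    let a = vdupq_n_u8(0);
    let b = vdupq_n_u8(255);
    // Each output lane is |0 - 255| = 255 as a u16.
    vabdl_high_u8(a, b)
}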
276#[doc = "Floating-point absolute value"]
277#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
278#[inline]
279#[target_feature(enable = "neon")]
280#[cfg_attr(test, assert_instr(fabs))]
281#[stable(feature = "neon_intrinsics", since = "1.59.0")]
282pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
283    unsafe { simd_fabs(a) }
284}
285#[doc = "Floating-point absolute value"]
286#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
287#[inline]
288#[target_feature(enable = "neon")]
289#[cfg_attr(test, assert_instr(fabs))]
290#[stable(feature = "neon_intrinsics", since = "1.59.0")]
291pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
292    unsafe { simd_fabs(a) }
293}
294#[doc = "Absolute Value (wrapping)."]
295#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
296#[inline]
297#[target_feature(enable = "neon")]
298#[stable(feature = "neon_intrinsics", since = "1.59.0")]
299#[cfg_attr(test, assert_instr(abs))]
300pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
301    unsafe extern "unadjusted" {
302        #[cfg_attr(
303            any(target_arch = "aarch64", target_arch = "arm64ec"),
304            link_name = "llvm.aarch64.neon.abs.v1i64"
305        )]
306        fn _vabs_s64(a: int64x1_t) -> int64x1_t;
307    }
308    unsafe { _vabs_s64(a) }
309}
310#[doc = "Absolute Value (wrapping)."]
311#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
312#[inline]
313#[target_feature(enable = "neon")]
314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
315#[cfg_attr(test, assert_instr(abs))]
316pub fn vabsd_s64(a: i64) -> i64 {
317    unsafe extern "unadjusted" {
318        #[cfg_attr(
319            any(target_arch = "aarch64", target_arch = "arm64ec"),
320            link_name = "llvm.aarch64.neon.abs.i64"
321        )]
322        fn _vabsd_s64(a: i64) -> i64;
323    }
324    unsafe { _vabsd_s64(a) }
325}
326#[doc = "Absolute Value (wrapping)."]
327#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
328#[inline]
329#[target_feature(enable = "neon")]
330#[stable(feature = "neon_intrinsics", since = "1.59.0")]
331#[cfg_attr(test, assert_instr(abs))]
332pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
333    unsafe extern "unadjusted" {
334        #[cfg_attr(
335            any(target_arch = "aarch64", target_arch = "arm64ec"),
336            link_name = "llvm.aarch64.neon.abs.v2i64"
337        )]
338        fn _vabsq_s64(a: int64x2_t) -> int64x2_t;
339    }
340    unsafe { _vabsq_s64(a) }
341}
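// Illustrative note (an editor-added example, not generated from the spec):
// "wrapping" means the one unrepresentable case, i64::MIN, maps back to
// i64::MIN, exactly like i64::wrapping_abs in scalar Rust.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vabsd_s64() -> i64 {
    debug_assert_eq!(vabsd_s64(-7), 7);
    // |i64::MIN| does not fit in i64, so the value wraps to itself.
    vabsd_s64(i64::MIN)
}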
342#[doc = "Add"]
343#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
344#[inline]
345#[target_feature(enable = "neon")]
346#[stable(feature = "neon_intrinsics", since = "1.59.0")]
347#[cfg_attr(test, assert_instr(nop))]
348pub fn vaddd_s64(a: i64, b: i64) -> i64 {
349    a.wrapping_add(b)
350}
351#[doc = "Add"]
352#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
353#[inline]
354#[target_feature(enable = "neon")]
355#[stable(feature = "neon_intrinsics", since = "1.59.0")]
356#[cfg_attr(test, assert_instr(nop))]
357pub fn vaddd_u64(a: u64, b: u64) -> u64 {
358    a.wrapping_add(b)
359}
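// Illustrative note (an editor-added example, not generated from the spec):
// `vaddd_*` are plain wrapping scalar adds, provided for intrinsic-source
// compatibility; the `assert_instr(nop)` above reflects that no dedicated
// SIMD instruction is required.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vaddd_u64() -> u64 {
    // Wraps just like u64::wrapping_add: u64::MAX + 1 == 0.
    vaddd_u64(u64::MAX, 1)
}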
360#[doc = "Signed Add Long across Vector"]
361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
362#[inline]
363#[target_feature(enable = "neon")]
364#[stable(feature = "neon_intrinsics", since = "1.59.0")]
365#[cfg_attr(test, assert_instr(saddlv))]
366pub fn vaddlv_s16(a: int16x4_t) -> i32 {
367    unsafe extern "unadjusted" {
368        #[cfg_attr(
369            any(target_arch = "aarch64", target_arch = "arm64ec"),
370            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
371        )]
372        fn _vaddlv_s16(a: int16x4_t) -> i32;
373    }
374    unsafe { _vaddlv_s16(a) }
375}
376#[doc = "Signed Add Long across Vector"]
377#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
378#[inline]
379#[target_feature(enable = "neon")]
380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
381#[cfg_attr(test, assert_instr(saddlv))]
382pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
383    unsafe extern "unadjusted" {
384        #[cfg_attr(
385            any(target_arch = "aarch64", target_arch = "arm64ec"),
386            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
387        )]
388        fn _vaddlvq_s16(a: int16x8_t) -> i32;
389    }
390    unsafe { _vaddlvq_s16(a) }
391}
392#[doc = "Signed Add Long across Vector"]
393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
394#[inline]
395#[target_feature(enable = "neon")]
396#[stable(feature = "neon_intrinsics", since = "1.59.0")]
397#[cfg_attr(test, assert_instr(saddlv))]
398pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
399    unsafe extern "unadjusted" {
400        #[cfg_attr(
401            any(target_arch = "aarch64", target_arch = "arm64ec"),
402            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
403        )]
404        fn _vaddlvq_s32(a: int32x4_t) -> i64;
405    }
406    unsafe { _vaddlvq_s32(a) }
407}
408#[doc = "Signed Add Long across Vector"]
409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
410#[inline]
411#[target_feature(enable = "neon")]
412#[stable(feature = "neon_intrinsics", since = "1.59.0")]
413#[cfg_attr(test, assert_instr(saddlp))]
414pub fn vaddlv_s32(a: int32x2_t) -> i64 {
415    unsafe extern "unadjusted" {
416        #[cfg_attr(
417            any(target_arch = "aarch64", target_arch = "arm64ec"),
418            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
419        )]
420        fn _vaddlv_s32(a: int32x2_t) -> i64;
421    }
422    unsafe { _vaddlv_s32(a) }
423}
424#[doc = "Signed Add Long across Vector"]
425#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
426#[inline]
427#[target_feature(enable = "neon")]
428#[stable(feature = "neon_intrinsics", since = "1.59.0")]
429#[cfg_attr(test, assert_instr(saddlv))]
430pub fn vaddlv_s8(a: int8x8_t) -> i16 {
431    unsafe extern "unadjusted" {
432        #[cfg_attr(
433            any(target_arch = "aarch64", target_arch = "arm64ec"),
434            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
435        )]
436        fn _vaddlv_s8(a: int8x8_t) -> i32;
437    }
438    unsafe { _vaddlv_s8(a) as i16 }
439}
440#[doc = "Signed Add Long across Vector"]
441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
442#[inline]
443#[target_feature(enable = "neon")]
444#[stable(feature = "neon_intrinsics", since = "1.59.0")]
445#[cfg_attr(test, assert_instr(saddlv))]
446pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
447    unsafe extern "unadjusted" {
448        #[cfg_attr(
449            any(target_arch = "aarch64", target_arch = "arm64ec"),
450            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
451        )]
452        fn _vaddlvq_s8(a: int8x16_t) -> i32;
453    }
454    unsafe { _vaddlvq_s8(a) as i16 }
455}
456#[doc = "Unsigned Add Long across Vector"]
457#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
458#[inline]
459#[target_feature(enable = "neon")]
460#[stable(feature = "neon_intrinsics", since = "1.59.0")]
461#[cfg_attr(test, assert_instr(uaddlv))]
462pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
463    unsafe extern "unadjusted" {
464        #[cfg_attr(
465            any(target_arch = "aarch64", target_arch = "arm64ec"),
466            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
467        )]
468        fn _vaddlv_u16(a: uint16x4_t) -> u32;
469    }
470    unsafe { _vaddlv_u16(a) }
471}
472#[doc = "Unsigned Add Long across Vector"]
473#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
474#[inline]
475#[target_feature(enable = "neon")]
476#[stable(feature = "neon_intrinsics", since = "1.59.0")]
477#[cfg_attr(test, assert_instr(uaddlv))]
478pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
479    unsafe extern "unadjusted" {
480        #[cfg_attr(
481            any(target_arch = "aarch64", target_arch = "arm64ec"),
482            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
483        )]
484        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
485    }
486    unsafe { _vaddlvq_u16(a) }
487}
488#[doc = "Unsigned Add Long across Vector"]
489#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
490#[inline]
491#[target_feature(enable = "neon")]
492#[stable(feature = "neon_intrinsics", since = "1.59.0")]
493#[cfg_attr(test, assert_instr(uaddlv))]
494pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
495    unsafe extern "unadjusted" {
496        #[cfg_attr(
497            any(target_arch = "aarch64", target_arch = "arm64ec"),
498            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
499        )]
500        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
501    }
502    unsafe { _vaddlvq_u32(a) }
503}
504#[doc = "Unsigned Add Long across Vector"]
505#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
506#[inline]
507#[target_feature(enable = "neon")]
508#[stable(feature = "neon_intrinsics", since = "1.59.0")]
509#[cfg_attr(test, assert_instr(uaddlp))]
510pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
511    unsafe extern "unadjusted" {
512        #[cfg_attr(
513            any(target_arch = "aarch64", target_arch = "arm64ec"),
514            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
515        )]
516        fn _vaddlv_u32(a: uint32x2_t) -> u64;
517    }
518    unsafe { _vaddlv_u32(a) }
519}
520#[doc = "Unsigned Add Long across Vector"]
521#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
522#[inline]
523#[target_feature(enable = "neon")]
524#[stable(feature = "neon_intrinsics", since = "1.59.0")]
525#[cfg_attr(test, assert_instr(uaddlv))]
526pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
527    unsafe extern "unadjusted" {
528        #[cfg_attr(
529            any(target_arch = "aarch64", target_arch = "arm64ec"),
530            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
531        )]
532        fn _vaddlv_u8(a: uint8x8_t) -> i32;
533    }
534    unsafe { _vaddlv_u8(a) as u16 }
535}
536#[doc = "Unsigned Add Long across Vector"]
537#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
538#[inline]
539#[target_feature(enable = "neon")]
540#[stable(feature = "neon_intrinsics", since = "1.59.0")]
541#[cfg_attr(test, assert_instr(uaddlv))]
542pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
543    unsafe extern "unadjusted" {
544        #[cfg_attr(
545            any(target_arch = "aarch64", target_arch = "arm64ec"),
546            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
547        )]
548        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
549    }
550    unsafe { _vaddlvq_u8(a) as u16 }
551}
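// Illustrative usage sketch (an editor-added example, not generated from the
// spec): the long reductions widen before summing, so a full vector of extreme
// lanes still cannot overflow the scalar result.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vaddlv_s16() -> i32 {
    // 4 * 32767 = 131068, which fits in i32 but not in i16.
    vaddlv_s16(vdup_n_s16(i16::MAX))
}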
552#[doc = "Floating-point add across vector"]
553#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
554#[inline]
555#[target_feature(enable = "neon")]
556#[stable(feature = "neon_intrinsics", since = "1.59.0")]
557#[cfg_attr(test, assert_instr(faddp))]
558pub fn vaddv_f32(a: float32x2_t) -> f32 {
559    unsafe extern "unadjusted" {
560        #[cfg_attr(
561            any(target_arch = "aarch64", target_arch = "arm64ec"),
562            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
563        )]
564        fn _vaddv_f32(a: float32x2_t) -> f32;
565    }
566    unsafe { _vaddv_f32(a) }
567}
568#[doc = "Floating-point add across vector"]
569#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
570#[inline]
571#[target_feature(enable = "neon")]
572#[stable(feature = "neon_intrinsics", since = "1.59.0")]
573#[cfg_attr(test, assert_instr(faddp))]
574pub fn vaddvq_f32(a: float32x4_t) -> f32 {
575    unsafe extern "unadjusted" {
576        #[cfg_attr(
577            any(target_arch = "aarch64", target_arch = "arm64ec"),
578            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
579        )]
580        fn _vaddvq_f32(a: float32x4_t) -> f32;
581    }
582    unsafe { _vaddvq_f32(a) }
583}
584#[doc = "Floating-point add across vector"]
585#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
586#[inline]
587#[target_feature(enable = "neon")]
588#[stable(feature = "neon_intrinsics", since = "1.59.0")]
589#[cfg_attr(test, assert_instr(faddp))]
590pub fn vaddvq_f64(a: float64x2_t) -> f64 {
591    unsafe extern "unadjusted" {
592        #[cfg_attr(
593            any(target_arch = "aarch64", target_arch = "arm64ec"),
594            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
595        )]
596        fn _vaddvq_f64(a: float64x2_t) -> f64;
597    }
598    unsafe { _vaddvq_f64(a) }
599}
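// Illustrative usage sketch (an editor-added example, not generated from the
// spec): the floating-point reductions sum lanes pairwise (`faddp`), so
// rounding can differ from a strict left-to-right scalar sum of the same lanes.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vaddvq_f32() -> f32 {
    let data = [1.0f32, 2.0, 3.0, 4.0];
    // SAFETY: `data` is a valid, readable array of four f32 values.
    let v = unsafe { vld1q_f32(data.as_ptr()) };
    // 1.0 + 2.0 + 3.0 + 4.0 = 10.0 (exact here, so summation order is moot).
    vaddvq_f32(v)
}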
600#[doc = "Add across vector"]
601#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
602#[inline]
603#[target_feature(enable = "neon")]
604#[stable(feature = "neon_intrinsics", since = "1.59.0")]
605#[cfg_attr(test, assert_instr(addp))]
606pub fn vaddv_s32(a: int32x2_t) -> i32 {
607    unsafe extern "unadjusted" {
608        #[cfg_attr(
609            any(target_arch = "aarch64", target_arch = "arm64ec"),
610            link_name = "llvm.aarch64.neon.saddv.i32.v2i32"
611        )]
612        fn _vaddv_s32(a: int32x2_t) -> i32;
613    }
614    unsafe { _vaddv_s32(a) }
615}
616#[doc = "Add across vector"]
617#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
618#[inline]
619#[target_feature(enable = "neon")]
620#[stable(feature = "neon_intrinsics", since = "1.59.0")]
621#[cfg_attr(test, assert_instr(addv))]
622pub fn vaddv_s8(a: int8x8_t) -> i8 {
623    unsafe extern "unadjusted" {
624        #[cfg_attr(
625            any(target_arch = "aarch64", target_arch = "arm64ec"),
626            link_name = "llvm.aarch64.neon.saddv.i8.v8i8"
627        )]
628        fn _vaddv_s8(a: int8x8_t) -> i8;
629    }
630    unsafe { _vaddv_s8(a) }
631}
632#[doc = "Add across vector"]
633#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
634#[inline]
635#[target_feature(enable = "neon")]
636#[stable(feature = "neon_intrinsics", since = "1.59.0")]
637#[cfg_attr(test, assert_instr(addv))]
638pub fn vaddvq_s8(a: int8x16_t) -> i8 {
639    unsafe extern "unadjusted" {
640        #[cfg_attr(
641            any(target_arch = "aarch64", target_arch = "arm64ec"),
642            link_name = "llvm.aarch64.neon.saddv.i8.v16i8"
643        )]
644        fn _vaddvq_s8(a: int8x16_t) -> i8;
645    }
646    unsafe { _vaddvq_s8(a) }
647}
648#[doc = "Add across vector"]
649#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
650#[inline]
651#[target_feature(enable = "neon")]
652#[stable(feature = "neon_intrinsics", since = "1.59.0")]
653#[cfg_attr(test, assert_instr(addv))]
654pub fn vaddv_s16(a: int16x4_t) -> i16 {
655    unsafe extern "unadjusted" {
656        #[cfg_attr(
657            any(target_arch = "aarch64", target_arch = "arm64ec"),
658            link_name = "llvm.aarch64.neon.saddv.i16.v4i16"
659        )]
660        fn _vaddv_s16(a: int16x4_t) -> i16;
661    }
662    unsafe { _vaddv_s16(a) }
663}
664#[doc = "Add across vector"]
665#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
666#[inline]
667#[target_feature(enable = "neon")]
668#[stable(feature = "neon_intrinsics", since = "1.59.0")]
669#[cfg_attr(test, assert_instr(addv))]
670pub fn vaddvq_s16(a: int16x8_t) -> i16 {
671    unsafe extern "unadjusted" {
672        #[cfg_attr(
673            any(target_arch = "aarch64", target_arch = "arm64ec"),
674            link_name = "llvm.aarch64.neon.saddv.i16.v8i16"
675        )]
676        fn _vaddvq_s16(a: int16x8_t) -> i16;
677    }
678    unsafe { _vaddvq_s16(a) }
679}
680#[doc = "Add across vector"]
681#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
682#[inline]
683#[target_feature(enable = "neon")]
684#[stable(feature = "neon_intrinsics", since = "1.59.0")]
685#[cfg_attr(test, assert_instr(addv))]
686pub fn vaddvq_s32(a: int32x4_t) -> i32 {
687    unsafe extern "unadjusted" {
688        #[cfg_attr(
689            any(target_arch = "aarch64", target_arch = "arm64ec"),
690            link_name = "llvm.aarch64.neon.saddv.i32.v4i32"
691        )]
692        fn _vaddvq_s32(a: int32x4_t) -> i32;
693    }
694    unsafe { _vaddvq_s32(a) }
695}
696#[doc = "Add across vector"]
697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
698#[inline]
699#[target_feature(enable = "neon")]
700#[stable(feature = "neon_intrinsics", since = "1.59.0")]
701#[cfg_attr(test, assert_instr(addp))]
702pub fn vaddv_u32(a: uint32x2_t) -> u32 {
703    unsafe extern "unadjusted" {
704        #[cfg_attr(
705            any(target_arch = "aarch64", target_arch = "arm64ec"),
706            link_name = "llvm.aarch64.neon.uaddv.i32.v2i32"
707        )]
708        fn _vaddv_u32(a: uint32x2_t) -> u32;
709    }
710    unsafe { _vaddv_u32(a) }
711}
712#[doc = "Add across vector"]
713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
714#[inline]
715#[target_feature(enable = "neon")]
716#[stable(feature = "neon_intrinsics", since = "1.59.0")]
717#[cfg_attr(test, assert_instr(addv))]
718pub fn vaddv_u8(a: uint8x8_t) -> u8 {
719    unsafe extern "unadjusted" {
720        #[cfg_attr(
721            any(target_arch = "aarch64", target_arch = "arm64ec"),
722            link_name = "llvm.aarch64.neon.uaddv.i8.v8i8"
723        )]
724        fn _vaddv_u8(a: uint8x8_t) -> u8;
725    }
726    unsafe { _vaddv_u8(a) }
727}
728#[doc = "Add across vector"]
729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
730#[inline]
731#[target_feature(enable = "neon")]
732#[stable(feature = "neon_intrinsics", since = "1.59.0")]
733#[cfg_attr(test, assert_instr(addv))]
734pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
735    unsafe extern "unadjusted" {
736        #[cfg_attr(
737            any(target_arch = "aarch64", target_arch = "arm64ec"),
738            link_name = "llvm.aarch64.neon.uaddv.i8.v16i8"
739        )]
740        fn _vaddvq_u8(a: uint8x16_t) -> u8;
741    }
742    unsafe { _vaddvq_u8(a) }
743}
744#[doc = "Add across vector"]
745#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
746#[inline]
747#[target_feature(enable = "neon")]
748#[stable(feature = "neon_intrinsics", since = "1.59.0")]
749#[cfg_attr(test, assert_instr(addv))]
750pub fn vaddv_u16(a: uint16x4_t) -> u16 {
751    unsafe extern "unadjusted" {
752        #[cfg_attr(
753            any(target_arch = "aarch64", target_arch = "arm64ec"),
754            link_name = "llvm.aarch64.neon.uaddv.i16.v4i16"
755        )]
756        fn _vaddv_u16(a: uint16x4_t) -> u16;
757    }
758    unsafe { _vaddv_u16(a) }
759}
760#[doc = "Add across vector"]
761#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
762#[inline]
763#[target_feature(enable = "neon")]
764#[stable(feature = "neon_intrinsics", since = "1.59.0")]
765#[cfg_attr(test, assert_instr(addv))]
766pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
767    unsafe extern "unadjusted" {
768        #[cfg_attr(
769            any(target_arch = "aarch64", target_arch = "arm64ec"),
770            link_name = "llvm.aarch64.neon.uaddv.i16.v8i16"
771        )]
772        fn _vaddvq_u16(a: uint16x8_t) -> u16;
773    }
774    unsafe { _vaddvq_u16(a) }
775}
776#[doc = "Add across vector"]
777#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
778#[inline]
779#[target_feature(enable = "neon")]
780#[stable(feature = "neon_intrinsics", since = "1.59.0")]
781#[cfg_attr(test, assert_instr(addv))]
782pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
783    unsafe extern "unadjusted" {
784        #[cfg_attr(
785            any(target_arch = "aarch64", target_arch = "arm64ec"),
786            link_name = "llvm.aarch64.neon.uaddv.i32.v4i32"
787        )]
788        fn _vaddvq_u32(a: uint32x4_t) -> u32;
789    }
790    unsafe { _vaddvq_u32(a) }
791}
792#[doc = "Add across vector"]
793#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
794#[inline]
795#[target_feature(enable = "neon")]
796#[stable(feature = "neon_intrinsics", since = "1.59.0")]
797#[cfg_attr(test, assert_instr(addp))]
798pub fn vaddvq_s64(a: int64x2_t) -> i64 {
799    unsafe extern "unadjusted" {
800        #[cfg_attr(
801            any(target_arch = "aarch64", target_arch = "arm64ec"),
802            link_name = "llvm.aarch64.neon.saddv.i64.v2i64"
803        )]
804        fn _vaddvq_s64(a: int64x2_t) -> i64;
805    }
806    unsafe { _vaddvq_s64(a) }
807}
808#[doc = "Add across vector"]
809#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
810#[inline]
811#[target_feature(enable = "neon")]
812#[stable(feature = "neon_intrinsics", since = "1.59.0")]
813#[cfg_attr(test, assert_instr(addp))]
814pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
815    unsafe extern "unadjusted" {
816        #[cfg_attr(
817            any(target_arch = "aarch64", target_arch = "arm64ec"),
818            link_name = "llvm.aarch64.neon.uaddv.i64.v2i64"
819        )]
820        fn _vaddvq_u64(a: uint64x2_t) -> u64;
821    }
822    unsafe { _vaddvq_u64(a) }
823}
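// Illustrative usage sketch (an editor-added example, not generated from the
// spec): the integer reductions keep the element width, so the sum wraps on
// overflow; widen first with the `vaddlv_*` family above if the true sum is needed.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vaddv_u8_wraps() -> u8 {
    // Eight lanes of 200 sum to 1600, which wraps to 1600 % 256 = 64.
    vaddv_u8(vdup_n_u8(200))
}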
824#[doc = "Multi-vector floating-point absolute maximum"]
825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
826#[inline]
827#[target_feature(enable = "neon,faminmax")]
828#[cfg_attr(test, assert_instr(nop))]
829#[unstable(feature = "faminmax", issue = "137933")]
830pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
831    unsafe extern "unadjusted" {
832        #[cfg_attr(
833            any(target_arch = "aarch64", target_arch = "arm64ec"),
834            link_name = "llvm.aarch64.neon.famax.v2f32"
835        )]
836        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
837    }
838    unsafe { _vamax_f32(a, b) }
839}
840#[doc = "Multi-vector floating-point absolute maximum"]
841#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
842#[inline]
843#[target_feature(enable = "neon,faminmax")]
844#[cfg_attr(test, assert_instr(nop))]
845#[unstable(feature = "faminmax", issue = "137933")]
846pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
847    unsafe extern "unadjusted" {
848        #[cfg_attr(
849            any(target_arch = "aarch64", target_arch = "arm64ec"),
850            link_name = "llvm.aarch64.neon.famax.v4f32"
851        )]
852        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
853    }
854    unsafe { _vamaxq_f32(a, b) }
855}
856#[doc = "Multi-vector floating-point absolute maximum"]
857#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
858#[inline]
859#[target_feature(enable = "neon,faminmax")]
860#[cfg_attr(test, assert_instr(nop))]
861#[unstable(feature = "faminmax", issue = "137933")]
862pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
863    unsafe extern "unadjusted" {
864        #[cfg_attr(
865            any(target_arch = "aarch64", target_arch = "arm64ec"),
866            link_name = "llvm.aarch64.neon.famax.v2f64"
867        )]
868        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
869    }
870    unsafe { _vamaxq_f64(a, b) }
871}
872#[doc = "Multi-vector floating-point absolute minimum"]
873#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
874#[inline]
875#[target_feature(enable = "neon,faminmax")]
876#[cfg_attr(test, assert_instr(nop))]
877#[unstable(feature = "faminmax", issue = "137933")]
878pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
879    unsafe extern "unadjusted" {
880        #[cfg_attr(
881            any(target_arch = "aarch64", target_arch = "arm64ec"),
882            link_name = "llvm.aarch64.neon.famin.v2f32"
883        )]
884        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
885    }
886    unsafe { _vamin_f32(a, b) }
887}
888#[doc = "Multi-vector floating-point absolute minimum"]
889#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
890#[inline]
891#[target_feature(enable = "neon,faminmax")]
892#[cfg_attr(test, assert_instr(nop))]
893#[unstable(feature = "faminmax", issue = "137933")]
894pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
895    unsafe extern "unadjusted" {
896        #[cfg_attr(
897            any(target_arch = "aarch64", target_arch = "arm64ec"),
898            link_name = "llvm.aarch64.neon.famin.v4f32"
899        )]
900        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
901    }
902    unsafe { _vaminq_f32(a, b) }
903}
904#[doc = "Multi-vector floating-point absolute minimum"]
905#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
906#[inline]
907#[target_feature(enable = "neon,faminmax")]
908#[cfg_attr(test, assert_instr(nop))]
909#[unstable(feature = "faminmax", issue = "137933")]
910pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
911    unsafe extern "unadjusted" {
912        #[cfg_attr(
913            any(target_arch = "aarch64", target_arch = "arm64ec"),
914            link_name = "llvm.aarch64.neon.famin.v2f64"
915        )]
916        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
917    }
918    unsafe { _vaminq_f64(a, b) }
919}
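// Illustrative usage sketch (an editor-added example; the semantics stated
// here are an assumption based on Arm's FAMAX/FAMIN description, not taken
// from the spec): each result lane is the larger (or smaller) of the two
// input lanes' absolute values.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,faminmax")]
fn example_vamaxq_f32() -> float32x4_t {
    let a = vdupq_n_f32(-3.0);
    let b = vdupq_n_f32(2.0);
    // Each lane is expected to be max(|-3.0|, |2.0|) = 3.0.
    vamaxq_f32(a, b)
}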
920#[doc = "Bit clear and exclusive OR"]
921#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
922#[inline]
923#[target_feature(enable = "neon,sha3")]
924#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
925#[cfg_attr(test, assert_instr(bcax))]
926pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
927    unsafe extern "unadjusted" {
928        #[cfg_attr(
929            any(target_arch = "aarch64", target_arch = "arm64ec"),
930            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
931        )]
932        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
933    }
934    unsafe { _vbcaxq_s8(a, b, c) }
935}
936#[doc = "Bit clear and exclusive OR"]
937#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
938#[inline]
939#[target_feature(enable = "neon,sha3")]
940#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
941#[cfg_attr(test, assert_instr(bcax))]
942pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
943    unsafe extern "unadjusted" {
944        #[cfg_attr(
945            any(target_arch = "aarch64", target_arch = "arm64ec"),
946            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
947        )]
948        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
949    }
950    unsafe { _vbcaxq_s16(a, b, c) }
951}
952#[doc = "Bit clear and exclusive OR"]
953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
954#[inline]
955#[target_feature(enable = "neon,sha3")]
956#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
957#[cfg_attr(test, assert_instr(bcax))]
958pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
959    unsafe extern "unadjusted" {
960        #[cfg_attr(
961            any(target_arch = "aarch64", target_arch = "arm64ec"),
962            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
963        )]
964        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
965    }
966    unsafe { _vbcaxq_s32(a, b, c) }
967}
968#[doc = "Bit clear and exclusive OR"]
969#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
970#[inline]
971#[target_feature(enable = "neon,sha3")]
972#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
973#[cfg_attr(test, assert_instr(bcax))]
974pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
975    unsafe extern "unadjusted" {
976        #[cfg_attr(
977            any(target_arch = "aarch64", target_arch = "arm64ec"),
978            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
979        )]
980        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
981    }
982    unsafe { _vbcaxq_s64(a, b, c) }
983}
984#[doc = "Bit clear and exclusive OR"]
985#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
986#[inline]
987#[target_feature(enable = "neon,sha3")]
988#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
989#[cfg_attr(test, assert_instr(bcax))]
990pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
991    unsafe extern "unadjusted" {
992        #[cfg_attr(
993            any(target_arch = "aarch64", target_arch = "arm64ec"),
994            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
995        )]
996        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
997    }
998    unsafe { _vbcaxq_u8(a, b, c) }
999}
1000#[doc = "Bit clear and exclusive OR"]
1001#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
1002#[inline]
1003#[target_feature(enable = "neon,sha3")]
1004#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
1005#[cfg_attr(test, assert_instr(bcax))]
1006pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
1007    unsafe extern "unadjusted" {
1008        #[cfg_attr(
1009            any(target_arch = "aarch64", target_arch = "arm64ec"),
1010            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
1011        )]
1012        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
1013    }
1014    unsafe { _vbcaxq_u16(a, b, c) }
1015}
1016#[doc = "Bit clear and exclusive OR"]
1017#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
1018#[inline]
1019#[target_feature(enable = "neon,sha3")]
1020#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
1021#[cfg_attr(test, assert_instr(bcax))]
1022pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
1023    unsafe extern "unadjusted" {
1024        #[cfg_attr(
1025            any(target_arch = "aarch64", target_arch = "arm64ec"),
1026            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
1027        )]
1028        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
1029    }
1030    unsafe { _vbcaxq_u32(a, b, c) }
1031}
1032#[doc = "Bit clear and exclusive OR"]
1033#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
1034#[inline]
1035#[target_feature(enable = "neon,sha3")]
1036#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
1037#[cfg_attr(test, assert_instr(bcax))]
1038pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
1039    unsafe extern "unadjusted" {
1040        #[cfg_attr(
1041            any(target_arch = "aarch64", target_arch = "arm64ec"),
1042            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
1043        )]
1044        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
1045    }
1046    unsafe { _vbcaxq_u64(a, b, c) }
1047}
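// Illustrative note (an editor-added example; the formula is an assumption
// based on Arm's BCAX description, not taken from the spec): the operation is
// `a ^ (b & !c)`, a fused form of the Keccak/SHA-3 chi step.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,sha3")]
fn example_vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Expected to match veorq_u64(a, vbicq_u64(b, c)) in one instruction.
    vbcaxq_u64(a, b, c)
}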
1048#[doc = "Floating-point complex add"]
1049#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
1050#[inline]
1051#[target_feature(enable = "neon,fp16")]
1052#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
1053#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1054#[cfg_attr(test, assert_instr(fcadd))]
1055pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
1056    unsafe extern "unadjusted" {
1057        #[cfg_attr(
1058            any(target_arch = "aarch64", target_arch = "arm64ec"),
1059            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
1060        )]
1061        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
1062    }
1063    unsafe { _vcadd_rot270_f16(a, b) }
1064}
1065#[doc = "Floating-point complex add"]
1066#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
1067#[inline]
1068#[target_feature(enable = "neon,fp16")]
1069#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
1070#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1071#[cfg_attr(test, assert_instr(fcadd))]
1072pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
1073    unsafe extern "unadjusted" {
1074        #[cfg_attr(
1075            any(target_arch = "aarch64", target_arch = "arm64ec"),
1076            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
1077        )]
1078        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
1079    }
1080    unsafe { _vcaddq_rot270_f16(a, b) }
1081}
1082#[doc = "Floating-point complex add"]
1083#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
1084#[inline]
1085#[target_feature(enable = "neon,fcma")]
1086#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
1087#[cfg_attr(test, assert_instr(fcadd))]
1088pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
1089    unsafe extern "unadjusted" {
1090        #[cfg_attr(
1091            any(target_arch = "aarch64", target_arch = "arm64ec"),
1092            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
1093        )]
1094        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
1095    }
1096    unsafe { _vcadd_rot270_f32(a, b) }
1097}
1098#[doc = "Floating-point complex add"]
1099#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
1100#[inline]
1101#[target_feature(enable = "neon,fcma")]
1102#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
1103#[cfg_attr(test, assert_instr(fcadd))]
1104pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
1105    unsafe extern "unadjusted" {
1106        #[cfg_attr(
1107            any(target_arch = "aarch64", target_arch = "arm64ec"),
1108            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
1109        )]
1110        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
1111    }
1112    unsafe { _vcaddq_rot270_f32(a, b) }
1113}
1114#[doc = "Floating-point complex add"]
1115#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
1116#[inline]
1117#[target_feature(enable = "neon,fcma")]
1118#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
1119#[cfg_attr(test, assert_instr(fcadd))]
1120pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
1121    unsafe extern "unadjusted" {
1122        #[cfg_attr(
1123            any(target_arch = "aarch64", target_arch = "arm64ec"),
1124            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
1125        )]
1126        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
1127    }
1128    unsafe { _vcaddq_rot270_f64(a, b) }
1129}
1130#[doc = "Floating-point complex add"]
1131#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
1132#[inline]
1133#[target_feature(enable = "neon,fp16")]
1134#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
1135#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1136#[cfg_attr(test, assert_instr(fcadd))]
1137pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
1138    unsafe extern "unadjusted" {
1139        #[cfg_attr(
1140            any(target_arch = "aarch64", target_arch = "arm64ec"),
1141            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
1142        )]
1143        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
1144    }
1145    unsafe { _vcadd_rot90_f16(a, b) }
1146}
1147#[doc = "Floating-point complex add"]
1148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
1149#[inline]
1150#[target_feature(enable = "neon,fp16")]
1151#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
1152#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1153#[cfg_attr(test, assert_instr(fcadd))]
1154pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
1155    unsafe extern "unadjusted" {
1156        #[cfg_attr(
1157            any(target_arch = "aarch64", target_arch = "arm64ec"),
1158            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
1159        )]
1160        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
1161    }
1162    unsafe { _vcaddq_rot90_f16(a, b) }
1163}
1164#[doc = "Floating-point complex add"]
1165#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
1166#[inline]
1167#[target_feature(enable = "neon,fcma")]
1168#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
1169#[cfg_attr(test, assert_instr(fcadd))]
1170pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
1171    unsafe extern "unadjusted" {
1172        #[cfg_attr(
1173            any(target_arch = "aarch64", target_arch = "arm64ec"),
1174            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
1175        )]
1176        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
1177    }
1178    unsafe { _vcadd_rot90_f32(a, b) }
1179}
1180#[doc = "Floating-point complex add"]
1181#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
1182#[inline]
1183#[target_feature(enable = "neon,fcma")]
1184#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
1185#[cfg_attr(test, assert_instr(fcadd))]
1186pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
1187    unsafe extern "unadjusted" {
1188        #[cfg_attr(
1189            any(target_arch = "aarch64", target_arch = "arm64ec"),
1190            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
1191        )]
1192        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
1193    }
1194    unsafe { _vcaddq_rot90_f32(a, b) }
1195}
1196#[doc = "Floating-point complex add"]
1197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
1198#[inline]
1199#[target_feature(enable = "neon,fcma")]
1200#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
1201#[cfg_attr(test, assert_instr(fcadd))]
1202pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
1203    unsafe extern "unadjusted" {
1204        #[cfg_attr(
1205            any(target_arch = "aarch64", target_arch = "arm64ec"),
1206            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
1207        )]
1208        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
1209    }
1210    unsafe { _vcaddq_rot90_f64(a, b) }
1211}
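// Illustrative note (an editor-added example; the lane conventions stated here
// are an assumption based on Arm's FCADD description, not taken from the
// spec): lanes are treated as interleaved complex pairs, real parts in even
// lanes and imaginary parts in odd lanes. With `rot90`, `b` is rotated by +90
// degrees (multiplied by i) before the add, so per pair:
//   result.re = a.re - b.im
//   result.im = a.im + b.re
// With `rot270` the signs flip: result.re = a.re + b.im, result.im = a.im - b.re.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fcma")]
fn example_vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    vcaddq_rot90_f32(a, b)
}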
1212#[doc = "Floating-point absolute compare greater than or equal"]
1213#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
1214#[inline]
1215#[target_feature(enable = "neon")]
1216#[cfg_attr(test, assert_instr(facge))]
1217#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1218pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1219    unsafe extern "unadjusted" {
1220        #[cfg_attr(
1221            any(target_arch = "aarch64", target_arch = "arm64ec"),
1222            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
1223        )]
1224        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
1225    }
1226    unsafe { _vcage_f64(a, b) }
1227}
1228#[doc = "Floating-point absolute compare greater than or equal"]
1229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
1230#[inline]
1231#[target_feature(enable = "neon")]
1232#[cfg_attr(test, assert_instr(facge))]
1233#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1234pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1235    unsafe extern "unadjusted" {
1236        #[cfg_attr(
1237            any(target_arch = "aarch64", target_arch = "arm64ec"),
1238            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
1239        )]
1240        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
1241    }
1242    unsafe { _vcageq_f64(a, b) }
1243}
1244#[doc = "Floating-point absolute compare greater than or equal"]
1245#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
1246#[inline]
1247#[target_feature(enable = "neon")]
1248#[cfg_attr(test, assert_instr(facge))]
1249#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1250pub fn vcaged_f64(a: f64, b: f64) -> u64 {
1251    unsafe extern "unadjusted" {
1252        #[cfg_attr(
1253            any(target_arch = "aarch64", target_arch = "arm64ec"),
1254            link_name = "llvm.aarch64.neon.facge.i64.f64"
1255        )]
1256        fn _vcaged_f64(a: f64, b: f64) -> u64;
1257    }
1258    unsafe { _vcaged_f64(a, b) }
1259}
1260#[doc = "Floating-point absolute compare greater than or equal"]
1261#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
1262#[inline]
1263#[target_feature(enable = "neon")]
1264#[cfg_attr(test, assert_instr(facge))]
1265#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1266pub fn vcages_f32(a: f32, b: f32) -> u32 {
1267    unsafe extern "unadjusted" {
1268        #[cfg_attr(
1269            any(target_arch = "aarch64", target_arch = "arm64ec"),
1270            link_name = "llvm.aarch64.neon.facge.i32.f32"
1271        )]
1272        fn _vcages_f32(a: f32, b: f32) -> u32;
1273    }
1274    unsafe { _vcages_f32(a, b) }
1275}
1276#[doc = "Floating-point absolute compare greater than or equal"]
1277#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
1278#[inline]
1279#[cfg_attr(test, assert_instr(facge))]
1280#[target_feature(enable = "neon,fp16")]
1281#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1282pub fn vcageh_f16(a: f16, b: f16) -> u16 {
1283    unsafe extern "unadjusted" {
1284        #[cfg_attr(
1285            any(target_arch = "aarch64", target_arch = "arm64ec"),
1286            link_name = "llvm.aarch64.neon.facge.i32.f16"
1287        )]
1288        fn _vcageh_f16(a: f16, b: f16) -> i32;
1289    }
1290    unsafe { _vcageh_f16(a, b) as u16 }
1291}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    unsafe { _vcagth_f16(a, b) as u16 }
}
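// NOTE (editor's addition, not generated output): a sketch contrasting the
// strict facgt comparison above with facge, using the scalar forms and the
// same user-code assumptions as the previous example module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcagt_example {
    use core::arch::aarch64::*;

    #[test]
    fn facgt_is_strict_facge_is_not() {
        unsafe {
            // Equal magnitudes: |a| > |b| is false, but |a| >= |b| is true.
            assert_eq!(vcagtd_f64(-2.0, 2.0), 0);
            assert_eq!(vcaged_f64(-2.0, 2.0), u64::MAX);
        }
    }
}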
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    vcagth_f16(b, a)
}
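// NOTE (editor's addition, not generated output): the vcale*/vcalt*
// intrinsics above contain no comparison logic of their own. Since
// `|a| <= |b|` is the same predicate as `|b| >= |a|`, they simply forward to
// the vcage*/vcagt* family with the operands swapped, which is why they still
// assert facge and facgt. A user-code sketch of the identity (same
// assumptions as the earlier example modules):
#[cfg(all(test, target_arch = "aarch64"))]
mod vcale_swap_example {
    use core::arch::aarch64::*;

    #[test]
    fn vcale_is_vcage_with_swapped_operands() {
        unsafe {
            let (a, b) = (vdup_n_f64(-1.5), vdup_n_f64(4.0));
            let le = vget_lane_u64::<0>(vcale_f64(a, b));
            let ge_swapped = vget_lane_u64::<0>(vcage_f64(b, a));
            assert_eq!(le, ge_swapped);
            assert_eq!(le, u64::MAX);
        }
    }
}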
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
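// NOTE (editor's addition, not generated output): the vceq* vector forms
// above lower to a plain `simd_eq`, which produces a per-lane mask: all bits
// set in a lane where the operands compare equal, all bits clear otherwise.
// A user-code sketch (same assumptions as the earlier example modules):
#[cfg(all(test, target_arch = "aarch64"))]
mod vceq_mask_example {
    use core::arch::aarch64::*;

    #[test]
    fn cmeq_produces_all_ones_masks() {
        unsafe {
            let a = vdup_n_s64(7);
            assert_eq!(vget_lane_u64::<0>(vceq_s64(a, vdup_n_s64(7))), u64::MAX);
            assert_eq!(vget_lane_u64::<0>(vceq_s64(a, vdup_n_s64(8))), 0);
        }
    }
}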
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
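// NOTE (editor's addition, not generated output): the scalar vceq* forms
// above use two different reductions. Float scalars splat with vdup, compare
// as a one-lane vector, and extract lane 0; 64-bit integer scalars simply
// reinterpret the one-lane vector result via transmute. A user-code sketch
// (same assumptions as the earlier example modules):
#[cfg(all(test, target_arch = "aarch64"))]
mod vceq_scalar_example {
    use core::arch::aarch64::*;

    #[test]
    fn scalar_compares_return_all_ones_or_zero() {
        unsafe {
            assert_eq!(vceqs_f32(1.0, 1.0), u32::MAX);
            assert_eq!(vceqd_u64(5, 6), 0);
        }
    }
}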
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
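// NOTE (editor's addition, not generated output): the vceqz* forms above
// splat a zero vector and reuse `simd_eq`, so each lane equal to zero becomes
// an all-ones mask. A user-code sketch (same assumptions as the earlier
// example modules):
#[cfg(all(test, target_arch = "aarch64"))]
mod vceqz_example {
    use core::arch::aarch64::*;

    #[test]
    fn cmeq_zero_sets_matching_lanes() {
        unsafe {
            assert_eq!(vget_lane_u8::<0>(vceqz_s8(vdup_n_s8(0))), u8::MAX);
            assert_eq!(vget_lane_u8::<0>(vceqz_s8(vdup_n_s8(3))), 0);
        }
    }
}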
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
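// NOTE (editor's addition, not generated output): despite the "bitwise"
// wording in the doc strings, the floating-point compare-to-zero forms are
// IEEE comparisons, so -0.0 compares equal to zero while NaN does not. A
// user-code sketch (same assumptions as the earlier example modules):
#[cfg(all(test, target_arch = "aarch64"))]
mod vceqz_float_example {
    use core::arch::aarch64::*;

    #[test]
    fn ieee_zero_compare_semantics() {
        unsafe {
            assert_eq!(vceqzd_f64(-0.0), u64::MAX);
            assert_eq!(vceqzd_f64(f64::NAN), 0);
        }
    }
}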
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
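// NOTE (editor's addition, not generated output): signedness picks both the
// instruction (cmge for signed, cmhs for unsigned) and the result for the
// same bit pattern. A user-code sketch (same assumptions as the earlier
// example modules):
#[cfg(all(test, target_arch = "aarch64"))]
mod vcge_signedness_example {
    use core::arch::aarch64::*;

    #[test]
    fn same_bits_different_ordering() {
        unsafe {
            // As a signed value, -1 >= 1 is false...
            assert_eq!(vcged_s64(-1, 1), 0);
            // ...but the same bit pattern read as u64 is the maximum value.
            assert_eq!(vcged_u64(u64::MAX, 1), u64::MAX);
        }
    }
}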
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
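// NOTE (editor's addition, not generated output): the floating-point
// compare-to-zero forms are IEEE comparisons, so -0.0 satisfies ">= 0" while
// any strictly negative value does not. A user-code sketch (same assumptions
// as the earlier example modules):
#[cfg(all(test, target_arch = "aarch64"))]
mod vcgez_example {
    use core::arch::aarch64::*;

    #[test]
    fn negative_zero_is_greater_or_equal_to_zero() {
        unsafe {
            assert_eq!(vcgezd_f64(-0.0), u64::MAX);
            assert_eq!(vcgezd_f64(-1.0e-300), 0);
        }
    }
}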
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
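// NOTE (editor's addition, not generated output): fcmgt and the scalar fcmp
// forms are ordered comparisons, so a NaN operand makes the predicate false
// (an all-zeros result) in either direction. A user-code sketch (same
// assumptions as the earlier example modules):
#[cfg(all(test, target_arch = "aarch64"))]
mod vcgt_nan_example {
    use core::arch::aarch64::*;

    #[test]
    fn nan_comparisons_are_false() {
        unsafe {
            assert_eq!(vcgtd_f64(f64::NAN, 0.0), 0);
            assert_eq!(vcgtd_f64(0.0, f64::NAN), 0);
        }
    }
}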
2238#[doc = "Floating-point compare greater than zero"]
2239#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
2240#[inline]
2241#[target_feature(enable = "neon")]
2242#[cfg_attr(test, assert_instr(fcmgt))]
2243#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2244pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
2245    let b: f32x2 = f32x2::new(0.0, 0.0);
2246    unsafe { simd_gt(a, transmute(b)) }
2247}
2248#[doc = "Floating-point compare greater than zero"]
2249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
2250#[inline]
2251#[target_feature(enable = "neon")]
2252#[cfg_attr(test, assert_instr(fcmgt))]
2253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2254pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
2255    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
2256    unsafe { simd_gt(a, transmute(b)) }
2257}
2258#[doc = "Floating-point compare greater than zero"]
2259#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
2260#[inline]
2261#[target_feature(enable = "neon")]
2262#[cfg_attr(test, assert_instr(fcmgt))]
2263#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2264pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
2265    let b: f64 = 0.0;
2266    unsafe { simd_gt(a, transmute(b)) }
2267}
2268#[doc = "Floating-point compare greater than zero"]
2269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
2270#[inline]
2271#[target_feature(enable = "neon")]
2272#[cfg_attr(test, assert_instr(fcmgt))]
2273#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2274pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
2275    let b: f64x2 = f64x2::new(0.0, 0.0);
2276    unsafe { simd_gt(a, transmute(b)) }
2277}
2278#[doc = "Compare signed greater than zero"]
2279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
2280#[inline]
2281#[target_feature(enable = "neon")]
2282#[cfg_attr(test, assert_instr(cmgt))]
2283#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2284pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
2285    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2286    unsafe { simd_gt(a, transmute(b)) }
2287}
2288#[doc = "Compare signed greater than zero"]
2289#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
2290#[inline]
2291#[target_feature(enable = "neon")]
2292#[cfg_attr(test, assert_instr(cmgt))]
2293#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2294pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
2295    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2296    unsafe { simd_gt(a, transmute(b)) }
2297}
2298#[doc = "Compare signed greater than zero"]
2299#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
2300#[inline]
2301#[target_feature(enable = "neon")]
2302#[cfg_attr(test, assert_instr(cmgt))]
2303#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2304pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
2305    let b: i16x4 = i16x4::new(0, 0, 0, 0);
2306    unsafe { simd_gt(a, transmute(b)) }
2307}
2308#[doc = "Compare signed greater than zero"]
2309#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
2310#[inline]
2311#[target_feature(enable = "neon")]
2312#[cfg_attr(test, assert_instr(cmgt))]
2313#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2314pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
2315    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2316    unsafe { simd_gt(a, transmute(b)) }
2317}
2318#[doc = "Compare signed greater than zero"]
2319#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
2320#[inline]
2321#[target_feature(enable = "neon")]
2322#[cfg_attr(test, assert_instr(cmgt))]
2323#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2324pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
2325    let b: i32x2 = i32x2::new(0, 0);
2326    unsafe { simd_gt(a, transmute(b)) }
2327}
2328#[doc = "Compare signed greater than zero"]
2329#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
2330#[inline]
2331#[target_feature(enable = "neon")]
2332#[cfg_attr(test, assert_instr(cmgt))]
2333#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2334pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
2335    let b: i32x4 = i32x4::new(0, 0, 0, 0);
2336    unsafe { simd_gt(a, transmute(b)) }
2337}
2338#[doc = "Compare signed greater than zero"]
2339#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
2340#[inline]
2341#[target_feature(enable = "neon")]
2342#[cfg_attr(test, assert_instr(cmgt))]
2343#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2344pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
2345    let b: i64x1 = i64x1::new(0);
2346    unsafe { simd_gt(a, transmute(b)) }
2347}
2348#[doc = "Compare signed greater than zero"]
2349#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
2350#[inline]
2351#[target_feature(enable = "neon")]
2352#[cfg_attr(test, assert_instr(cmgt))]
2353#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2354pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
2355    let b: i64x2 = i64x2::new(0, 0);
2356    unsafe { simd_gt(a, transmute(b)) }
2357}
2358#[doc = "Floating-point compare greater than zero"]
2359#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
2360#[inline]
2361#[target_feature(enable = "neon")]
2362#[cfg_attr(test, assert_instr(fcmp))]
2363#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2364pub fn vcgtzd_f64(a: f64) -> u64 {
2365    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
2366}
2367#[doc = "Floating-point compare greater than zero"]
2368#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
2369#[inline]
2370#[target_feature(enable = "neon")]
2371#[cfg_attr(test, assert_instr(fcmp))]
2372#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2373pub fn vcgtzs_f32(a: f32) -> u32 {
2374    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
2375}
2376#[doc = "Compare signed greater than zero"]
2377#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
2378#[inline]
2379#[target_feature(enable = "neon")]
2380#[cfg_attr(test, assert_instr(cmp))]
2381#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2382pub fn vcgtzd_s64(a: i64) -> u64 {
2383    unsafe { transmute(vcgtz_s64(transmute(a))) }
2384}
2385#[doc = "Floating-point compare greater than zero"]
2386#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
2387#[inline]
2388#[cfg_attr(test, assert_instr(fcmp))]
2389#[target_feature(enable = "neon,fp16")]
2390#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2391pub fn vcgtzh_f16(a: f16) -> u16 {
2392    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
2393}
2394#[doc = "Floating-point compare less than or equal"]
2395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
2396#[inline]
2397#[target_feature(enable = "neon")]
2398#[cfg_attr(test, assert_instr(fcmge))]
2399#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2400pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
2401    unsafe { simd_le(a, b) }
2402}
2403#[doc = "Floating-point compare less than or equal"]
2404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
2405#[inline]
2406#[target_feature(enable = "neon")]
2407#[cfg_attr(test, assert_instr(fcmge))]
2408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2409pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
2410    unsafe { simd_le(a, b) }
2411}
2412#[doc = "Compare signed less than or equal"]
2413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
2414#[inline]
2415#[target_feature(enable = "neon")]
2416#[cfg_attr(test, assert_instr(cmge))]
2417#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2418pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
2419    unsafe { simd_le(a, b) }
2420}
2421#[doc = "Compare signed less than or equal"]
2422#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
2423#[inline]
2424#[target_feature(enable = "neon")]
2425#[cfg_attr(test, assert_instr(cmge))]
2426#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2427pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
2428    unsafe { simd_le(a, b) }
2429}
2430#[doc = "Compare unsigned less than or equal"]
2431#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
2432#[inline]
2433#[target_feature(enable = "neon")]
2434#[cfg_attr(test, assert_instr(cmhs))]
2435#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2436pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
2437    unsafe { simd_le(a, b) }
2438}
2439#[doc = "Compare unsigned less than or equal"]
2440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
2441#[inline]
2442#[target_feature(enable = "neon")]
2443#[cfg_attr(test, assert_instr(cmhs))]
2444#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2445pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
2446    unsafe { simd_le(a, b) }
2447}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
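// The scalar `d`/`s`-suffixed comparisons reuse the one-lane vector forms:
// the operands are splatted (or transmuted) into single-element vectors,
// compared, and the resulting lane comes back as a 0 / all-ones mask.
// For example:
//
//     assert_eq!(vcled_s64(-1, 0), u64::MAX);
//     assert_eq!(vcled_s64(1, 0), 0);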
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
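// The `vclez*` family compares against a zero vector built inline, so
// `vclez_s8(a)` behaves like `vcle_s8(a, vdup_n_s8(0))` while mapping to the
// single-operand CMLE (zero) form. Sketch, assuming `neon` is enabled:
//
//     let a = vdup_n_s8(-3);
//     let m = vclez_s8(a); // every lane == 0xff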
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vclezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_s64(a: i64) -> u64 {
    unsafe { transmute(vcltz_s64(transmute(a))) }
}
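// `vcltzd_s64` needs no compare instruction at all: for a signed 64-bit
// value, `a < 0` exactly when the sign bit is set, so the `asr`-based
// lowering asserted above amounts to `(a >> 63) as u64`, which is already
// the expected 0 / u64::MAX mask:
//
//     assert_eq!(vcltzd_s64(-5), u64::MAX);
//     assert_eq!(vcltzd_s64(5), 0);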
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcltzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_f64(a, b, c) }
}
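// The `vcmla*` intrinsics treat each vector as interleaved complex numbers
// `[re0, im0, re1, im1, ...]` and lower to FCMLA with the rotation named in
// the suffix (rotation 0 here). Per Arm's documentation, a full complex
// multiply-accumulate is obtained by chaining the rot 0 and rot 90 forms.
// A sketch, assuming `neon` and `fcma` are enabled at the call site:
//
//     // acc += a * b, one f64 complex number per 128-bit register
//     acc = vcmlaq_f64(acc, a, b);       // rotation 0 partial products
//     acc = vcmlaq_rot90_f64(acc, a, b); // rotation 90 partial products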
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
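// The `_lane_` variants statically select one complex pair from `c`
// (elements `2 * LANE` and `2 * LANE + 1`), broadcast it across the vector
// with `simd_shuffle!`, and defer to the plain intrinsic; the static assert
// bounds `LANE` to the number of complex pairs in `c`. In particular, since
// a `float32x2_t` holds a single pair, `vcmla_lane_f32::<0>(a, b, c)`
// reduces to `vcmla_f32(a, b, c)` (the `[0, 1]` shuffle is the identity).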
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
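// The `rot180` forms negate the rotation-0 partial products before
// accumulating: rotating one multiplicand's complex pairs by 180 degrees is
// a sign flip, making these the subtracting counterpart of the plain
// `vcmla*` intrinsics above.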
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
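// Assuming the sign conventions in Arm's FCMLA pseudocode, `rot270` is to
// `rot90` what `rot180` is to rotation 0: it accumulates the negated rot 90
// partial products, so chaining the rot 180 and rot 270 forms should give a
// complex multiply-subtract. A hedged sketch (verify against the FCMLA
// definition before relying on it):
//
//     // acc -= a * b
//     acc = vcmlaq_rot180_f64(acc, a, b);
//     acc = vcmlaq_rot270_f64(acc, a, b);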
3570#[doc = "Floating-point complex multiply accumulate"]
3571#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
3572#[inline]
3573#[target_feature(enable = "neon,fcma")]
3574#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3575#[rustc_legacy_const_generics(3)]
3576#[target_feature(enable = "neon,fp16")]
3577#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3578pub fn vcmla_rot270_lane_f16<const LANE: i32>(
3579    a: float16x4_t,
3580    b: float16x4_t,
3581    c: float16x4_t,
3582) -> float16x4_t {
3583    static_assert_uimm_bits!(LANE, 1);
3584    unsafe {
3585        let c: float16x4_t = simd_shuffle!(
3586            c,
3587            c,
3588            [
3589                2 * LANE as u32,
3590                2 * LANE as u32 + 1,
3591                2 * LANE as u32,
3592                2 * LANE as u32 + 1
3593            ]
3594        );
3595        vcmla_rot270_f16(a, b, c)
3596    }
3597}
3598#[doc = "Floating-point complex multiply accumulate"]
3599#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
3600#[inline]
3601#[target_feature(enable = "neon,fcma")]
3602#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3603#[rustc_legacy_const_generics(3)]
3604#[target_feature(enable = "neon,fp16")]
3605#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3606pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
3607    a: float16x8_t,
3608    b: float16x8_t,
3609    c: float16x4_t,
3610) -> float16x8_t {
3611    static_assert_uimm_bits!(LANE, 1);
3612    unsafe {
3613        let c: float16x8_t = simd_shuffle!(
3614            c,
3615            c,
3616            [
3617                2 * LANE as u32,
3618                2 * LANE as u32 + 1,
3619                2 * LANE as u32,
3620                2 * LANE as u32 + 1,
3621                2 * LANE as u32,
3622                2 * LANE as u32 + 1,
3623                2 * LANE as u32,
3624                2 * LANE as u32 + 1
3625            ]
3626        );
3627        vcmlaq_rot270_f16(a, b, c)
3628    }
3629}
3630#[doc = "Floating-point complex multiply accumulate"]
3631#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
3632#[inline]
3633#[target_feature(enable = "neon,fcma")]
3634#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3635#[rustc_legacy_const_generics(3)]
3636#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3637pub fn vcmla_rot270_lane_f32<const LANE: i32>(
3638    a: float32x2_t,
3639    b: float32x2_t,
3640    c: float32x2_t,
3641) -> float32x2_t {
3642    static_assert!(LANE == 0);
3643    unsafe {
3644        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3645        vcmla_rot270_f32(a, b, c)
3646    }
3647}
3648#[doc = "Floating-point complex multiply accumulate"]
3649#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
3650#[inline]
3651#[target_feature(enable = "neon,fcma")]
3652#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3653#[rustc_legacy_const_generics(3)]
3654#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3655pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
3656    a: float32x4_t,
3657    b: float32x4_t,
3658    c: float32x2_t,
3659) -> float32x4_t {
3660    static_assert!(LANE == 0);
3661    unsafe {
3662        let c: float32x4_t = simd_shuffle!(
3663            c,
3664            c,
3665            [
3666                2 * LANE as u32,
3667                2 * LANE as u32 + 1,
3668                2 * LANE as u32,
3669                2 * LANE as u32 + 1
3670            ]
3671        );
3672        vcmlaq_rot270_f32(a, b, c)
3673    }
3674}
3675#[doc = "Floating-point complex multiply accumulate"]
3676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
3677#[inline]
3678#[target_feature(enable = "neon,fcma")]
3679#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3680#[rustc_legacy_const_generics(3)]
3681#[target_feature(enable = "neon,fp16")]
3682#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3683pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
3684    a: float16x4_t,
3685    b: float16x4_t,
3686    c: float16x8_t,
3687) -> float16x4_t {
3688    static_assert_uimm_bits!(LANE, 2);
3689    unsafe {
3690        let c: float16x4_t = simd_shuffle!(
3691            c,
3692            c,
3693            [
3694                2 * LANE as u32,
3695                2 * LANE as u32 + 1,
3696                2 * LANE as u32,
3697                2 * LANE as u32 + 1
3698            ]
3699        );
3700        vcmla_rot270_f16(a, b, c)
3701    }
3702}
3703#[doc = "Floating-point complex multiply accumulate"]
3704#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
3705#[inline]
3706#[target_feature(enable = "neon,fcma")]
3707#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3708#[rustc_legacy_const_generics(3)]
3709#[target_feature(enable = "neon,fp16")]
3710#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3711pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
3712    a: float16x8_t,
3713    b: float16x8_t,
3714    c: float16x8_t,
3715) -> float16x8_t {
3716    static_assert_uimm_bits!(LANE, 2);
3717    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
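// Illustrative sketch, not part of the generated output: FCMLA with rotation
// 90 accumulates the cross terms `re -= b.im * c.im` and `im += b.im * c.re`
// for each interleaved (re, im) pair, so chaining it with the rotate-0 form
// `vcmla_f32` (defined earlier in this file) yields a full complex
// multiply-accumulate, a + b * c.
#[cfg(test)]
#[target_feature(enable = "neon,fcma")]
fn complex_mla_f32_sketch(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // rotate 0:  re += b.re * c.re; im += b.re * c.im
    let acc = vcmla_f32(a, b, c);
    // rotate 90: re -= b.im * c.im; im += b.im * c.re
    vcmla_rot90_f32(acc, b, c)
}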
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
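// Note (illustrative, not generated): here `c` is a single complex pair
// (float32x2_t), so `LANE` is forced to 0 and the shuffle above is an
// identity; the `lane` form exists for uniformity with the wider `laneq`
// variants, which select among several pairs.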
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
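// Usage sketch (illustrative, not generated): the lane variants replicate the
// complex pair selected by `LANE` across every pair of `c` before invoking
// the base intrinsic, so the call below multiplies each pair of `b` by the
// same pair c[1].
#[cfg(test)]
#[target_feature(enable = "neon,fcma")]
fn splat_pair_sketch(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    vcmlaq_rot90_laneq_f32::<1>(a, b, c)
}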
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
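// Illustrative sketch (not generated): `simd_shuffle!` indexes the
// concatenation of its two inputs, so for a 2-lane `a` the index
// `2 + LANE2` selects an element of `b`. The call below writes b[1]
// into lane 0 of `a` and leaves lane 1 untouched.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn vcopy_lane_f32_sketch(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    vcopy_lane_f32::<0, 1>(a, b)
}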
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
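// Illustrative note (not generated): both inputs of `simd_shuffle!` must have
// the same length, so `a` is first widened to 16 lanes by concatenating it
// with itself; `b`'s elements then appear at indices 16..=31, and the final
// 8-element shuffle keeps only the low half. The sketch below writes b[15]
// into lane 7 of `a`.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn vcopy_laneq_s8_sketch(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    vcopy_laneq_s8::<7, 15>(a, b)
}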
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
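// Usage sketch (illustrative, not generated): `b` holds a single element, so
// `LANE2` must be 0 and only the destination lane is selectable. The call
// below inserts b[0] into the upper lane of `a`.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn vcopyq_lane_f64_sketch(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    vcopyq_lane_f64::<1, 0>(a, b)
}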
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    let b: int8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(
                a,
                b,
                [
                    16 + LANE2 as u32,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            1 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    16 + LANE2 as u32,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            2 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    16 + LANE2 as u32,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            3 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    16 + LANE2 as u32,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            4 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    16 + LANE2 as u32,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            5 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    16 + LANE2 as u32,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            6 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    16 + LANE2 as u32,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            7 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    16 + LANE2 as u32,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            8 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    16 + LANE2 as u32,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            9 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    16 + LANE2 as u32,
                    10,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            10 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    16 + LANE2 as u32,
                    11,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            11 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    16 + LANE2 as u32,
                    12,
                    13,
                    14,
                    15
                ]
            ),
            12 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    16 + LANE2 as u32,
                    13,
                    14,
                    15
                ]
            ),
            13 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    16 + LANE2 as u32,
                    14,
                    15
                ]
            ),
            14 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    16 + LANE2 as u32,
                    15
                ]
            ),
            15 => simd_shuffle!(
                a,
                b,
                [
                    0,
                    1,
                    2,
                    3,
                    4,
                    5,
                    6,
                    7,
                    8,
                    9,
                    10,
                    11,
                    12,
                    13,
                    14,
                    16 + LANE2 as u32
                ]
            ),
            _ => unreachable_unchecked(),
        }
    }
}
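// Note (illustrative, not generated): `LANE1` is a const generic, so this
// 16-arm match is resolved entirely at compile time; each monomorphization
// keeps a single shuffle and lowers to one `mov`, as the assert_instr
// attribute above checks.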
4974#[doc = "Insert vector element from another vector element"]
4975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
4976#[inline]
4977#[target_feature(enable = "neon")]
4978#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
4979#[rustc_legacy_const_generics(1, 3)]
4980#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4981pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
4982    a: int16x8_t,
4983    b: int16x4_t,
4984) -> int16x8_t {
4985    static_assert_uimm_bits!(LANE1, 3);
4986    static_assert_uimm_bits!(LANE2, 2);
4987    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
4988    unsafe {
4989        match LANE1 & 0b111 {
4990            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
4991            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
4992            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
4993            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
4994            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
4995            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
4996            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
4997            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
4998            _ => unreachable_unchecked(),
4999        }
5000    }
5001}
5002#[doc = "Insert vector element from another vector element"]
5003#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
5004#[inline]
5005#[target_feature(enable = "neon")]
5006#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5007#[rustc_legacy_const_generics(1, 3)]
5008#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5009pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
5010    a: int32x4_t,
5011    b: int32x2_t,
5012) -> int32x4_t {
5013    static_assert_uimm_bits!(LANE1, 2);
5014    static_assert_uimm_bits!(LANE2, 1);
5015    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
5016    unsafe {
5017        match LANE1 & 0b11 {
5018            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5019            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5020            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5021            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5022            _ => unreachable_unchecked(),
5023        }
5024    }
5025}
5026#[doc = "Insert vector element from another vector element"]
5027#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
5028#[inline]
5029#[target_feature(enable = "neon")]
5030#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
5031#[rustc_legacy_const_generics(1, 3)]
5032#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5033pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
5034    a: uint8x16_t,
5035    b: uint8x8_t,
5036) -> uint8x16_t {
5037    static_assert_uimm_bits!(LANE1, 4);
5038    static_assert_uimm_bits!(LANE2, 3);
5039    let b: uint8x16_t =
5040        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5041    unsafe {
5042        match LANE1 & 0b1111 {
5043            0 => simd_shuffle!(
5044                a,
5045                b,
5046                [
5047                    16 + LANE2 as u32,
5048                    1,
5049                    2,
5050                    3,
5051                    4,
5052                    5,
5053                    6,
5054                    7,
5055                    8,
5056                    9,
5057                    10,
5058                    11,
5059                    12,
5060                    13,
5061                    14,
5062                    15
5063                ]
5064            ),
5065            1 => simd_shuffle!(
5066                a,
5067                b,
5068                [
5069                    0,
5070                    16 + LANE2 as u32,
5071                    2,
5072                    3,
5073                    4,
5074                    5,
5075                    6,
5076                    7,
5077                    8,
5078                    9,
5079                    10,
5080                    11,
5081                    12,
5082                    13,
5083                    14,
5084                    15
5085                ]
5086            ),
5087            2 => simd_shuffle!(
5088                a,
5089                b,
5090                [
5091                    0,
5092                    1,
5093                    16 + LANE2 as u32,
5094                    3,
5095                    4,
5096                    5,
5097                    6,
5098                    7,
5099                    8,
5100                    9,
5101                    10,
5102                    11,
5103                    12,
5104                    13,
5105                    14,
5106                    15
5107                ]
5108            ),
5109            3 => simd_shuffle!(
5110                a,
5111                b,
5112                [
5113                    0,
5114                    1,
5115                    2,
5116                    16 + LANE2 as u32,
5117                    4,
5118                    5,
5119                    6,
5120                    7,
5121                    8,
5122                    9,
5123                    10,
5124                    11,
5125                    12,
5126                    13,
5127                    14,
5128                    15
5129                ]
5130            ),
5131            4 => simd_shuffle!(
5132                a,
5133                b,
5134                [
5135                    0,
5136                    1,
5137                    2,
5138                    3,
5139                    16 + LANE2 as u32,
5140                    5,
5141                    6,
5142                    7,
5143                    8,
5144                    9,
5145                    10,
5146                    11,
5147                    12,
5148                    13,
5149                    14,
5150                    15
5151                ]
5152            ),
5153            5 => simd_shuffle!(
5154                a,
5155                b,
5156                [
5157                    0,
5158                    1,
5159                    2,
5160                    3,
5161                    4,
5162                    16 + LANE2 as u32,
5163                    6,
5164                    7,
5165                    8,
5166                    9,
5167                    10,
5168                    11,
5169                    12,
5170                    13,
5171                    14,
5172                    15
5173                ]
5174            ),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
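// Editorial note (not produced by stdarch-gen-arm): every `vcopyq_*`
// intrinsic below follows the same shape. `simd_shuffle!` requires its
// index array to be a compile-time constant, so the destination lane
// `LANE1` cannot appear as a runtime value inside a single array.
// Instead the code matches on `LANE1` and gives each arm its own
// literal index array, where only the source position varies via
// `N + LANE2 as u32` (`N` = lane count of `a`; indices >= `N` select
// lanes of `b`). `static_assert_uimm_bits!` rejects out-of-range lanes
// at compile time, so the masked match is exhaustive and the `_` arm
// is truly unreachable.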
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x4_t,
) -> uint16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
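// Illustrative usage sketch (editor-added hypothetical helper; not part
// of the generated output): `LANE1` selects the destination lane in the
// 128-bit vector `a`, `LANE2` selects the source lane in the 64-bit
// vector `b`; every other lane of `a` passes through unchanged.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vcopyq_lane_u16_usage_sketch() {
    // Hypothetical lane values, chosen only to make the copy visible.
    let a: uint16x8_t = unsafe { vld1q_u16([0u16, 1, 2, 3, 4, 5, 6, 7].as_ptr()) };
    let b: uint16x4_t = unsafe { vld1_u16([10u16, 11, 12, 13].as_ptr()) };
    // Copy lane 1 of `b` (value 11) into lane 0 of `a`.
    let r = vcopyq_lane_u16::<0, 1>(a, b);
    assert_eq!(vgetq_lane_u16::<0>(r), 11);
    assert_eq!(vgetq_lane_u16::<7>(r), 7);
}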
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x2_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x8_t,
) -> poly8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    let b: poly8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x4_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
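// Editorial note (not produced by stdarch-gen-arm): the `vcopyq_lane_*`
// intrinsics above take their source lane from a 64-bit vector, which is
// why they first widen `b` with a self-shuffle before indexing past the
// lanes of `a`. The `vcopyq_laneq_*` variants below take a 128-bit
// source, so no widening step is needed and the match arms index `b`
// directly.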
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
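// Illustrative usage sketch (editor-added hypothetical helper; not part
// of the generated output): with two-lane vectors, the `laneq` copy
// degenerates to choosing one lane from each operand.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vcopyq_laneq_f64_usage_sketch() {
    let a = vdupq_n_f64(1.0);
    let b = vdupq_n_f64(2.0);
    // Replace lane 1 of `a` with lane 0 of `b`; lane 0 of `a` is kept.
    let r = vcopyq_laneq_f64::<1, 0>(a, b);
    assert_eq!(vgetq_lane_f64::<0>(r), 1.0);
    assert_eq!(vgetq_lane_f64::<1>(r), 2.0);
}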
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
    a: int8x16_t,
    b: int8x16_t,
) -> int8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x8_t,
) -> int16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x4_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x2_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x16_t,
    b: uint8x16_t,
) -> uint8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x8_t,
) -> uint16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x16_t,
) -> poly8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
7178#[doc = "Insert vector element from another vector element"]
7179#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
7180#[inline]
7181#[target_feature(enable = "neon")]
7182#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
7183#[rustc_legacy_const_generics(1, 3)]
7184#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7185pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
7186    a: poly64x2_t,
7187    b: poly64x2_t,
7188) -> poly64x2_t {
7189    static_assert_uimm_bits!(LANE1, 1);
7190    static_assert_uimm_bits!(LANE2, 1);
7191    unsafe {
7192        match LANE1 & 0b1 {
7193            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
7194            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
7195            _ => unreachable_unchecked(),
7196        }
7197    }
7198}
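// Editor's note: the sketch below is illustrative only and is not part of the
// generated API. It shows the lane-copy semantics: with LANE1 = 0, LANE2 = 1,
// vcopyq_laneq_p64 replaces lane 0 of `a` with lane 1 of `b` and leaves the
// remaining lane of `a` untouched. The example function name and the
// transmute-based lane inspection are editorial choices.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcopyq_laneq_p64() {
    unsafe {
        let a: poly64x2_t = transmute([10u64, 11]);
        let b: poly64x2_t = transmute([20u64, 21]);
        let r: [u64; 2] = transmute(vcopyq_laneq_p64::<0, 1>(a, b));
        assert_eq!(r, [21, 11]); // lane 0 now holds b[1]; lane 1 keeps a[1]
    }
}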
#[doc = "Create vector from a 64-bit pattern"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcreate_f64(a: u64) -> float64x1_t {
    unsafe { transmute(a) }
}
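// Editor's note: illustrative sketch, not generated code. vcreate_f64 simply
// reinterprets a raw 64-bit pattern as a one-lane f64 vector, so the IEEE-754
// encoding of 1.0 must round-trip exactly.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcreate_f64() {
    let v = vcreate_f64(0x3FF0_0000_0000_0000); // bit pattern of 1.0f64
    let x: f64 = unsafe { transmute(v) };
    assert_eq!(x, 1.0);
}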
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
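// Editor's note: illustrative sketch, not generated code. Widening with
// vcvt_f64_f32 is exact for every f32, so narrowing straight back through
// vcvt_f32_f64 returns the original lanes unchanged.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcvt_f64_f32_round_trip() {
    unsafe {
        let a: float32x2_t = transmute([1.5f32, -0.25]);
        let wide = vcvt_f64_f32(a); // exact widening
        let narrow: [f32; 2] = transmute(vcvt_f32_f64(wide)); // rounds; lossless here
        assert_eq!(narrow, [1.5, -0.25]);
    }
}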
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
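// Editor's note: illustrative sketch, not generated code. The scvtf/ucvtf
// forms convert integer lanes to floating point; integers this small convert
// exactly.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcvt_f64_s64() {
    unsafe {
        let a: int64x1_t = transmute(-7i64);
        let f: f64 = transmute(vcvt_f64_s64(a));
        assert_eq!(f, -7.0);
    }
}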
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe {
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        simd_cast(b)
    }
}
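// Editor's note: illustrative sketch, not generated code. The `_high_`
// variants operate on the upper half of a 128-bit vector: vcvt_high_f64_f32
// widens lanes 2 and 3 of its f32x4 input.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcvt_high_f64_f32() {
    unsafe {
        let a: float32x4_t = transmute([0.0f32, 1.0, 2.0, 3.0]);
        let hi: [f64; 2] = transmute(vcvt_high_f64_f32(a));
        assert_eq!(hi, [2.0, 3.0]); // only the upper two lanes are converted
    }
}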
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
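// Editor's note: illustrative sketch, not generated code. With N fractional
// bits, the _n_ converts interpret the raw lane as value / 2^N, so raw 5 with
// N = 2 decodes to 1.25.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcvt_n_f64_s64() {
    unsafe {
        let a: int64x1_t = transmute(5i64);
        let f: f64 = transmute(vcvt_n_f64_s64::<2>(a));
        assert_eq!(f, 1.25); // 5 / 2^2
    }
}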
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
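// Editor's note: illustrative sketch, not generated code. The opposite
// direction multiplies by 2^N and then truncates toward zero: 1.3 with
// N = 2 becomes trunc(1.3 * 4) = 5.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcvt_n_s64_f64() {
    unsafe {
        let a: float64x1_t = transmute(1.3f64);
        let raw: i64 = transmute(vcvt_n_s64_f64::<2>(a));
        assert_eq!(raw, 5);
    }
}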
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtq_u64_f64(a) }
}
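// Editor's note: illustrative sketch, not generated code. These conversions
// lower to LLVM's saturating fptosi.sat/fptoui.sat, so out-of-range inputs
// clamp to the integer range instead of wrapping, and negative inputs to the
// unsigned form produce 0.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcvt_s64_f64_saturates() {
    unsafe {
        let big: float64x1_t = transmute(1.0e30f64);
        let clamped: i64 = transmute(vcvt_s64_f64(big));
        assert_eq!(clamped, i64::MAX); // clamped, not wrapped
        let neg: float64x1_t = transmute(-1.0f64);
        let floored: u64 = transmute(vcvt_u64_f64(neg));
        assert_eq!(floored, 0); // unsigned result floors at zero
    }
}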
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtad_u64_f64(a) }
}
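// Editor's note: illustrative sketch, not generated code. fcvtas/fcvtau round
// to nearest with ties away from zero, unlike the truncation performed by
// Rust's `as` casts: 2.5 goes to 3 and -2.5 to -3.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcvtas_s32_f32() {
    assert_eq!(vcvtas_s32_f32(2.5), 3); // tie rounds away from zero
    assert_eq!(vcvtas_s32_f32(-2.5), -3);
    assert_eq!(vcvtas_u32_f32(0.5), 1);
}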
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_f16_u32::<N>(a as u32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    a as u64
}
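// Editor's note: illustrative sketch, not generated code, and it leans on the
// unstable `f16` primitive (an f32-to-f16 cast builds the input), so it
// assumes a toolchain where `f16` is usable. The scalar converts truncate
// toward zero like `as` casts.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon,fp16")]
#[allow(dead_code)]
fn example_vcvth_u32_f16() {
    let h = 2.9f32 as f16; // nearest f16 to 2.9
    assert_eq!(vcvth_u32_f16(h), 2); // truncates toward zero
}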
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtmq_u64_f64(a) }
}
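// Editor's note: illustrative sketch, not generated code. fcvtms rounds
// toward minus infinity (a floor), so -1.5 becomes -2 where truncation would
// give -1, while 2.9 still floors to 2.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn example_vcvtm_s32_f32() {
    unsafe {
        let a: float32x2_t = transmute([-1.5f32, 2.9]);
        let r: [i32; 2] = transmute(vcvtm_s32_f32(a));
        assert_eq!(r, [-2, 2]);
    }
}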
8353#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
8354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
8355#[inline]
8356#[cfg_attr(test, assert_instr(fcvtms))]
8357#[target_feature(enable = "neon,fp16")]
8358#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8359pub fn vcvtmh_s16_f16(a: f16) -> i16 {
8360    vcvtmh_s32_f16(a) as i16
8361}
8362#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
8363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
8364#[inline]
8365#[cfg_attr(test, assert_instr(fcvtms))]
8366#[target_feature(enable = "neon,fp16")]
8367#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8368pub fn vcvtmh_s32_f16(a: f16) -> i32 {
8369    unsafe extern "unadjusted" {
8370        #[cfg_attr(
8371            any(target_arch = "aarch64", target_arch = "arm64ec"),
8372            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
8373        )]
8374        fn _vcvtmh_s32_f16(a: f16) -> i32;
8375    }
8376    unsafe { _vcvtmh_s32_f16(a) }
8377}
8378#[doc = "Floating-point convert to integer, rounding toward minus infinity"]
8379#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
8380#[inline]
8381#[cfg_attr(test, assert_instr(fcvtms))]
8382#[target_feature(enable = "neon,fp16")]
8383#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8384pub fn vcvtmh_s64_f16(a: f16) -> i64 {
8385    unsafe extern "unadjusted" {
8386        #[cfg_attr(
8387            any(target_arch = "aarch64", target_arch = "arm64ec"),
8388            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
8389        )]
8390        fn _vcvtmh_s64_f16(a: f16) -> i64;
8391    }
8392    unsafe { _vcvtmh_s64_f16(a) }
8393}
8394#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
8396#[inline]
8397#[cfg_attr(test, assert_instr(fcvtmu))]
8398#[target_feature(enable = "neon,fp16")]
8399#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8400pub fn vcvtmh_u16_f16(a: f16) -> u16 {
8401    vcvtmh_u32_f16(a) as u16
8402}
8403#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
8405#[inline]
8406#[cfg_attr(test, assert_instr(fcvtmu))]
8407#[target_feature(enable = "neon,fp16")]
8408#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8409pub fn vcvtmh_u32_f16(a: f16) -> u32 {
8410    unsafe extern "unadjusted" {
8411        #[cfg_attr(
8412            any(target_arch = "aarch64", target_arch = "arm64ec"),
8413            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
8414        )]
8415        fn _vcvtmh_u32_f16(a: f16) -> u32;
8416    }
8417    unsafe { _vcvtmh_u32_f16(a) }
8418}
8419#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8420#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
8421#[inline]
8422#[cfg_attr(test, assert_instr(fcvtmu))]
8423#[target_feature(enable = "neon,fp16")]
8424#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8425pub fn vcvtmh_u64_f16(a: f16) -> u64 {
8426    unsafe extern "unadjusted" {
8427        #[cfg_attr(
8428            any(target_arch = "aarch64", target_arch = "arm64ec"),
8429            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
8430        )]
8431        fn _vcvtmh_u64_f16(a: f16) -> u64;
8432    }
8433    unsafe { _vcvtmh_u64_f16(a) }
8434}
8435#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
8437#[inline]
8438#[target_feature(enable = "neon")]
8439#[cfg_attr(test, assert_instr(fcvtms))]
8440#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8441pub fn vcvtms_s32_f32(a: f32) -> i32 {
8442    unsafe extern "unadjusted" {
8443        #[cfg_attr(
8444            any(target_arch = "aarch64", target_arch = "arm64ec"),
8445            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
8446        )]
8447        fn _vcvtms_s32_f32(a: f32) -> i32;
8448    }
8449    unsafe { _vcvtms_s32_f32(a) }
8450}
8451#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8452#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
8453#[inline]
8454#[target_feature(enable = "neon")]
8455#[cfg_attr(test, assert_instr(fcvtms))]
8456#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8457pub fn vcvtmd_s64_f64(a: f64) -> i64 {
8458    unsafe extern "unadjusted" {
8459        #[cfg_attr(
8460            any(target_arch = "aarch64", target_arch = "arm64ec"),
8461            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
8462        )]
8463        fn _vcvtmd_s64_f64(a: f64) -> i64;
8464    }
8465    unsafe { _vcvtmd_s64_f64(a) }
8466}
8467#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8468#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
8469#[inline]
8470#[target_feature(enable = "neon")]
8471#[cfg_attr(test, assert_instr(fcvtmu))]
8472#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8473pub fn vcvtms_u32_f32(a: f32) -> u32 {
8474    unsafe extern "unadjusted" {
8475        #[cfg_attr(
8476            any(target_arch = "aarch64", target_arch = "arm64ec"),
8477            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
8478        )]
8479        fn _vcvtms_u32_f32(a: f32) -> u32;
8480    }
8481    unsafe { _vcvtms_u32_f32(a) }
8482}
8483#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8484#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
8485#[inline]
8486#[target_feature(enable = "neon")]
8487#[cfg_attr(test, assert_instr(fcvtmu))]
8488#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8489pub fn vcvtmd_u64_f64(a: f64) -> u64 {
8490    unsafe extern "unadjusted" {
8491        #[cfg_attr(
8492            any(target_arch = "aarch64", target_arch = "arm64ec"),
8493            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
8494        )]
8495        fn _vcvtmd_u64_f64(a: f64) -> u64;
8496    }
8497    unsafe { _vcvtmd_u64_f64(a) }
8498}
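// Editorial usage sketch (not produced by the generator): the FCVTM family
// rounds toward minus infinity (floor) before converting, and out-of-range
// results saturate, so a negative input to an unsigned variant yields zero.
// The function name below is illustrative only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_fcvtm_floor_rounding() {
    assert_eq!(vcvtms_s32_f32(2.9), 2);
    assert_eq!(vcvtms_s32_f32(-2.1), -3);
    assert_eq!(vcvtms_u32_f32(-0.5), 0); // floor(-0.5) saturates to 0 unsigned
}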
8499#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
8501#[inline]
8502#[cfg_attr(test, assert_instr(fcvtns))]
8503#[target_feature(enable = "neon,fp16")]
8504#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8505pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
8506    unsafe extern "unadjusted" {
8507        #[cfg_attr(
8508            any(target_arch = "aarch64", target_arch = "arm64ec"),
8509            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
8510        )]
8511        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
8512    }
8513    unsafe { _vcvtn_s16_f16(a) }
8514}
8515#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8516#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
8517#[inline]
8518#[cfg_attr(test, assert_instr(fcvtns))]
8519#[target_feature(enable = "neon,fp16")]
8520#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8521pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
8522    unsafe extern "unadjusted" {
8523        #[cfg_attr(
8524            any(target_arch = "aarch64", target_arch = "arm64ec"),
8525            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
8526        )]
8527        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
8528    }
8529    unsafe { _vcvtnq_s16_f16(a) }
8530}
8531#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8532#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
8533#[inline]
8534#[target_feature(enable = "neon")]
8535#[cfg_attr(test, assert_instr(fcvtns))]
8536#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8537pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
8538    unsafe extern "unadjusted" {
8539        #[cfg_attr(
8540            any(target_arch = "aarch64", target_arch = "arm64ec"),
8541            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
8542        )]
8543        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
8544    }
8545    unsafe { _vcvtn_s32_f32(a) }
8546}
8547#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8548#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
8549#[inline]
8550#[target_feature(enable = "neon")]
8551#[cfg_attr(test, assert_instr(fcvtns))]
8552#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8553pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
8554    unsafe extern "unadjusted" {
8555        #[cfg_attr(
8556            any(target_arch = "aarch64", target_arch = "arm64ec"),
8557            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
8558        )]
8559        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
8560    }
8561    unsafe { _vcvtnq_s32_f32(a) }
8562}
8563#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8564#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
8565#[inline]
8566#[target_feature(enable = "neon")]
8567#[cfg_attr(test, assert_instr(fcvtns))]
8568#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8569pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
8570    unsafe extern "unadjusted" {
8571        #[cfg_attr(
8572            any(target_arch = "aarch64", target_arch = "arm64ec"),
8573            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
8574        )]
8575        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
8576    }
8577    unsafe { _vcvtn_s64_f64(a) }
8578}
8579#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8580#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
8581#[inline]
8582#[target_feature(enable = "neon")]
8583#[cfg_attr(test, assert_instr(fcvtns))]
8584#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8585pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
8586    unsafe extern "unadjusted" {
8587        #[cfg_attr(
8588            any(target_arch = "aarch64", target_arch = "arm64ec"),
8589            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
8590        )]
8591        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
8592    }
8593    unsafe { _vcvtnq_s64_f64(a) }
8594}
8595#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
8597#[inline]
8598#[cfg_attr(test, assert_instr(fcvtnu))]
8599#[target_feature(enable = "neon,fp16")]
8600#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8601pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
8602    unsafe extern "unadjusted" {
8603        #[cfg_attr(
8604            any(target_arch = "aarch64", target_arch = "arm64ec"),
8605            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
8606        )]
8607        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
8608    }
8609    unsafe { _vcvtn_u16_f16(a) }
8610}
8611#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8612#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
8613#[inline]
8614#[cfg_attr(test, assert_instr(fcvtnu))]
8615#[target_feature(enable = "neon,fp16")]
8616#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8617pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
8618    unsafe extern "unadjusted" {
8619        #[cfg_attr(
8620            any(target_arch = "aarch64", target_arch = "arm64ec"),
8621            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
8622        )]
8623        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
8624    }
8625    unsafe { _vcvtnq_u16_f16(a) }
8626}
8627#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8628#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
8629#[inline]
8630#[target_feature(enable = "neon")]
8631#[cfg_attr(test, assert_instr(fcvtnu))]
8632#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8633pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
8634    unsafe extern "unadjusted" {
8635        #[cfg_attr(
8636            any(target_arch = "aarch64", target_arch = "arm64ec"),
8637            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
8638        )]
8639        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
8640    }
8641    unsafe { _vcvtn_u32_f32(a) }
8642}
8643#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8644#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
8645#[inline]
8646#[target_feature(enable = "neon")]
8647#[cfg_attr(test, assert_instr(fcvtnu))]
8648#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8649pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
8650    unsafe extern "unadjusted" {
8651        #[cfg_attr(
8652            any(target_arch = "aarch64", target_arch = "arm64ec"),
8653            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
8654        )]
8655        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
8656    }
8657    unsafe { _vcvtnq_u32_f32(a) }
8658}
8659#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8660#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
8661#[inline]
8662#[target_feature(enable = "neon")]
8663#[cfg_attr(test, assert_instr(fcvtnu))]
8664#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8665pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
8666    unsafe extern "unadjusted" {
8667        #[cfg_attr(
8668            any(target_arch = "aarch64", target_arch = "arm64ec"),
8669            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
8670        )]
8671        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
8672    }
8673    unsafe { _vcvtn_u64_f64(a) }
8674}
8675#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
8677#[inline]
8678#[target_feature(enable = "neon")]
8679#[cfg_attr(test, assert_instr(fcvtnu))]
8680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8681pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
8682    unsafe extern "unadjusted" {
8683        #[cfg_attr(
8684            any(target_arch = "aarch64", target_arch = "arm64ec"),
8685            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
8686        )]
8687        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
8688    }
8689    unsafe { _vcvtnq_u64_f64(a) }
8690}
8691#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
8692#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
8693#[inline]
8694#[cfg_attr(test, assert_instr(fcvtns))]
8695#[target_feature(enable = "neon,fp16")]
8696#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8697pub fn vcvtnh_s16_f16(a: f16) -> i16 {
8698    vcvtnh_s32_f16(a) as i16
8699}
8700#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
8701#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
8702#[inline]
8703#[cfg_attr(test, assert_instr(fcvtns))]
8704#[target_feature(enable = "neon,fp16")]
8705#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8706pub fn vcvtnh_s32_f16(a: f16) -> i32 {
8707    unsafe extern "unadjusted" {
8708        #[cfg_attr(
8709            any(target_arch = "aarch64", target_arch = "arm64ec"),
8710            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
8711        )]
8712        fn _vcvtnh_s32_f16(a: f16) -> i32;
8713    }
8714    unsafe { _vcvtnh_s32_f16(a) }
8715}
8716#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
8717#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
8718#[inline]
8719#[cfg_attr(test, assert_instr(fcvtns))]
8720#[target_feature(enable = "neon,fp16")]
8721#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8722pub fn vcvtnh_s64_f16(a: f16) -> i64 {
8723    unsafe extern "unadjusted" {
8724        #[cfg_attr(
8725            any(target_arch = "aarch64", target_arch = "arm64ec"),
8726            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
8727        )]
8728        fn _vcvtnh_s64_f16(a: f16) -> i64;
8729    }
8730    unsafe { _vcvtnh_s64_f16(a) }
8731}
8732#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8733#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
8734#[inline]
8735#[cfg_attr(test, assert_instr(fcvtnu))]
8736#[target_feature(enable = "neon,fp16")]
8737#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8738pub fn vcvtnh_u16_f16(a: f16) -> u16 {
8739    vcvtnh_u32_f16(a) as u16
8740}
8741#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8742#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
8743#[inline]
8744#[cfg_attr(test, assert_instr(fcvtnu))]
8745#[target_feature(enable = "neon,fp16")]
8746#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8747pub fn vcvtnh_u32_f16(a: f16) -> u32 {
8748    unsafe extern "unadjusted" {
8749        #[cfg_attr(
8750            any(target_arch = "aarch64", target_arch = "arm64ec"),
8751            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
8752        )]
8753        fn _vcvtnh_u32_f16(a: f16) -> u32;
8754    }
8755    unsafe { _vcvtnh_u32_f16(a) }
8756}
8757#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8758#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
8759#[inline]
8760#[cfg_attr(test, assert_instr(fcvtnu))]
8761#[target_feature(enable = "neon,fp16")]
8762#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8763pub fn vcvtnh_u64_f16(a: f16) -> u64 {
8764    unsafe extern "unadjusted" {
8765        #[cfg_attr(
8766            any(target_arch = "aarch64", target_arch = "arm64ec"),
8767            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
8768        )]
8769        fn _vcvtnh_u64_f16(a: f16) -> u64;
8770    }
8771    unsafe { _vcvtnh_u64_f16(a) }
8772}
8773#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8774#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
8775#[inline]
8776#[target_feature(enable = "neon")]
8777#[cfg_attr(test, assert_instr(fcvtns))]
8778#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8779pub fn vcvtns_s32_f32(a: f32) -> i32 {
8780    unsafe extern "unadjusted" {
8781        #[cfg_attr(
8782            any(target_arch = "aarch64", target_arch = "arm64ec"),
8783            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
8784        )]
8785        fn _vcvtns_s32_f32(a: f32) -> i32;
8786    }
8787    unsafe { _vcvtns_s32_f32(a) }
8788}
8789#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
8790#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
8791#[inline]
8792#[target_feature(enable = "neon")]
8793#[cfg_attr(test, assert_instr(fcvtns))]
8794#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8795pub fn vcvtnd_s64_f64(a: f64) -> i64 {
8796    unsafe extern "unadjusted" {
8797        #[cfg_attr(
8798            any(target_arch = "aarch64", target_arch = "arm64ec"),
8799            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
8800        )]
8801        fn _vcvtnd_s64_f64(a: f64) -> i64;
8802    }
8803    unsafe { _vcvtnd_s64_f64(a) }
8804}
8805#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
8807#[inline]
8808#[target_feature(enable = "neon")]
8809#[cfg_attr(test, assert_instr(fcvtnu))]
8810#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8811pub fn vcvtns_u32_f32(a: f32) -> u32 {
8812    unsafe extern "unadjusted" {
8813        #[cfg_attr(
8814            any(target_arch = "aarch64", target_arch = "arm64ec"),
8815            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
8816        )]
8817        fn _vcvtns_u32_f32(a: f32) -> u32;
8818    }
8819    unsafe { _vcvtns_u32_f32(a) }
8820}
8821#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
8822#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
8823#[inline]
8824#[target_feature(enable = "neon")]
8825#[cfg_attr(test, assert_instr(fcvtnu))]
8826#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8827pub fn vcvtnd_u64_f64(a: f64) -> u64 {
8828    unsafe extern "unadjusted" {
8829        #[cfg_attr(
8830            any(target_arch = "aarch64", target_arch = "arm64ec"),
8831            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
8832        )]
8833        fn _vcvtnd_u64_f64(a: f64) -> u64;
8834    }
8835    unsafe { _vcvtnd_u64_f64(a) }
8836}
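// Editorial usage sketch (not produced by the generator): FCVTN rounds to the
// nearest integer and breaks exact ties toward the even integer, unlike a
// round-half-away-from-zero convention. Illustrative name only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_fcvtn_ties_to_even() {
    assert_eq!(vcvtns_s32_f32(2.5), 2); // tie: the even neighbor is 2
    assert_eq!(vcvtns_s32_f32(3.5), 4); // tie: the even neighbor is 4
    assert_eq!(vcvtns_s32_f32(-2.5), -2);
}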
8837#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8838#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
8839#[inline]
8840#[cfg_attr(test, assert_instr(fcvtps))]
8841#[target_feature(enable = "neon,fp16")]
8842#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8843pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
8844    unsafe extern "unadjusted" {
8845        #[cfg_attr(
8846            any(target_arch = "aarch64", target_arch = "arm64ec"),
8847            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
8848        )]
8849        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
8850    }
8851    unsafe { _vcvtp_s16_f16(a) }
8852}
8853#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
8855#[inline]
8856#[cfg_attr(test, assert_instr(fcvtps))]
8857#[target_feature(enable = "neon,fp16")]
8858#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8859pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
8860    unsafe extern "unadjusted" {
8861        #[cfg_attr(
8862            any(target_arch = "aarch64", target_arch = "arm64ec"),
8863            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
8864        )]
8865        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
8866    }
8867    unsafe { _vcvtpq_s16_f16(a) }
8868}
8869#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8870#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
8871#[inline]
8872#[target_feature(enable = "neon")]
8873#[cfg_attr(test, assert_instr(fcvtps))]
8874#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8875pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
8876    unsafe extern "unadjusted" {
8877        #[cfg_attr(
8878            any(target_arch = "aarch64", target_arch = "arm64ec"),
8879            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
8880        )]
8881        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
8882    }
8883    unsafe { _vcvtp_s32_f32(a) }
8884}
8885#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8886#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
8887#[inline]
8888#[target_feature(enable = "neon")]
8889#[cfg_attr(test, assert_instr(fcvtps))]
8890#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8891pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
8892    unsafe extern "unadjusted" {
8893        #[cfg_attr(
8894            any(target_arch = "aarch64", target_arch = "arm64ec"),
8895            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
8896        )]
8897        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
8898    }
8899    unsafe { _vcvtpq_s32_f32(a) }
8900}
8901#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8902#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
8903#[inline]
8904#[target_feature(enable = "neon")]
8905#[cfg_attr(test, assert_instr(fcvtps))]
8906#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8907pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
8908    unsafe extern "unadjusted" {
8909        #[cfg_attr(
8910            any(target_arch = "aarch64", target_arch = "arm64ec"),
8911            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
8912        )]
8913        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
8914    }
8915    unsafe { _vcvtp_s64_f64(a) }
8916}
8917#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
8918#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
8919#[inline]
8920#[target_feature(enable = "neon")]
8921#[cfg_attr(test, assert_instr(fcvtps))]
8922#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8923pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
8924    unsafe extern "unadjusted" {
8925        #[cfg_attr(
8926            any(target_arch = "aarch64", target_arch = "arm64ec"),
8927            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
8928        )]
8929        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
8930    }
8931    unsafe { _vcvtpq_s64_f64(a) }
8932}
8933#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8934#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
8935#[inline]
8936#[cfg_attr(test, assert_instr(fcvtpu))]
8937#[target_feature(enable = "neon,fp16")]
8938#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8939pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
8940    unsafe extern "unadjusted" {
8941        #[cfg_attr(
8942            any(target_arch = "aarch64", target_arch = "arm64ec"),
8943            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
8944        )]
8945        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
8946    }
8947    unsafe { _vcvtp_u16_f16(a) }
8948}
8949#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
8951#[inline]
8952#[cfg_attr(test, assert_instr(fcvtpu))]
8953#[target_feature(enable = "neon,fp16")]
8954#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8955pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
8956    unsafe extern "unadjusted" {
8957        #[cfg_attr(
8958            any(target_arch = "aarch64", target_arch = "arm64ec"),
8959            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
8960        )]
8961        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
8962    }
8963    unsafe { _vcvtpq_u16_f16(a) }
8964}
8965#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8966#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
8967#[inline]
8968#[target_feature(enable = "neon")]
8969#[cfg_attr(test, assert_instr(fcvtpu))]
8970#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8971pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
8972    unsafe extern "unadjusted" {
8973        #[cfg_attr(
8974            any(target_arch = "aarch64", target_arch = "arm64ec"),
8975            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
8976        )]
8977        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
8978    }
8979    unsafe { _vcvtp_u32_f32(a) }
8980}
8981#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
8983#[inline]
8984#[target_feature(enable = "neon")]
8985#[cfg_attr(test, assert_instr(fcvtpu))]
8986#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8987pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
8988    unsafe extern "unadjusted" {
8989        #[cfg_attr(
8990            any(target_arch = "aarch64", target_arch = "arm64ec"),
8991            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
8992        )]
8993        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
8994    }
8995    unsafe { _vcvtpq_u32_f32(a) }
8996}
8997#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
8998#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
8999#[inline]
9000#[target_feature(enable = "neon")]
9001#[cfg_attr(test, assert_instr(fcvtpu))]
9002#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9003pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
9004    unsafe extern "unadjusted" {
9005        #[cfg_attr(
9006            any(target_arch = "aarch64", target_arch = "arm64ec"),
9007            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
9008        )]
9009        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
9010    }
9011    unsafe { _vcvtp_u64_f64(a) }
9012}
9013#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
9015#[inline]
9016#[target_feature(enable = "neon")]
9017#[cfg_attr(test, assert_instr(fcvtpu))]
9018#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9019pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
9020    unsafe extern "unadjusted" {
9021        #[cfg_attr(
9022            any(target_arch = "aarch64", target_arch = "arm64ec"),
9023            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
9024        )]
9025        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
9026    }
9027    unsafe { _vcvtpq_u64_f64(a) }
9028}
9029#[doc = "Floating-point convert to integer, rounding toward plus infinity"]
9030#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
9031#[inline]
9032#[cfg_attr(test, assert_instr(fcvtps))]
9033#[target_feature(enable = "neon,fp16")]
9034#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9035pub fn vcvtph_s16_f16(a: f16) -> i16 {
9036    vcvtph_s32_f16(a) as i16
9037}
9038#[doc = "Floating-point convert to integer, rounding toward plus infinity"]
9039#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
9040#[inline]
9041#[cfg_attr(test, assert_instr(fcvtps))]
9042#[target_feature(enable = "neon,fp16")]
9043#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9044pub fn vcvtph_s32_f16(a: f16) -> i32 {
9045    unsafe extern "unadjusted" {
9046        #[cfg_attr(
9047            any(target_arch = "aarch64", target_arch = "arm64ec"),
9048            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
9049        )]
9050        fn _vcvtph_s32_f16(a: f16) -> i32;
9051    }
9052    unsafe { _vcvtph_s32_f16(a) }
9053}
9054#[doc = "Floating-point convert to integer, rounding toward plus infinity"]
9055#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
9056#[inline]
9057#[cfg_attr(test, assert_instr(fcvtps))]
9058#[target_feature(enable = "neon,fp16")]
9059#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9060pub fn vcvtph_s64_f16(a: f16) -> i64 {
9061    unsafe extern "unadjusted" {
9062        #[cfg_attr(
9063            any(target_arch = "aarch64", target_arch = "arm64ec"),
9064            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
9065        )]
9066        fn _vcvtph_s64_f16(a: f16) -> i64;
9067    }
9068    unsafe { _vcvtph_s64_f16(a) }
9069}
9070#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9071#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
9072#[inline]
9073#[cfg_attr(test, assert_instr(fcvtpu))]
9074#[target_feature(enable = "neon,fp16")]
9075#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9076pub fn vcvtph_u16_f16(a: f16) -> u16 {
9077    vcvtph_u32_f16(a) as u16
9078}
9079#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9080#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
9081#[inline]
9082#[cfg_attr(test, assert_instr(fcvtpu))]
9083#[target_feature(enable = "neon,fp16")]
9084#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9085pub fn vcvtph_u32_f16(a: f16) -> u32 {
9086    unsafe extern "unadjusted" {
9087        #[cfg_attr(
9088            any(target_arch = "aarch64", target_arch = "arm64ec"),
9089            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
9090        )]
9091        fn _vcvtph_u32_f16(a: f16) -> u32;
9092    }
9093    unsafe { _vcvtph_u32_f16(a) }
9094}
9095#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9096#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
9097#[inline]
9098#[cfg_attr(test, assert_instr(fcvtpu))]
9099#[target_feature(enable = "neon,fp16")]
9100#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9101pub fn vcvtph_u64_f16(a: f16) -> u64 {
9102    unsafe extern "unadjusted" {
9103        #[cfg_attr(
9104            any(target_arch = "aarch64", target_arch = "arm64ec"),
9105            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
9106        )]
9107        fn _vcvtph_u64_f16(a: f16) -> u64;
9108    }
9109    unsafe { _vcvtph_u64_f16(a) }
9110}
9111#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9112#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
9113#[inline]
9114#[target_feature(enable = "neon")]
9115#[cfg_attr(test, assert_instr(fcvtps))]
9116#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9117pub fn vcvtps_s32_f32(a: f32) -> i32 {
9118    unsafe extern "unadjusted" {
9119        #[cfg_attr(
9120            any(target_arch = "aarch64", target_arch = "arm64ec"),
9121            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
9122        )]
9123        fn _vcvtps_s32_f32(a: f32) -> i32;
9124    }
9125    unsafe { _vcvtps_s32_f32(a) }
9126}
9127#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9128#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
9129#[inline]
9130#[target_feature(enable = "neon")]
9131#[cfg_attr(test, assert_instr(fcvtps))]
9132#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9133pub fn vcvtpd_s64_f64(a: f64) -> i64 {
9134    unsafe extern "unadjusted" {
9135        #[cfg_attr(
9136            any(target_arch = "aarch64", target_arch = "arm64ec"),
9137            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
9138        )]
9139        fn _vcvtpd_s64_f64(a: f64) -> i64;
9140    }
9141    unsafe { _vcvtpd_s64_f64(a) }
9142}
9143#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9144#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
9145#[inline]
9146#[target_feature(enable = "neon")]
9147#[cfg_attr(test, assert_instr(fcvtpu))]
9148#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9149pub fn vcvtps_u32_f32(a: f32) -> u32 {
9150    unsafe extern "unadjusted" {
9151        #[cfg_attr(
9152            any(target_arch = "aarch64", target_arch = "arm64ec"),
9153            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
9154        )]
9155        fn _vcvtps_u32_f32(a: f32) -> u32;
9156    }
9157    unsafe { _vcvtps_u32_f32(a) }
9158}
9159#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9160#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
9161#[inline]
9162#[target_feature(enable = "neon")]
9163#[cfg_attr(test, assert_instr(fcvtpu))]
9164#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9165pub fn vcvtpd_u64_f64(a: f64) -> u64 {
9166    unsafe extern "unadjusted" {
9167        #[cfg_attr(
9168            any(target_arch = "aarch64", target_arch = "arm64ec"),
9169            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
9170        )]
9171        fn _vcvtpd_u64_f64(a: f64) -> u64;
9172    }
9173    unsafe { _vcvtpd_u64_f64(a) }
9174}
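// Editorial usage sketch (not produced by the generator): FCVTP rounds toward
// plus infinity (ceiling), the mirror image of the FCVTM family above.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_fcvtp_ceiling_rounding() {
    assert_eq!(vcvtps_s32_f32(2.1), 3);
    assert_eq!(vcvtps_s32_f32(-2.9), -2);
    assert_eq!(vcvtps_u32_f32(0.1), 1);
}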
9175#[doc = "Fixed-point convert to floating-point"]
9176#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
9177#[inline]
9178#[target_feature(enable = "neon")]
9179#[cfg_attr(test, assert_instr(ucvtf))]
9180#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9181pub fn vcvts_f32_u32(a: u32) -> f32 {
9182    a as f32
9183}
9184#[doc = "Fixed-point convert to floating-point"]
9185#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
9186#[inline]
9187#[target_feature(enable = "neon")]
9188#[cfg_attr(test, assert_instr(ucvtf))]
9189#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9190pub fn vcvtd_f64_u64(a: u64) -> f64 {
9191    a as f64
9192}
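// Editorial usage sketch (not produced by the generator): UCVTF rounds to the
// nearest representable float, so integers above 2^24 may not convert exactly
// to f32; 2^24 + 1 sits exactly halfway and ties to the even significand 2^24.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_ucvtf_rounding() {
    assert_eq!(vcvts_f32_u32(7), 7.0);
    assert_eq!(vcvts_f32_u32(16_777_217), 16_777_216.0);
    assert_eq!(vcvtd_f64_u64(16_777_217), 16_777_217.0); // exact in f64
}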
9193#[doc = "Fixed-point convert to floating-point"]
9194#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9195#[inline]
9196#[target_feature(enable = "neon")]
9197#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9198#[rustc_legacy_const_generics(1)]
9199#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9200pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9201    static_assert!(N >= 1 && N <= 32);
9202    unsafe extern "unadjusted" {
9203        #[cfg_attr(
9204            any(target_arch = "aarch64", target_arch = "arm64ec"),
9205            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9206        )]
9207        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9208    }
9209    unsafe { _vcvts_n_f32_s32(a, N) }
9210}
9211#[doc = "Fixed-point convert to floating-point"]
9212#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
9213#[inline]
9214#[target_feature(enable = "neon")]
9215#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9216#[rustc_legacy_const_generics(1)]
9217#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9218pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
9219    static_assert!(N >= 1 && N <= 64);
9220    unsafe extern "unadjusted" {
9221        #[cfg_attr(
9222            any(target_arch = "aarch64", target_arch = "arm64ec"),
9223            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
9224        )]
9225        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
9226    }
9227    unsafe { _vcvtd_n_f64_s64(a, N) }
9228}
9229#[doc = "Fixed-point convert to floating-point"]
9230#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
9231#[inline]
9232#[target_feature(enable = "neon")]
9233#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
9234#[rustc_legacy_const_generics(1)]
9235#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9236pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
9237    static_assert!(N >= 1 && N <= 32);
9238    unsafe extern "unadjusted" {
9239        #[cfg_attr(
9240            any(target_arch = "aarch64", target_arch = "arm64ec"),
9241            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
9242        )]
9243        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
9244    }
9245    unsafe { _vcvts_n_f32_u32(a, N) }
9246}
9247#[doc = "Fixed-point convert to floating-point"]
9248#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
9249#[inline]
9250#[target_feature(enable = "neon")]
9251#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
9252#[rustc_legacy_const_generics(1)]
9253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9254pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
9255    static_assert!(N >= 1 && N <= 64);
9256    unsafe extern "unadjusted" {
9257        #[cfg_attr(
9258            any(target_arch = "aarch64", target_arch = "arm64ec"),
9259            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
9260        )]
9261        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
9262    }
9263    unsafe { _vcvtd_n_f64_u64(a, N) }
9264}
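// Editorial usage sketch (not produced by the generator): for the `_n_`
// fixed-point conversions, `N` is the number of fractional bits, i.e. the
// integer argument is interpreted as `a * 2^-N`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_fixed_point_to_float() {
    assert_eq!(vcvts_n_f32_s32::<8>(384), 1.5); // 384 / 2^8
    assert_eq!(vcvts_n_f32_u32::<16>(65_536), 1.0); // 65536 / 2^16
}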
9265#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
9267#[inline]
9268#[target_feature(enable = "neon")]
9269#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
9270#[rustc_legacy_const_generics(1)]
9271#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9272pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
9273    static_assert!(N >= 1 && N <= 32);
9274    unsafe extern "unadjusted" {
9275        #[cfg_attr(
9276            any(target_arch = "aarch64", target_arch = "arm64ec"),
9277            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
9278        )]
9279        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
9280    }
9281    unsafe { _vcvts_n_s32_f32(a, N) }
9282}
9283#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9284#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
9285#[inline]
9286#[target_feature(enable = "neon")]
9287#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
9288#[rustc_legacy_const_generics(1)]
9289#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9290pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
9291    static_assert!(N >= 1 && N <= 64);
9292    unsafe extern "unadjusted" {
9293        #[cfg_attr(
9294            any(target_arch = "aarch64", target_arch = "arm64ec"),
9295            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
9296        )]
9297        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
9298    }
9299    unsafe { _vcvtd_n_s64_f64(a, N) }
9300}
9301#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9302#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
9303#[inline]
9304#[target_feature(enable = "neon")]
9305#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
9306#[rustc_legacy_const_generics(1)]
9307#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9308pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
9309    static_assert!(N >= 1 && N <= 32);
9310    unsafe extern "unadjusted" {
9311        #[cfg_attr(
9312            any(target_arch = "aarch64", target_arch = "arm64ec"),
9313            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
9314        )]
9315        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
9316    }
9317    unsafe { _vcvts_n_u32_f32(a, N) }
9318}
9319#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9320#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
9321#[inline]
9322#[target_feature(enable = "neon")]
9323#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
9324#[rustc_legacy_const_generics(1)]
9325#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9326pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
9327    static_assert!(N >= 1 && N <= 64);
9328    unsafe extern "unadjusted" {
9329        #[cfg_attr(
9330            any(target_arch = "aarch64", target_arch = "arm64ec"),
9331            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
9332        )]
9333        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
9334    }
9335    unsafe { _vcvtd_n_u64_f64(a, N) }
9336}
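// Editorial usage sketch (not produced by the generator): the reverse `_n_`
// direction scales by 2^N first and then truncates toward zero, computing
// trunc(a * 2^N).
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_float_to_fixed_point() {
    assert_eq!(vcvts_n_s32_f32::<8>(1.5), 384); // 1.5 * 2^8
    assert_eq!(vcvts_n_u32_f32::<4>(0.99), 15); // 15.84 truncates to 15
}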
9337#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
9339#[inline]
9340#[target_feature(enable = "neon")]
9341#[cfg_attr(test, assert_instr(fcvtzs))]
9342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9343pub fn vcvts_s32_f32(a: f32) -> i32 {
9344    a as i32
9345}
9346#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9347#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
9348#[inline]
9349#[target_feature(enable = "neon")]
9350#[cfg_attr(test, assert_instr(fcvtzs))]
9351#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9352pub fn vcvtd_s64_f64(a: f64) -> i64 {
9353    a as i64
9354}
9355#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9356#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
9357#[inline]
9358#[target_feature(enable = "neon")]
9359#[cfg_attr(test, assert_instr(fcvtzu))]
9360#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9361pub fn vcvts_u32_f32(a: f32) -> u32 {
9362    a as u32
9363}
9364#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9365#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
9366#[inline]
9367#[target_feature(enable = "neon")]
9368#[cfg_attr(test, assert_instr(fcvtzu))]
9369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9370pub fn vcvtd_u64_f64(a: f64) -> u64 {
9371    a as u64
9372}
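// Editorial usage sketch (not produced by the generator): these plain scalar
// conversions lower to FCVTZS/FCVTZU, which truncate toward zero and saturate
// on overflow, matching Rust's own `as` cast semantics.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_fcvtz_truncation() {
    assert_eq!(vcvts_s32_f32(-2.9), -2); // truncates toward zero
    assert_eq!(vcvts_u32_f32(-1.0), 0); // saturates at the type's range
    assert_eq!(vcvtd_s64_f64(1.0e30), i64::MAX);
}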
9373#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9374#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
9375#[inline]
9376#[target_feature(enable = "neon")]
9377#[cfg_attr(test, assert_instr(fcvtxn))]
9378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9379pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
9380    unsafe extern "unadjusted" {
9381        #[cfg_attr(
9382            any(target_arch = "aarch64", target_arch = "arm64ec"),
9383            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
9384        )]
9385        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
9386    }
9387    unsafe { _vcvtx_f32_f64(a) }
9388}
9389#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9390#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
9391#[inline]
9392#[target_feature(enable = "neon")]
9393#[cfg_attr(test, assert_instr(fcvtxn))]
9394#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9395pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
9396    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
9397}
9398#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9399#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
9400#[inline]
9401#[target_feature(enable = "neon")]
9402#[cfg_attr(test, assert_instr(fcvtxn))]
9403#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9404pub fn vcvtxd_f32_f64(a: f64) -> f32 {
9405    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
9406}
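// Editorial usage sketch (not produced by the generator): FCVTXN rounds to
// odd, setting the result's least-significant mantissa bit whenever the
// narrowing is inexact. That makes a subsequent f32 -> f16 narrowing immune
// to double rounding, which is the usual reason to reach for these intrinsics.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_fcvtxn_round_to_odd() {
    let inexact = 1.0_f64 + 1.0_f64 / 33_554_432.0; // 1 + 2^-25, not an f32
    assert_eq!(vcvtxd_f32_f64(inexact), 1.0_f32 + f32::EPSILON); // odd mantissa
    assert_eq!(vcvtxd_f32_f64(1.5), 1.5); // exact values pass through
}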
9407#[doc = "Divide"]
9408#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
9409#[inline]
9410#[target_feature(enable = "neon,fp16")]
9411#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9412#[cfg_attr(test, assert_instr(fdiv))]
9413pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
9414    unsafe { simd_div(a, b) }
9415}
9416#[doc = "Divide"]
9417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
9418#[inline]
9419#[target_feature(enable = "neon,fp16")]
9420#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9421#[cfg_attr(test, assert_instr(fdiv))]
9422pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
9423    unsafe { simd_div(a, b) }
9424}
9425#[doc = "Divide"]
9426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
9427#[inline]
9428#[target_feature(enable = "neon")]
9429#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9430#[cfg_attr(test, assert_instr(fdiv))]
9431pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
9432    unsafe { simd_div(a, b) }
9433}
9434#[doc = "Divide"]
9435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
9436#[inline]
9437#[target_feature(enable = "neon")]
9438#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9439#[cfg_attr(test, assert_instr(fdiv))]
9440pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
9441    unsafe { simd_div(a, b) }
9442}
9443#[doc = "Divide"]
9444#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
9445#[inline]
9446#[target_feature(enable = "neon")]
9447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9448#[cfg_attr(test, assert_instr(fdiv))]
9449pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
9450    unsafe { simd_div(a, b) }
9451}
9452#[doc = "Divide"]
9453#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
9454#[inline]
9455#[target_feature(enable = "neon")]
9456#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9457#[cfg_attr(test, assert_instr(fdiv))]
9458pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
9459    unsafe { simd_div(a, b) }
9460}
9461#[doc = "Divide"]
9462#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
9463#[inline]
9464#[target_feature(enable = "neon,fp16")]
9465#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9466#[cfg_attr(test, assert_instr(nop))]
9467pub fn vdivh_f16(a: f16, b: f16) -> f16 {
9468    a / b
9469}
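// Editorial usage sketch (not produced by the generator): FDIV divides lane
// by lane; `vdup_n_f32` and `vget_lane_f32` below are the crate's ordinary
// broadcast and extract helpers.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_lanewise_division() {
    let q = vdiv_f32(vdup_n_f32(1.0), vdup_n_f32(4.0));
    assert_eq!(vget_lane_f32::<0>(q), 0.25);
    assert_eq!(vget_lane_f32::<1>(q), 0.25);
}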
9470#[doc = "Dot product arithmetic (indexed)"]
9471#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"]
9472#[inline]
9473#[target_feature(enable = "neon,dotprod")]
9474#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
9475#[rustc_legacy_const_generics(3)]
9476#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9477pub fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
9478    static_assert_uimm_bits!(LANE, 2);
9479    unsafe {
9480        let c: int32x4_t = transmute(c);
9481        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
9482        vdot_s32(a, b, transmute(c))
9483    }
9484}
9485#[doc = "Dot product arithmetic (indexed)"]
9486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"]
9487#[inline]
9488#[target_feature(enable = "neon,dotprod")]
9489#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
9490#[rustc_legacy_const_generics(3)]
9491#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9492pub fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
9493    static_assert_uimm_bits!(LANE, 2);
9494    unsafe {
9495        let c: int32x4_t = transmute(c);
9496        let c: int32x4_t =
9497            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
9498        vdotq_s32(a, b, transmute(c))
9499    }
9500}
9501#[doc = "Dot product arithmetic (indexed)"]
9502#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"]
9503#[inline]
9504#[target_feature(enable = "neon,dotprod")]
9505#[cfg_attr(test, assert_instr(udot, LANE = 0))]
9506#[rustc_legacy_const_generics(3)]
9507#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9508pub fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
9509    static_assert_uimm_bits!(LANE, 2);
9510    unsafe {
9511        let c: uint32x4_t = transmute(c);
9512        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
9513        vdot_u32(a, b, transmute(c))
9514    }
9515}
9516#[doc = "Dot product arithmetic (indexed)"]
9517#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"]
9518#[inline]
9519#[target_feature(enable = "neon,dotprod")]
9520#[cfg_attr(test, assert_instr(udot, LANE = 0))]
9521#[rustc_legacy_const_generics(3)]
9522#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
9523pub fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
9524    static_assert_uimm_bits!(LANE, 2);
9525    unsafe {
9526        let c: uint32x4_t = transmute(c);
9527        let c: uint32x4_t =
9528            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
9529        vdotq_u32(a, b, transmute(c))
9530    }
9531}
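// Editorial usage sketch (not produced by the generator): the indexed dot
// product picks one 32-bit group of four bytes from `c` (selected by `LANE`),
// multiplies it element-wise against each four-byte group of `b`, and
// accumulates the four products into the matching lane of `a`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,dotprod")]
fn example_indexed_dot_product() {
    let a = vdup_n_s32(0);
    let b = vdup_n_s8(1);
    let c = vdupq_n_s8(2);
    // Every selected group is [2, 2, 2, 2]: each lane accumulates 4 * (1 * 2).
    let r = vdot_laneq_s32::<3>(a, b, c);
    assert_eq!(vget_lane_s32::<0>(r), 8);
}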
9532#[doc = "Set all vector lanes to the same value"]
9533#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
9534#[inline]
9535#[target_feature(enable = "neon")]
9536#[cfg_attr(test, assert_instr(nop, N = 0))]
9537#[rustc_legacy_const_generics(1)]
9538#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9539pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
9540    static_assert!(N == 0);
9541    a
9542}
9543#[doc = "Set all vector lanes to the same value"]
9544#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
9545#[inline]
9546#[target_feature(enable = "neon")]
9547#[cfg_attr(test, assert_instr(nop, N = 0))]
9548#[rustc_legacy_const_generics(1)]
9549#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9550pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
9551    static_assert!(N == 0);
9552    a
9553}
9554#[doc = "Set all vector lanes to the same value"]
9555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
9556#[inline]
9557#[target_feature(enable = "neon")]
9558#[cfg_attr(test, assert_instr(nop, N = 1))]
9559#[rustc_legacy_const_generics(1)]
9560#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9561pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
9562    static_assert_uimm_bits!(N, 1);
9563    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
9564}
9565#[doc = "Set all vector lanes to the same value"]
9566#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
9567#[inline]
9568#[target_feature(enable = "neon")]
9569#[cfg_attr(test, assert_instr(nop, N = 1))]
9570#[rustc_legacy_const_generics(1)]
9571#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9572pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
9573    static_assert_uimm_bits!(N, 1);
9574    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
9575}
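// Editorial usage sketch (not produced by the generator): `vdup_laneq_f64`
// broadcasts one lane of a 128-bit vector into a 64-bit vector, so lane 1 of
// the combined vector below becomes the single output lane.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_dup_from_lane() {
    let v = vcombine_f64(vdup_n_f64(1.0), vdup_n_f64(2.0));
    assert_eq!(vget_lane_f64::<0>(vdup_laneq_f64::<1>(v)), 2.0);
}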
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
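// Illustrative sketch (hand-written; not generated): broadcasting one lane of
// a two-lane vector with the `q`-form dup.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vdupq_laneq_f64() -> float64x2_t {
    let q = vsetq_lane_f64::<1>(3.5, vdupq_n_f64(1.0)); // q = [1.0, 3.5]
    vdupq_laneq_f64::<1>(q) // [3.5, 3.5]
}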
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
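// Illustrative sketch (hand-written; not generated): the scalar `vdups`/`vduph`
// forms compile to a plain lane read (the `nop` assertions above), so they act
// as typed element accessors.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vdups_laneq_u32() -> u32 {
    let v = vsetq_lane_u32::<2>(7, vdupq_n_u32(0)); // v = [0, 0, 7, 0]
    vdups_laneq_u32::<2>(v) // 7
}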
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _veor3q_s8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _veor3q_u64(a, b, c) }
}
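// Illustrative sketch (hand-written; not generated): EOR3 folds a three-way
// XOR into one instruction; each lane computes a ^ b ^ c, the same result as
// chaining two veorq_u8 calls.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,sha3")]
fn example_veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    veor3q_u8(a, b, c)
}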
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
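// Illustrative sketch (hand-written; not generated): EXT slides an N-element
// window across the concatenation a:b.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // N = 1 yields [a[1], b[0]].
    vextq_f64::<1>(a, b)
}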
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_fma(b, c, a) }
}
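// Illustrative sketch (hand-written; not generated): note the operand order of
// the fused multiply-add; the first argument is the addend.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vfma_f64() -> float64x1_t {
    let a = vdup_n_f64(1.0);
    let b = vdup_n_f64(2.0);
    let c = vdup_n_f64(3.0);
    // a + b * c with a single rounding: 1.0 + 2.0 * 3.0 = 7.0.
    vfma_f64(a, b, c)
}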
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
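// Illustrative sketch (hand-written; not generated): the lane forms multiply
// every lane of `b` by the single lane `c[LANE]` before accumulating into `a`.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vfmaq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // a[i] + b[i] * c[3] for every i.
    vfmaq_laneq_f32::<3>(a, b, c)
}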
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    vfma_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    vfmaq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    vfma_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    unsafe { fmaf16(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
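// Illustrative sketch (hand-written; not generated): the `_n_` forms splat a
// scalar multiplier first, so this computes a[i] + b[i] * 0.5 in both lanes.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vfmaq_n_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    vfmaq_n_f64(a, b, 0.5)
}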
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
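// Illustrative sketch (hand-written; not generated): the widening FMLAL pair
// consumes the f16 sources in halves; `low` widens lanes 0..4 and `high`
// lanes 4..8, so together they accumulate all eight products.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16,fhm")]
fn example_vfmlalq_both_halves(
    lo: float32x4_t,
    hi: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> (float32x4_t, float32x4_t) {
    (vfmlalq_low_f16(lo, a, b), vfmlalq_high_f16(hi, a, b))
}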
10591#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10592#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
10593#[inline]
10594#[target_feature(enable = "neon,fp16")]
10595#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10596#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10597#[cfg_attr(test, assert_instr(fmlsl2))]
10598pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
10599    unsafe extern "unadjusted" {
10600        #[cfg_attr(
10601            any(target_arch = "aarch64", target_arch = "arm64ec"),
10602            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
10603        )]
10604        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
10605    }
10606    unsafe { _vfmlsl_high_f16(r, a, b) }
10607}
10608#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
10609#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
10610#[inline]
10611#[target_feature(enable = "neon,fp16")]
10612#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10613#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10614#[cfg_attr(test, assert_instr(fmlsl2))]
10615pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
10616    unsafe extern "unadjusted" {
10617        #[cfg_attr(
10618            any(target_arch = "aarch64", target_arch = "arm64ec"),
10619            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
10620        )]
10621        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
10622    }
10623    unsafe { _vfmlslq_high_f16(r, a, b) }
10624}
10625#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10626#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
10627#[inline]
10628#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10629#[target_feature(enable = "neon,fp16")]
10630#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10631#[rustc_legacy_const_generics(3)]
10632#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10633pub fn vfmlsl_lane_high_f16<const LANE: i32>(
10634    r: float32x2_t,
10635    a: float16x4_t,
10636    b: float16x4_t,
10637) -> float32x2_t {
10638    static_assert_uimm_bits!(LANE, 2);
10639    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10640}
10641#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
10643#[inline]
10644#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10645#[target_feature(enable = "neon,fp16")]
10646#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10647#[rustc_legacy_const_generics(3)]
10648#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10649pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
10650    r: float32x2_t,
10651    a: float16x4_t,
10652    b: float16x8_t,
10653) -> float32x2_t {
10654    static_assert_uimm_bits!(LANE, 3);
10655    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
10656}
10657#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10658#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
10659#[inline]
10660#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10661#[target_feature(enable = "neon,fp16")]
10662#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10663#[rustc_legacy_const_generics(3)]
10664#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10665pub fn vfmlslq_lane_high_f16<const LANE: i32>(
10666    r: float32x4_t,
10667    a: float16x8_t,
10668    b: float16x4_t,
10669) -> float32x4_t {
10670    static_assert_uimm_bits!(LANE, 2);
10671    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10672}
10673#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10674#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
10675#[inline]
10676#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
10677#[target_feature(enable = "neon,fp16")]
10678#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
10679#[rustc_legacy_const_generics(3)]
10680#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
10681pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
10682    r: float32x4_t,
10683    a: float16x8_t,
10684    b: float16x8_t,
10685) -> float32x4_t {
10686    static_assert_uimm_bits!(LANE, 3);
10687    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
10688}
10689#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
10690#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
10691#[inline]
10692#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
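// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). Per the attributes above, the f16 FMLSL intrinsics are
// nightly-only (feature `stdarch_neon_f16`) and need the `fp16` and `fhm`
// target features on top of `neon`. Each result lane i is
// r[i] - a[i] * b[LANE], with the low f16 lanes of `a` widened to f32.
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16,fhm")]
fn fmlsl_lane_low_sketch(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // LANE = 2 broadcasts b[2] as the multiplier for every lane.
    vfmlsl_lane_low_f16::<2>(r, a, b)
}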
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe {
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
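// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). vfms_f64 computes a - b * c in one fused step, i.e. with
// a single rounding; the sketch assumes a toolchain where these safe
// `#[target_feature]` intrinsics can be called as shown.
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vfms_f64_sketch() {
    let a = vdup_n_f64(10.0);
    let b = vdup_n_f64(3.0);
    let c = vdup_n_f64(2.0);
    let r = vfms_f64(a, b, c); // 10.0 - 3.0 * 2.0 = 4.0
    assert_eq!(vget_lane_f64::<0>(r), 4.0);
}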
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
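// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). The _lane/_laneq forms broadcast one lane of `c` as the
// multiplier, so every result lane is a[i] - b[i] * c[LANE].
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vfmsq_laneq_f32_sketch() {
    let data = [10.0f32, 20.0, 30.0, 40.0];
    let a = vdupq_n_f32(1.0);
    let b = vdupq_n_f32(2.0);
    // Safety: `data` supplies the four contiguous, initialized f32 values
    // that vld1q_f32 reads.
    let c = unsafe { vld1q_f32(data.as_ptr()) };
    let r = vfmsq_laneq_f32::<1>(a, b, c); // each lane: 1.0 - 2.0 * 20.0
    assert_eq!(vgetq_lane_f32::<0>(r), -39.0);
}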
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    vfms_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    vfmah_f16(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe {
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
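// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). The scalar-by-lane forms do the same fused a - b * c[LANE]
// on a single element and return a plain float.
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vfmss_laneq_f32_sketch() {
    let data = [1.0f32, 2.0, 3.0, 4.0];
    // Safety: `data` supplies the four contiguous, initialized f32 values
    // that vld1q_f32 reads.
    let c = unsafe { vld1q_f32(data.as_ptr()) };
    let r = vfmss_laneq_f32::<3>(10.0, 2.0, c); // 10.0 - 2.0 * 4.0
    assert_eq!(r, 2.0);
}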
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
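// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). The vld1 family performs unaligned loads (note the
// read_unaligned above), so any buffer holding enough initialized elements
// is a valid source.
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vld1q_f32_sketch() {
    let data = [1.0f32, 2.0, 3.0, 4.0];
    // Safety: `data` holds the four f32 values a float32x4_t requires.
    let v = unsafe { vld1q_f32(data.as_ptr()) };
    assert_eq!(vgetq_lane_f32::<2>(v), 3.0);
}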
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0"
        )]
        fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t;
    }
    _vld1_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0"
        )]
        fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t;
    }
    _vld1_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0"
        )]
        fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t;
    }
    _vld1_f64_x4(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0"
        )]
        fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t;
    }
    _vld1q_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0"
        )]
        fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t;
    }
    _vld1q_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0"
        )]
        fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t;
    }
    _vld1q_f64_x4(a)
}
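// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). The _x2/_x3/_x4 variants load that many consecutive
// vectors from a single pointer; vld1q_f64_x2 reads four f64 values into
// two q registers.
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vld1q_f64_x2_sketch() {
    let data = [1.0f64, 2.0, 3.0, 4.0];
    // Safety: `data` holds the four f64 values the two vectors require.
    let pair = unsafe { vld1q_f64_x2(data.as_ptr()) };
    assert_eq!(vgetq_lane_f64::<0>(pair.0), 1.0); // first vector: [1.0, 2.0]
    assert_eq!(vgetq_lane_f64::<0>(pair.1), 3.0); // second vector: [3.0, 4.0]
}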
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
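// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). The _dup (LD2R) forms read one adjacent pair and replicate
// its two elements across all lanes of the two destination vectors.
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vld2q_dup_s64_sketch() {
    let pair = [7i64, 9];
    // Safety: `pair` holds the two i64 values the instruction reads.
    let r = unsafe { vld2q_dup_s64(pair.as_ptr()) };
    assert_eq!(vgetq_lane_s64::<1>(r.0), 7); // r.0 == [7, 7]
    assert_eq!(vgetq_lane_s64::<1>(r.1), 9); // r.1 == [9, 9]
}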
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v1f64.p0"
        )]
        fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t;
    }
    _vld2_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
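// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). The _lane forms load one interleaved pair from memory into
// lane LANE of the two provided vectors, leaving every other lane unchanged.
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vld2_lane_s64_sketch() {
    let init = int64x1x2_t(vdup_n_s64(0), vdup_n_s64(0));
    let pair = [5i64, 6];
    // Safety: `pair` holds the two i64 values loaded into lane 0.
    let r = unsafe { vld2_lane_s64::<0>(pair.as_ptr(), init) };
    assert_eq!(vget_lane_s64::<0>(r.0), 5);
    assert_eq!(vget_lane_s64::<0>(r.1), 6);
}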
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
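// Usage sketch (illustrative addition, not generated; the helper name is
// hypothetical). vld2 deinterleaves element pairs, so loading
// [x0, y0, x1, y1] yields ([x0, x1], [y0, y1]).
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vld2q_s64_sketch() {
    let interleaved = [10i64, 20, 11, 21];
    // Safety: `interleaved` holds the four i64 values the load reads.
    let parts = unsafe { vld2q_s64(interleaved.as_ptr()) };
    assert_eq!(vgetq_lane_s64::<1>(parts.0), 11); // parts.0 == [10, 11]
    assert_eq!(vgetq_lane_s64::<1>(parts.1), 21); // parts.1 == [20, 21]
}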
11748#[doc = "Load multiple 2-element structures to two registers"]
11749#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
11750#[doc = "## Safety"]
11751#[doc = "  * Neon instrinsic unsafe"]
11752#[inline]
11753#[target_feature(enable = "neon")]
11754#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11755#[rustc_legacy_const_generics(2)]
11756#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11757pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
11758    static_assert_uimm_bits!(LANE, 1);
11759    unsafe extern "unadjusted" {
11760        #[cfg_attr(
11761            any(target_arch = "aarch64", target_arch = "arm64ec"),
11762            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
11763        )]
11764        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
11765            -> float64x2x2_t;
11766    }
11767    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
11768}
11769#[doc = "Load multiple 2-element structures to two registers"]
11770#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
11771#[doc = "## Safety"]
11772#[doc = "  * Neon instrinsic unsafe"]
11773#[inline]
11774#[target_feature(enable = "neon")]
11775#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11776#[rustc_legacy_const_generics(2)]
11777#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11778pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
11779    static_assert_uimm_bits!(LANE, 4);
11780    unsafe extern "unadjusted" {
11781        #[cfg_attr(
11782            any(target_arch = "aarch64", target_arch = "arm64ec"),
11783            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
11784        )]
11785        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
11786    }
11787    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
11788}
11789#[doc = "Load multiple 2-element structures to two registers"]
11790#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
11791#[doc = "## Safety"]
11792#[doc = "  * Neon instrinsic unsafe"]
11793#[inline]
11794#[target_feature(enable = "neon")]
11795#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11796#[rustc_legacy_const_generics(2)]
11797#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11798pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
11799    static_assert_uimm_bits!(LANE, 1);
11800    unsafe extern "unadjusted" {
11801        #[cfg_attr(
11802            any(target_arch = "aarch64", target_arch = "arm64ec"),
11803            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
11804        )]
11805        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
11806    }
11807    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
11808}
11809#[doc = "Load multiple 2-element structures to two registers"]
11810#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
11811#[doc = "## Safety"]
11812#[doc = "  * Neon instrinsic unsafe"]
11813#[inline]
11814#[target_feature(enable = "neon,aes")]
11815#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11816#[rustc_legacy_const_generics(2)]
11817#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11818pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
11819    static_assert_uimm_bits!(LANE, 1);
11820    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
11821}
11822#[doc = "Load multiple 2-element structures to two registers"]
11823#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
11824#[doc = "## Safety"]
11825#[doc = "  * Neon instrinsic unsafe"]
11826#[inline]
11827#[target_feature(enable = "neon")]
11828#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11829#[rustc_legacy_const_generics(2)]
11830#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11831pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
11832    static_assert_uimm_bits!(LANE, 4);
11833    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
11834}
11835#[doc = "Load multiple 2-element structures to two registers"]
11836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
11837#[doc = "## Safety"]
11838#[doc = "  * Neon instrinsic unsafe"]
11839#[inline]
11840#[target_feature(enable = "neon")]
11841#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11842#[rustc_legacy_const_generics(2)]
11843#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11844pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
11845    static_assert_uimm_bits!(LANE, 1);
11846    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
11847}
11848#[doc = "Load multiple 2-element structures to two registers"]
11849#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
11850#[doc = "## Safety"]
11851#[doc = "  * Neon instrinsic unsafe"]
11852#[inline]
11853#[target_feature(enable = "neon")]
11854#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11855#[rustc_legacy_const_generics(2)]
11856#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11857pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
11858    static_assert_uimm_bits!(LANE, 4);
11859    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
11860}
11861#[doc = "Load multiple 2-element structures to two registers"]
11862#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
11863#[doc = "## Safety"]
11864#[doc = "  * Neon instrinsic unsafe"]
11865#[inline]
11866#[cfg(target_endian = "little")]
11867#[target_feature(enable = "neon,aes")]
11868#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11869#[cfg_attr(test, assert_instr(ld2))]
11870pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
11871    transmute(vld2q_s64(transmute(a)))
11872}
11873#[doc = "Load multiple 2-element structures to two registers"]
11874#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
11875#[doc = "## Safety"]
11876#[doc = "  * Neon instrinsic unsafe"]
11877#[inline]
11878#[cfg(target_endian = "big")]
11879#[target_feature(enable = "neon,aes")]
11880#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11881#[cfg_attr(test, assert_instr(ld2))]
11882pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
11883    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
11884    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
11885    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
11886    ret_val
11887}
11888#[doc = "Load multiple 2-element structures to two registers"]
11889#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
11890#[doc = "## Safety"]
11891#[doc = "  * Neon instrinsic unsafe"]
11892#[inline]
11893#[cfg(target_endian = "little")]
11894#[target_feature(enable = "neon")]
11895#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11896#[cfg_attr(test, assert_instr(ld2))]
11897pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
11898    transmute(vld2q_s64(transmute(a)))
11899}
11900#[doc = "Load multiple 2-element structures to two registers"]
11901#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
11902#[doc = "## Safety"]
11903#[doc = "  * Neon instrinsic unsafe"]
11904#[inline]
11905#[cfg(target_endian = "big")]
11906#[target_feature(enable = "neon")]
11907#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11908#[cfg_attr(test, assert_instr(ld2))]
11909pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
11910    let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a)));
11911    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
11912    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
11913    ret_val
11914}
11915#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
11916#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
11917#[doc = "## Safety"]
11918#[doc = "  * Neon instrinsic unsafe"]
11919#[inline]
11920#[target_feature(enable = "neon")]
11921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11922#[cfg_attr(test, assert_instr(ld3r))]
11923pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
11924    unsafe extern "unadjusted" {
11925        #[cfg_attr(
11926            any(target_arch = "aarch64", target_arch = "arm64ec"),
11927            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
11928        )]
11929        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
11930    }
11931    _vld3_dup_f64(a as _)
11932}
11933#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
11934#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
11935#[doc = "## Safety"]
11936#[doc = "  * Neon intrinsic unsafe"]
11937#[inline]
11938#[target_feature(enable = "neon")]
11939#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11940#[cfg_attr(test, assert_instr(ld3r))]
11941pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
11942    unsafe extern "unadjusted" {
11943        #[cfg_attr(
11944            any(target_arch = "aarch64", target_arch = "arm64ec"),
11945            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
11946        )]
11947        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
11948    }
11949    _vld3q_dup_f64(a as _)
11950}
11951#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
11952#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
11953#[doc = "## Safety"]
11954#[doc = "  * Neon intrinsic unsafe"]
11955#[inline]
11956#[target_feature(enable = "neon")]
11957#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11958#[cfg_attr(test, assert_instr(ld3r))]
11959pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
11960    unsafe extern "unadjusted" {
11961        #[cfg_attr(
11962            any(target_arch = "aarch64", target_arch = "arm64ec"),
11963            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
11964        )]
11965        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
11966    }
11967    _vld3q_dup_s64(a as _)
11968}
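// Hand-written illustrative sketch, not generator output: the ld3r ("dup")
// forms read a single 3-element structure and broadcast each element across
// all lanes of its destination register. Names are assumptions for the
// example only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld3q_dup_s64() -> int64x2x3_t {
    let xyz: [i64; 3] = [10, 20, 30];
    // ret.0 = [10, 10], ret.1 = [20, 20], ret.2 = [30, 30]
    unsafe { vld3q_dup_s64(xyz.as_ptr()) }
}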
11969#[doc = "Load multiple 3-element structures to three registers"]
11970#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
11971#[doc = "## Safety"]
11972#[doc = "  * Neon intrinsic unsafe"]
11973#[inline]
11974#[target_feature(enable = "neon")]
11975#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11976#[cfg_attr(test, assert_instr(nop))]
11977pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
11978    unsafe extern "unadjusted" {
11979        #[cfg_attr(
11980            any(target_arch = "aarch64", target_arch = "arm64ec"),
11981            link_name = "llvm.aarch64.neon.ld3.v1f64.p0"
11982        )]
11983        fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t;
11984    }
11985    _vld3_f64(a as _)
11986}
11987#[doc = "Load multiple 3-element structures to three registers"]
11988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
11989#[doc = "## Safety"]
11990#[doc = "  * Neon intrinsic unsafe"]
11991#[inline]
11992#[target_feature(enable = "neon")]
11993#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
11994#[rustc_legacy_const_generics(2)]
11995#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11996pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
11997    static_assert!(LANE == 0);
11998    unsafe extern "unadjusted" {
11999        #[cfg_attr(
12000            any(target_arch = "aarch64", target_arch = "arm64ec"),
12001            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
12002        )]
12003        fn _vld3_lane_f64(
12004            a: float64x1_t,
12005            b: float64x1_t,
12006            c: float64x1_t,
12007            n: i64,
12008            ptr: *const i8,
12009        ) -> float64x1x3_t;
12010    }
12011    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
12012}
12013#[doc = "Load multiple 3-element structures to three registers"]
12014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
12015#[doc = "## Safety"]
12016#[doc = "  * Neon intrinsic unsafe"]
12017#[inline]
12018#[target_feature(enable = "neon,aes")]
12019#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12020#[rustc_legacy_const_generics(2)]
12021#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12022pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
12023    static_assert!(LANE == 0);
12024    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
12025}
12026#[doc = "Load multiple 3-element structures to three registers"]
12027#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
12028#[doc = "## Safety"]
12029#[doc = "  * Neon intrinsic unsafe"]
12030#[inline]
12031#[target_feature(enable = "neon")]
12032#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12033#[rustc_legacy_const_generics(2)]
12034#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12035pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
12036    static_assert!(LANE == 0);
12037    unsafe extern "unadjusted" {
12038        #[cfg_attr(
12039            any(target_arch = "aarch64", target_arch = "arm64ec"),
12040            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
12041        )]
12042        fn _vld3_lane_s64(
12043            a: int64x1_t,
12044            b: int64x1_t,
12045            c: int64x1_t,
12046            n: i64,
12047            ptr: *const i8,
12048        ) -> int64x1x3_t;
12049    }
12050    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
12051}
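// Hand-written illustrative sketch, not generator output: the lane forms
// reload only lane LANE of each destination register from one 3-element
// structure in memory, leaving every other lane of `b` unchanged; for these
// single-lane 64-bit vectors LANE must be 0. Names are assumptions for the
// example only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld3_lane_s64() -> int64x1x3_t {
    let src: [i64; 3] = [7, 8, 9];
    let init = int64x1x3_t(vdup_n_s64(0), vdup_n_s64(0), vdup_n_s64(0));
    unsafe { vld3_lane_s64::<0>(src.as_ptr(), init) }
}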
12052#[doc = "Load multiple 3-element structures to three registers"]
12053#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
12054#[doc = "## Safety"]
12055#[doc = "  * Neon intrinsic unsafe"]
12056#[inline]
12057#[target_feature(enable = "neon")]
12058#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12059#[rustc_legacy_const_generics(2)]
12060#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12061pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
12062    static_assert!(LANE == 0);
12063    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
12064}
12065#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
12066#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
12067#[doc = "## Safety"]
12068#[doc = "  * Neon intrinsic unsafe"]
12069#[inline]
12070#[cfg(target_endian = "little")]
12071#[target_feature(enable = "neon,aes")]
12072#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12073#[cfg_attr(test, assert_instr(ld3r))]
12074pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
12075    transmute(vld3q_dup_s64(transmute(a)))
12076}
12077#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
12078#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
12079#[doc = "## Safety"]
12080#[doc = "  * Neon intrinsic unsafe"]
12081#[inline]
12082#[cfg(target_endian = "big")]
12083#[target_feature(enable = "neon,aes")]
12084#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12085#[cfg_attr(test, assert_instr(ld3r))]
12086pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
12087    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
12088    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
12089    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
12090    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
12091    ret_val
12092}
12093#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
12094#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
12095#[doc = "## Safety"]
12096#[doc = "  * Neon intrinsic unsafe"]
12097#[inline]
12098#[cfg(target_endian = "little")]
12099#[target_feature(enable = "neon")]
12100#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12101#[cfg_attr(test, assert_instr(ld3r))]
12102pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
12103    transmute(vld3q_dup_s64(transmute(a)))
12104}
12105#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
12106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
12107#[doc = "## Safety"]
12108#[doc = "  * Neon intrinsic unsafe"]
12109#[inline]
12110#[cfg(target_endian = "big")]
12111#[target_feature(enable = "neon")]
12112#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12113#[cfg_attr(test, assert_instr(ld3r))]
12114pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
12115    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
12116    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
12117    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
12118    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
12119    ret_val
12120}
12121#[doc = "Load multiple 3-element structures to three registers"]
12122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
12123#[doc = "## Safety"]
12124#[doc = "  * Neon intrinsic unsafe"]
12125#[inline]
12126#[target_feature(enable = "neon")]
12127#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12128#[cfg_attr(test, assert_instr(ld3))]
12129pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
12130    unsafe extern "unadjusted" {
12131        #[cfg_attr(
12132            any(target_arch = "aarch64", target_arch = "arm64ec"),
12133            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
12134        )]
12135        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
12136    }
12137    _vld3q_f64(a as _)
12138}
12139#[doc = "Load multiple 3-element structures to three registers"]
12140#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
12141#[doc = "## Safety"]
12142#[doc = "  * Neon intrinsic unsafe"]
12143#[inline]
12144#[target_feature(enable = "neon")]
12145#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12146#[cfg_attr(test, assert_instr(ld3))]
12147pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
12148    unsafe extern "unadjusted" {
12149        #[cfg_attr(
12150            any(target_arch = "aarch64", target_arch = "arm64ec"),
12151            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
12152        )]
12153        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
12154    }
12155    _vld3q_s64(a as _)
12156}
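// Hand-written illustrative sketch, not generator output: vld3q_s64 gathers
// every third element, turning an array-of-structures layout into three
// structure-of-arrays vectors. Names are assumptions for the example only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld3q_s64() -> int64x2x3_t {
    // Two 3-element structures: (1, 2, 3) and (4, 5, 6).
    let aos: [i64; 6] = [1, 2, 3, 4, 5, 6];
    // ret.0 = [1, 4], ret.1 = [2, 5], ret.2 = [3, 6]
    unsafe { vld3q_s64(aos.as_ptr()) }
}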
12157#[doc = "Load multiple 3-element structures to three registers"]
12158#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
12159#[doc = "## Safety"]
12160#[doc = "  * Neon intrinsic unsafe"]
12161#[inline]
12162#[target_feature(enable = "neon")]
12163#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12164#[rustc_legacy_const_generics(2)]
12165#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12166pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
12167    static_assert_uimm_bits!(LANE, 1);
12168    unsafe extern "unadjusted" {
12169        #[cfg_attr(
12170            any(target_arch = "aarch64", target_arch = "arm64ec"),
12171            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
12172        )]
12173        fn _vld3q_lane_f64(
12174            a: float64x2_t,
12175            b: float64x2_t,
12176            c: float64x2_t,
12177            n: i64,
12178            ptr: *const i8,
12179        ) -> float64x2x3_t;
12180    }
12181    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
12182}
12183#[doc = "Load multiple 3-element structures to three registers"]
12184#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
12185#[doc = "## Safety"]
12186#[doc = "  * Neon intrinsic unsafe"]
12187#[inline]
12188#[target_feature(enable = "neon,aes")]
12189#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12190#[rustc_legacy_const_generics(2)]
12191#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12192pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
12193    static_assert_uimm_bits!(LANE, 1);
12194    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
12195}
12196#[doc = "Load multiple 3-element structures to three registers"]
12197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12198#[doc = "## Safety"]
12199#[doc = "  * Neon intrinsic unsafe"]
12200#[inline]
12201#[target_feature(enable = "neon")]
12202#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12203#[rustc_legacy_const_generics(2)]
12204#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12205pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12206    static_assert_uimm_bits!(LANE, 4);
12207    unsafe extern "unadjusted" {
12208        #[cfg_attr(
12209            any(target_arch = "aarch64", target_arch = "arm64ec"),
12210            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12211        )]
12212        fn _vld3q_lane_s8(
12213            a: int8x16_t,
12214            b: int8x16_t,
12215            c: int8x16_t,
12216            n: i64,
12217            ptr: *const i8,
12218        ) -> int8x16x3_t;
12219    }
12220    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12221}
12222#[doc = "Load multiple 3-element structures to three registers"]
12223#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
12224#[doc = "## Safety"]
12225#[doc = "  * Neon intrinsic unsafe"]
12226#[inline]
12227#[target_feature(enable = "neon")]
12228#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12229#[rustc_legacy_const_generics(2)]
12230#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12231pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
12232    static_assert_uimm_bits!(LANE, 1);
12233    unsafe extern "unadjusted" {
12234        #[cfg_attr(
12235            any(target_arch = "aarch64", target_arch = "arm64ec"),
12236            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
12237        )]
12238        fn _vld3q_lane_s64(
12239            a: int64x2_t,
12240            b: int64x2_t,
12241            c: int64x2_t,
12242            n: i64,
12243            ptr: *const i8,
12244        ) -> int64x2x3_t;
12245    }
12246    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
12247}
12248#[doc = "Load multiple 3-element structures to three registers"]
12249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
12250#[doc = "## Safety"]
12251#[doc = "  * Neon intrinsic unsafe"]
12252#[inline]
12253#[target_feature(enable = "neon")]
12254#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12255#[rustc_legacy_const_generics(2)]
12256#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12257pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
12258    static_assert_uimm_bits!(LANE, 4);
12259    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
12260}
12261#[doc = "Load multiple 3-element structures to three registers"]
12262#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
12263#[doc = "## Safety"]
12264#[doc = "  * Neon intrinsic unsafe"]
12265#[inline]
12266#[target_feature(enable = "neon")]
12267#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12268#[rustc_legacy_const_generics(2)]
12269#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12270pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
12271    static_assert_uimm_bits!(LANE, 1);
12272    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
12273}
12274#[doc = "Load multiple 3-element structures to three registers"]
12275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
12276#[doc = "## Safety"]
12277#[doc = "  * Neon intrinsic unsafe"]
12278#[inline]
12279#[target_feature(enable = "neon")]
12280#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12281#[rustc_legacy_const_generics(2)]
12282#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12283pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
12284    static_assert_uimm_bits!(LANE, 4);
12285    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
12286}
12287#[doc = "Load multiple 3-element structures to three registers"]
12288#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
12289#[doc = "## Safety"]
12290#[doc = "  * Neon intrinsic unsafe"]
12291#[inline]
12292#[cfg(target_endian = "little")]
12293#[target_feature(enable = "neon,aes")]
12294#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12295#[cfg_attr(test, assert_instr(ld3))]
12296pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
12297    transmute(vld3q_s64(transmute(a)))
12298}
12299#[doc = "Load multiple 3-element structures to three registers"]
12300#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
12301#[doc = "## Safety"]
12302#[doc = "  * Neon intrinsic unsafe"]
12303#[inline]
12304#[cfg(target_endian = "big")]
12305#[target_feature(enable = "neon,aes")]
12306#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12307#[cfg_attr(test, assert_instr(ld3))]
12308pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
12309    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
12310    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
12311    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
12312    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
12313    ret_val
12314}
12315#[doc = "Load multiple 3-element structures to three registers"]
12316#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
12317#[doc = "## Safety"]
12318#[doc = "  * Neon intrinsic unsafe"]
12319#[inline]
12320#[cfg(target_endian = "little")]
12321#[target_feature(enable = "neon")]
12322#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12323#[cfg_attr(test, assert_instr(ld3))]
12324pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
12325    transmute(vld3q_s64(transmute(a)))
12326}
12327#[doc = "Load multiple 3-element structures to three registers"]
12328#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
12329#[doc = "## Safety"]
12330#[doc = "  * Neon intrinsic unsafe"]
12331#[inline]
12332#[cfg(target_endian = "big")]
12333#[target_feature(enable = "neon")]
12334#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12335#[cfg_attr(test, assert_instr(ld3))]
12336pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
12337    let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a)));
12338    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
12339    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
12340    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
12341    ret_val
12342}
12343#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
12344#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
12345#[doc = "## Safety"]
12346#[doc = "  * Neon intrinsic unsafe"]
12347#[inline]
12348#[target_feature(enable = "neon")]
12349#[cfg_attr(test, assert_instr(ld4r))]
12350#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12351pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
12352    unsafe extern "unadjusted" {
12353        #[cfg_attr(
12354            any(target_arch = "aarch64", target_arch = "arm64ec"),
12355            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
12356        )]
12357        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
12358    }
12359    _vld4_dup_f64(a as _)
12360}
12361#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
12362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
12363#[doc = "## Safety"]
12364#[doc = "  * Neon intrinsic unsafe"]
12365#[inline]
12366#[target_feature(enable = "neon")]
12367#[cfg_attr(test, assert_instr(ld4r))]
12368#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12369pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
12370    unsafe extern "unadjusted" {
12371        #[cfg_attr(
12372            any(target_arch = "aarch64", target_arch = "arm64ec"),
12373            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
12374        )]
12375        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
12376    }
12377    _vld4q_dup_f64(a as _)
12378}
12379#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
12380#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
12381#[doc = "## Safety"]
12382#[doc = "  * Neon intrinsic unsafe"]
12383#[inline]
12384#[target_feature(enable = "neon")]
12385#[cfg_attr(test, assert_instr(ld4r))]
12386#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12387pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
12388    unsafe extern "unadjusted" {
12389        #[cfg_attr(
12390            any(target_arch = "aarch64", target_arch = "arm64ec"),
12391            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
12392        )]
12393        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
12394    }
12395    _vld4q_dup_s64(a as _)
12396}
12397#[doc = "Load multiple 4-element structures to four registers"]
12398#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
12399#[doc = "## Safety"]
12400#[doc = "  * Neon intrinsic unsafe"]
12401#[inline]
12402#[target_feature(enable = "neon")]
12403#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12404#[cfg_attr(test, assert_instr(nop))]
12405pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
12406    unsafe extern "unadjusted" {
12407        #[cfg_attr(
12408            any(target_arch = "aarch64", target_arch = "arm64ec"),
12409            link_name = "llvm.aarch64.neon.ld4.v1f64.p0"
12410        )]
12411        fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t;
12412    }
12413    _vld4_f64(a as _)
12414}
12415#[doc = "Load multiple 4-element structures to four registers"]
12416#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
12417#[doc = "## Safety"]
12418#[doc = "  * Neon intrinsic unsafe"]
12419#[inline]
12420#[target_feature(enable = "neon")]
12421#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12422#[rustc_legacy_const_generics(2)]
12423#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12424pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
12425    static_assert!(LANE == 0);
12426    unsafe extern "unadjusted" {
12427        #[cfg_attr(
12428            any(target_arch = "aarch64", target_arch = "arm64ec"),
12429            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
12430        )]
12431        fn _vld4_lane_f64(
12432            a: float64x1_t,
12433            b: float64x1_t,
12434            c: float64x1_t,
12435            d: float64x1_t,
12436            n: i64,
12437            ptr: *const i8,
12438        ) -> float64x1x4_t;
12439    }
12440    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12441}
12442#[doc = "Load multiple 4-element structures to four registers"]
12443#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
12444#[doc = "## Safety"]
12445#[doc = "  * Neon intrinsic unsafe"]
12446#[inline]
12447#[target_feature(enable = "neon")]
12448#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12449#[rustc_legacy_const_generics(2)]
12450#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12451pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
12452    static_assert!(LANE == 0);
12453    unsafe extern "unadjusted" {
12454        #[cfg_attr(
12455            any(target_arch = "aarch64", target_arch = "arm64ec"),
12456            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
12457        )]
12458        fn _vld4_lane_s64(
12459            a: int64x1_t,
12460            b: int64x1_t,
12461            c: int64x1_t,
12462            d: int64x1_t,
12463            n: i64,
12464            ptr: *const i8,
12465        ) -> int64x1x4_t;
12466    }
12467    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12468}
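// Hand-written illustrative sketch, not generator output: as with the vld3
// lane forms, vld4_lane_s64 refreshes only lane LANE (necessarily 0 here) of
// each of the four registers from one 4-element structure. Names are
// assumptions for the example only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld4_lane_s64() -> int64x1x4_t {
    let src: [i64; 4] = [1, 2, 3, 4];
    let zero = vdup_n_s64(0);
    let init = int64x1x4_t(zero, zero, zero, zero);
    unsafe { vld4_lane_s64::<0>(src.as_ptr(), init) }
}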
12469#[doc = "Load multiple 4-element structures to four registers"]
12470#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
12471#[doc = "## Safety"]
12472#[doc = "  * Neon intrinsic unsafe"]
12473#[inline]
12474#[target_feature(enable = "neon,aes")]
12475#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12476#[rustc_legacy_const_generics(2)]
12477#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12478pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
12479    static_assert!(LANE == 0);
12480    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
12481}
12482#[doc = "Load multiple 4-element structures to four registers"]
12483#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
12484#[doc = "## Safety"]
12485#[doc = "  * Neon intrinsic unsafe"]
12486#[inline]
12487#[target_feature(enable = "neon")]
12488#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12489#[rustc_legacy_const_generics(2)]
12490#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12491pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
12492    static_assert!(LANE == 0);
12493    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
12494}
12495#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
12496#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
12497#[doc = "## Safety"]
12498#[doc = "  * Neon intrinsic unsafe"]
12499#[inline]
12500#[cfg(target_endian = "little")]
12501#[target_feature(enable = "neon,aes")]
12502#[cfg_attr(test, assert_instr(ld4r))]
12503#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12504pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
12505    transmute(vld4q_dup_s64(transmute(a)))
12506}
12507#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
12508#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
12509#[doc = "## Safety"]
12510#[doc = "  * Neon intrinsic unsafe"]
12511#[inline]
12512#[cfg(target_endian = "big")]
12513#[target_feature(enable = "neon,aes")]
12514#[cfg_attr(test, assert_instr(ld4r))]
12515#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12516pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
12517    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
12518    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
12519    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
12520    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
12521    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
12522    ret_val
12523}
12524#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
12525#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
12526#[doc = "## Safety"]
12527#[doc = "  * Neon intrinsic unsafe"]
12528#[inline]
12529#[cfg(target_endian = "little")]
12530#[target_feature(enable = "neon")]
12531#[cfg_attr(test, assert_instr(ld4r))]
12532#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12533pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
12534    transmute(vld4q_dup_s64(transmute(a)))
12535}
12536#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
12537#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
12538#[doc = "## Safety"]
12539#[doc = "  * Neon intrinsic unsafe"]
12540#[inline]
12541#[cfg(target_endian = "big")]
12542#[target_feature(enable = "neon")]
12543#[cfg_attr(test, assert_instr(ld4r))]
12544#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12545pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
12546    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
12547    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
12548    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
12549    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
12550    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
12551    ret_val
12552}
12553#[doc = "Load multiple 4-element structures to four registers"]
12554#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
12555#[doc = "## Safety"]
12556#[doc = "  * Neon intrinsic unsafe"]
12557#[inline]
12558#[target_feature(enable = "neon")]
12559#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12560#[cfg_attr(test, assert_instr(ld4))]
12561pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
12562    unsafe extern "unadjusted" {
12563        #[cfg_attr(
12564            any(target_arch = "aarch64", target_arch = "arm64ec"),
12565            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
12566        )]
12567        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
12568    }
12569    _vld4q_f64(a as _)
12570}
12571#[doc = "Load multiple 4-element structures to four registers"]
12572#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
12573#[doc = "## Safety"]
12574#[doc = "  * Neon intrinsic unsafe"]
12575#[inline]
12576#[target_feature(enable = "neon")]
12577#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12578#[cfg_attr(test, assert_instr(ld4))]
12579pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
12580    unsafe extern "unadjusted" {
12581        #[cfg_attr(
12582            any(target_arch = "aarch64", target_arch = "arm64ec"),
12583            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
12584        )]
12585        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
12586    }
12587    _vld4q_s64(a as _)
12588}
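// Hand-written illustrative sketch, not generator output: vld4q_s64
// de-interleaves two 4-element structures into four vectors, one per
// structure field. Names are assumptions for the example only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld4q_s64() -> int64x2x4_t {
    // Two 4-element structures: (1, 2, 3, 4) and (5, 6, 7, 8).
    let aos: [i64; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    // ret.0 = [1, 5], ret.1 = [2, 6], ret.2 = [3, 7], ret.3 = [4, 8]
    unsafe { vld4q_s64(aos.as_ptr()) }
}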
12589#[doc = "Load multiple 4-element structures to four registers"]
12590#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
12591#[doc = "## Safety"]
12592#[doc = "  * Neon intrinsic unsafe"]
12593#[inline]
12594#[target_feature(enable = "neon")]
12595#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12596#[rustc_legacy_const_generics(2)]
12597#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12598pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
12599    static_assert_uimm_bits!(LANE, 1);
12600    unsafe extern "unadjusted" {
12601        #[cfg_attr(
12602            any(target_arch = "aarch64", target_arch = "arm64ec"),
12603            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
12604        )]
12605        fn _vld4q_lane_f64(
12606            a: float64x2_t,
12607            b: float64x2_t,
12608            c: float64x2_t,
12609            d: float64x2_t,
12610            n: i64,
12611            ptr: *const i8,
12612        ) -> float64x2x4_t;
12613    }
12614    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12615}
12616#[doc = "Load multiple 4-element structures to four registers"]
12617#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12618#[doc = "## Safety"]
12619#[doc = "  * Neon intrinsic unsafe"]
12620#[inline]
12621#[target_feature(enable = "neon")]
12622#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12623#[rustc_legacy_const_generics(2)]
12624#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12625pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12626    static_assert_uimm_bits!(LANE, 4);
12627    unsafe extern "unadjusted" {
12628        #[cfg_attr(
12629            any(target_arch = "aarch64", target_arch = "arm64ec"),
12630            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12631        )]
12632        fn _vld4q_lane_s8(
12633            a: int8x16_t,
12634            b: int8x16_t,
12635            c: int8x16_t,
12636            d: int8x16_t,
12637            n: i64,
12638            ptr: *const i8,
12639        ) -> int8x16x4_t;
12640    }
12641    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12642}
12643#[doc = "Load multiple 4-element structures to four registers"]
12644#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
12645#[doc = "## Safety"]
12646#[doc = "  * Neon intrinsic unsafe"]
12647#[inline]
12648#[target_feature(enable = "neon")]
12649#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12650#[rustc_legacy_const_generics(2)]
12651#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12652pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
12653    static_assert_uimm_bits!(LANE, 1);
12654    unsafe extern "unadjusted" {
12655        #[cfg_attr(
12656            any(target_arch = "aarch64", target_arch = "arm64ec"),
12657            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
12658        )]
12659        fn _vld4q_lane_s64(
12660            a: int64x2_t,
12661            b: int64x2_t,
12662            c: int64x2_t,
12663            d: int64x2_t,
12664            n: i64,
12665            ptr: *const i8,
12666        ) -> int64x2x4_t;
12667    }
12668    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12669}
12670#[doc = "Load multiple 4-element structures to four registers"]
12671#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
12672#[doc = "## Safety"]
12673#[doc = "  * Neon intrinsic unsafe"]
12674#[inline]
12675#[target_feature(enable = "neon,aes")]
12676#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12677#[rustc_legacy_const_generics(2)]
12678#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12679pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
12680    static_assert_uimm_bits!(LANE, 1);
12681    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
12682}
12683#[doc = "Load multiple 4-element structures to four registers"]
12684#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
12685#[doc = "## Safety"]
12686#[doc = "  * Neon intrinsic unsafe"]
12687#[inline]
12688#[target_feature(enable = "neon")]
12689#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12690#[rustc_legacy_const_generics(2)]
12691#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12692pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
12693    static_assert_uimm_bits!(LANE, 4);
12694    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
12695}
12696#[doc = "Load multiple 4-element structures to four registers"]
12697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
12698#[doc = "## Safety"]
12699#[doc = "  * Neon intrinsic unsafe"]
12700#[inline]
12701#[target_feature(enable = "neon")]
12702#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12703#[rustc_legacy_const_generics(2)]
12704#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12705pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
12706    static_assert_uimm_bits!(LANE, 1);
12707    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
12708}
12709#[doc = "Load multiple 4-element structures to four registers"]
12710#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
12711#[doc = "## Safety"]
12712#[doc = "  * Neon intrinsic unsafe"]
12713#[inline]
12714#[target_feature(enable = "neon")]
12715#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12716#[rustc_legacy_const_generics(2)]
12717#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12718pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
12719    static_assert_uimm_bits!(LANE, 4);
12720    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
12721}
12722#[doc = "Load multiple 4-element structures to four registers"]
12723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
12724#[doc = "## Safety"]
12725#[doc = "  * Neon intrinsic unsafe"]
12726#[inline]
12727#[cfg(target_endian = "little")]
12728#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12729#[target_feature(enable = "neon,aes")]
12730#[cfg_attr(test, assert_instr(ld4))]
12731pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
12732    transmute(vld4q_s64(transmute(a)))
12733}
12734#[doc = "Load multiple 4-element structures to four registers"]
12735#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
12736#[doc = "## Safety"]
12737#[doc = "  * Neon intrinsic unsafe"]
12738#[inline]
12739#[cfg(target_endian = "big")]
12740#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12741#[target_feature(enable = "neon,aes")]
12742#[cfg_attr(test, assert_instr(ld4))]
12743pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
12744    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
12745    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
12746    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
12747    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
12748    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
12749    ret_val
12750}
12751#[doc = "Load multiple 4-element structures to four registers"]
12752#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
12753#[doc = "## Safety"]
12754#[doc = "  * Neon intrinsic unsafe"]
12755#[inline]
12756#[cfg(target_endian = "little")]
12757#[target_feature(enable = "neon")]
12758#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12759#[cfg_attr(test, assert_instr(ld4))]
12760pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
12761    transmute(vld4q_s64(transmute(a)))
12762}
12763#[doc = "Load multiple 4-element structures to four registers"]
12764#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
12765#[doc = "## Safety"]
12766#[doc = "  * Neon intrinsic unsafe"]
12767#[inline]
12768#[cfg(target_endian = "big")]
12769#[target_feature(enable = "neon")]
12770#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12771#[cfg_attr(test, assert_instr(ld4))]
12772pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
12773    let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a)));
12774    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
12775    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
12776    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
12777    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
12778    ret_val
12779}
12780#[doc = "Lookup table read with 2-bit indices"]
12781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
12782#[doc = "## Safety"]
12783#[doc = "  * Neon intrinsic unsafe"]
12784#[inline]
12785#[target_feature(enable = "neon,lut")]
12786#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12787#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12788#[rustc_legacy_const_generics(2)]
12789pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
12790    static_assert!(LANE >= 0 && LANE <= 1);
12791    unsafe extern "unadjusted" {
12792        #[cfg_attr(
12793            any(target_arch = "aarch64", target_arch = "arm64ec"),
12794            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
12795        )]
12796        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
12797    }
12798    _vluti2_lane_s8(a, b, LANE)
12799}
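// Hand-written illustrative sketch, not generator output: each result byte of
// vluti2_lane_s8 is produced by a packed 2-bit index taken from `b`, and the
// static assert above limits LANE to selecting one of the two 32-bit index
// groups in the 64-bit index vector (our reading of the LANE bound; consult
// the Arm documentation for the precise indexing rules). Names are
// assumptions for the example only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,lut")]
unsafe fn example_vluti2_lane_s8(table: int8x8_t, indices: uint8x8_t) -> int8x16_t {
    // LANE = 0 selects the first group of packed 2-bit indices.
    unsafe { vluti2_lane_s8::<0>(table, indices) }
}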
12800#[doc = "Lookup table read with 2-bit indices"]
12801#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
12802#[doc = "## Safety"]
12803#[doc = "  * Neon intrinsic unsafe"]
12804#[inline]
12805#[target_feature(enable = "neon,lut")]
12806#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12807#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12808#[rustc_legacy_const_generics(2)]
12809pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
12810    static_assert!(LANE >= 0 && LANE <= 1);
12811    unsafe extern "unadjusted" {
12812        #[cfg_attr(
12813            any(target_arch = "aarch64", target_arch = "arm64ec"),
12814            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
12815        )]
12816        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
12817    }
12818    _vluti2q_lane_s8(a, b, LANE)
12819}
12820#[doc = "Lookup table read with 2-bit indices"]
12821#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
12822#[doc = "## Safety"]
12823#[doc = "  * Neon intrinsic unsafe"]
12824#[inline]
12825#[target_feature(enable = "neon,lut")]
12826#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12827#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12828#[rustc_legacy_const_generics(2)]
12829pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
12830    static_assert!(LANE >= 0 && LANE <= 3);
12831    unsafe extern "unadjusted" {
12832        #[cfg_attr(
12833            any(target_arch = "aarch64", target_arch = "arm64ec"),
12834            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
12835        )]
12836        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
12837    }
12838    _vluti2_lane_s16(a, b, LANE)
12839}
12840#[doc = "Lookup table read with 2-bit indices"]
12841#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
12842#[doc = "## Safety"]
12843#[doc = "  * Neon intrinsic unsafe"]
12844#[inline]
12845#[target_feature(enable = "neon,lut")]
12846#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12847#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12848#[rustc_legacy_const_generics(2)]
12849pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
12850    static_assert!(LANE >= 0 && LANE <= 3);
12851    unsafe extern "unadjusted" {
12852        #[cfg_attr(
12853            any(target_arch = "aarch64", target_arch = "arm64ec"),
12854            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
12855        )]
12856        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
12857    }
12858    _vluti2q_lane_s16(a, b, LANE)
12859}
12860#[doc = "Lookup table read with 2-bit indices"]
12861#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
12862#[doc = "## Safety"]
12863#[doc = "  * Neon intrinsic unsafe"]
12864#[inline]
12865#[target_feature(enable = "neon,lut")]
12866#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12867#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12868#[rustc_legacy_const_generics(2)]
12869pub unsafe fn vluti2_lane_u8<const LANE: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
12870    static_assert!(LANE >= 0 && LANE <= 1);
12871    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
12872}
12873#[doc = "Lookup table read with 2-bit indices"]
12874#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
12875#[doc = "## Safety"]
12876#[doc = "  * Neon intrinsic unsafe"]
12877#[inline]
12878#[target_feature(enable = "neon,lut")]
12879#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12880#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12881#[rustc_legacy_const_generics(2)]
12882pub unsafe fn vluti2q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
12883    static_assert!(LANE >= 0 && LANE <= 1);
12884    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
12885}
12886#[doc = "Lookup table read with 2-bit indices"]
12887#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
12888#[doc = "## Safety"]
12889#[doc = "  * Neon intrinsic unsafe"]
12890#[inline]
12891#[target_feature(enable = "neon,lut")]
12892#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12893#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12894#[rustc_legacy_const_generics(2)]
12895pub unsafe fn vluti2_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
12896    static_assert!(LANE >= 0 && LANE <= 3);
12897    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
12898}
12899#[doc = "Lookup table read with 2-bit indices"]
12900#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
12901#[doc = "## Safety"]
12902#[doc = "  * Neon intrinsic unsafe"]
12903#[inline]
12904#[target_feature(enable = "neon,lut")]
12905#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12906#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12907#[rustc_legacy_const_generics(2)]
12908pub unsafe fn vluti2q_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
12909    static_assert!(LANE >= 0 && LANE <= 3);
12910    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
12911}
12912#[doc = "Lookup table read with 2-bit indices"]
12913#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
12914#[doc = "## Safety"]
12915#[doc = "  * Neon intrinsic unsafe"]
12916#[inline]
12917#[target_feature(enable = "neon,lut")]
12918#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12919#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12920#[rustc_legacy_const_generics(2)]
12921pub unsafe fn vluti2_lane_p8<const LANE: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
12922    static_assert!(LANE >= 0 && LANE <= 1);
12923    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
12924}
12925#[doc = "Lookup table read with 2-bit indices"]
12926#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
12927#[doc = "## Safety"]
12928#[doc = "  * Neon intrinsic unsafe"]
12929#[inline]
12930#[target_feature(enable = "neon,lut")]
12931#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12932#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12933#[rustc_legacy_const_generics(2)]
12934pub unsafe fn vluti2q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
12935    static_assert!(LANE >= 0 && LANE <= 1);
12936    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
12937}
12938#[doc = "Lookup table read with 2-bit indices"]
12939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
12940#[doc = "## Safety"]
12941#[doc = "  * Neon intrinsic unsafe"]
12942#[inline]
12943#[target_feature(enable = "neon,lut")]
12944#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12945#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12946#[rustc_legacy_const_generics(2)]
12947pub unsafe fn vluti2_lane_p16<const LANE: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
12948    static_assert!(LANE >= 0 && LANE <= 3);
12949    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
12950}
12951#[doc = "Lookup table read with 2-bit indices"]
12952#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
12953#[doc = "## Safety"]
12954#[doc = "  * Neon intrinsic unsafe"]
12955#[inline]
12956#[target_feature(enable = "neon,lut")]
12957#[cfg_attr(test, assert_instr(nop, LANE = 1))]
12958#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12959#[rustc_legacy_const_generics(2)]
12960pub unsafe fn vluti2q_lane_p16<const LANE: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
12961    static_assert!(LANE >= 0 && LANE <= 3);
12962    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
12963}
12964#[doc = "Lookup table read with 4-bit indices"]
12965#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
12966#[doc = "## Safety"]
12967#[doc = "  * Neon intrinsic unsafe"]
12968#[inline]
12969#[target_feature(enable = "neon,lut,fp16")]
12970#[cfg_attr(test, assert_instr(nop, LANE = 0))]
12971#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12972#[rustc_legacy_const_generics(2)]
12973pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
12974    static_assert!(LANE >= 0 && LANE <= 1);
12975    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
12976}
12977#[doc = "Lookup table read with 4-bit indices"]
12978#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
12979#[doc = "## Safety"]
12980#[doc = "  * Neon intrinsic unsafe"]
12981#[inline]
12982#[target_feature(enable = "neon,lut")]
12983#[cfg_attr(test, assert_instr(nop, LANE = 0))]
12984#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12985#[rustc_legacy_const_generics(2)]
12986pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
12987    static_assert!(LANE >= 0 && LANE <= 1);
12988    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
12989}
12990#[doc = "Lookup table read with 4-bit indices"]
12991#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
12992#[doc = "## Safety"]
12993#[doc = "  * Neon intrinsic unsafe"]
12994#[inline]
12995#[target_feature(enable = "neon,lut")]
12996#[cfg_attr(test, assert_instr(nop, LANE = 0))]
12997#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
12998#[rustc_legacy_const_generics(2)]
12999pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
13000    static_assert!(LANE >= 0 && LANE <= 1);
13001    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
13002}
13003#[doc = "Lookup table read with 4-bit indices"]
13004#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13005#[doc = "## Safety"]
13006#[doc = "  * Neon intrinsic unsafe"]
13007#[inline]
13008#[target_feature(enable = "neon,lut")]
13009#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13010#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13011#[rustc_legacy_const_generics(2)]
13012pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13013    static_assert!(LANE >= 0 && LANE <= 1);
13014    unsafe extern "unadjusted" {
13015        #[cfg_attr(
13016            any(target_arch = "aarch64", target_arch = "arm64ec"),
13017            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13018        )]
13019        fn _vluti4q_lane_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x8_t, n: i32) -> int16x8_t;
13020    }
13021    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13022}
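// Hand-written illustrative sketch, not generator output: the _x2 variants
// take a table spread across the two registers of `a`, and each packed 4-bit
// index from the segment of `b` selected by LANE picks one 16-bit table entry
// (our reading of the LANE bound; consult the Arm documentation for the
// precise indexing rules). Names are assumptions for the example only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,lut")]
unsafe fn example_vluti4q_lane_s16_x2(table: int16x8x2_t, indices: uint8x8_t) -> int16x8_t {
    // LANE = 0 selects the first index segment.
    unsafe { vluti4q_lane_s16_x2::<0>(table, indices) }
}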
13023#[doc = "Lookup table read with 4-bit indices"]
13024#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
13025#[doc = "## Safety"]
13026#[doc = "  * Neon intrinsic unsafe"]
13027#[inline]
13028#[target_feature(enable = "neon,lut")]
13029#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13030#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13031#[rustc_legacy_const_generics(2)]
13032pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
13033    static_assert!(LANE == 0);
13034    unsafe extern "unadjusted" {
13035        #[cfg_attr(
13036            any(target_arch = "aarch64", target_arch = "arm64ec"),
13037            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
13038        )]
13039        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
13040    }
13041    _vluti4q_lane_s8(a, b, LANE)
13042}
13043#[doc = "Lookup table read with 4-bit indices"]
13044#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
13045#[doc = "## Safety"]
13046#[doc = "  * Neon intrinsic unsafe"]
13047#[inline]
13048#[target_feature(enable = "neon,lut")]
13049#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13050#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13051#[rustc_legacy_const_generics(2)]
13052pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
13053    static_assert!(LANE == 0);
13054    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
13055}
13056#[doc = "Lookup table read with 4-bit indices"]
13057#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
13058#[doc = "## Safety"]
13059#[doc = "  * Neon intrinsic unsafe"]
13060#[inline]
13061#[target_feature(enable = "neon,lut")]
13062#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13063#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13064#[rustc_legacy_const_generics(2)]
13065pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
13066    static_assert!(LANE == 0);
13067    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
13068}
13069#[doc = "Lookup table read with 4-bit indices"]
13070#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
13071#[doc = "## Safety"]
13072#[doc = "  * Neon intrinsic unsafe"]
13073#[inline]
13074#[target_feature(enable = "neon,lut,fp16")]
13075#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13076#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13077#[rustc_legacy_const_generics(2)]
13078pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
13079    a: float16x8x2_t,
13080    b: uint8x16_t,
13081) -> float16x8_t {
13082    static_assert!(LANE >= 0 && LANE <= 3);
13083    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13084}
13085#[doc = "Lookup table read with 4-bit indices"]
13086#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
13087#[doc = "## Safety"]
13088#[doc = "  * Neon intrinsic unsafe"]
13089#[inline]
13090#[target_feature(enable = "neon,lut")]
13091#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13092#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13093#[rustc_legacy_const_generics(2)]
13094pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
13095    static_assert!(LANE >= 0 && LANE <= 3);
13096    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13097}
13098#[doc = "Lookup table read with 4-bit indices"]
13099#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
13100#[doc = "## Safety"]
13101#[doc = "  * Neon instrinsic unsafe"]
13102#[inline]
13103#[target_feature(enable = "neon,lut")]
13104#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13105#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13106#[rustc_legacy_const_generics(2)]
13107pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
13108    static_assert!(LANE >= 0 && LANE <= 3);
13109    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13110}
13111#[doc = "Lookup table read with 4-bit indices"]
13112#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
13113#[doc = "## Safety"]
13114#[doc = "  * Neon instrinsic unsafe"]
13115#[inline]
13116#[target_feature(enable = "neon,lut")]
13117#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13118#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13119#[rustc_legacy_const_generics(2)]
13120pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
13121    static_assert!(LANE >= 0 && LANE <= 3);
13122    unsafe extern "unadjusted" {
13123        #[cfg_attr(
13124            any(target_arch = "aarch64", target_arch = "arm64ec"),
13125            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
13126        )]
13127        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
13128    }
13129    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
13130}
13131#[doc = "Lookup table read with 4-bit indices"]
13132#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
13133#[doc = "## Safety"]
13134#[doc = "  * Neon instrinsic unsafe"]
13135#[inline]
13136#[target_feature(enable = "neon,lut")]
13137#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13138#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13139#[rustc_legacy_const_generics(2)]
13140pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
13141    static_assert!(LANE >= 0 && LANE <= 1);
13142    unsafe extern "unadjusted" {
13143        #[cfg_attr(
13144            any(target_arch = "aarch64", target_arch = "arm64ec"),
13145            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
13146        )]
13147        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
13148    }
13149    _vluti4q_laneq_s8(a, b, LANE)
13150}
13151#[doc = "Lookup table read with 4-bit indices"]
13152#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
13153#[doc = "## Safety"]
13154#[doc = "  * Neon instrinsic unsafe"]
13155#[inline]
13156#[target_feature(enable = "neon,lut")]
13157#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13158#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13159#[rustc_legacy_const_generics(2)]
13160pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
13161    static_assert!(LANE >= 0 && LANE <= 1);
13162    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
13163}
13164#[doc = "Lookup table read with 4-bit indices"]
13165#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
13166#[doc = "## Safety"]
13167#[doc = "  * Neon instrinsic unsafe"]
13168#[inline]
13169#[target_feature(enable = "neon,lut")]
13170#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13171#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13172#[rustc_legacy_const_generics(2)]
13173pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
13174    static_assert!(LANE >= 0 && LANE <= 1);
13175    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
13176}
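// Editorial usage sketch for the 4-bit lookup family above (not generated
// code). It assumes an AArch64 build with the unstable `lut` target feature
// enabled; the module and test names are ours. A table whose entries are all
// equal makes the result independent of how the 4-bit indices are packed, so
// the assertion holds regardless of nibble order.
#[cfg(all(test, target_arch = "aarch64", target_feature = "lut"))]
mod vluti4q_usage_sketch {
    use super::*;

    #[test]
    fn constant_table_lookup() {
        unsafe {
            let table = vdupq_n_s8(7); // every 4-bit index maps to the value 7
            let indices = vdup_n_u8(0x31); // arbitrary packed nibble indices
            let r = vluti4q_lane_s8::<0>(table, indices);
            assert_eq!(vgetq_lane_s8::<0>(r), 7);
            assert_eq!(vgetq_lane_s8::<15>(r), 7);
        }
    }
}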
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmax_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v2f64"
        )]
        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmaxq_f64(a, b) }
}
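// Editorial usage sketch (not generated): lane-wise maximum with `vmaxq_f64`,
// assuming an AArch64 target where NEON is available. Names below are ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmax_usage_sketch {
    use super::*;

    #[test]
    fn lanewise_maximum() {
        unsafe {
            let a = vld1q_f64([1.0f64, 8.0].as_ptr());
            let b = vld1q_f64([5.0f64, 2.0].as_ptr());
            let r = vmaxq_f64(a, b);
            assert_eq!(vgetq_lane_f64::<0>(r), 5.0);
            assert_eq!(vgetq_lane_f64::<1>(r), 8.0);
        }
    }
}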
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.f16"
        )]
        fn _vmaxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmaxh_f16(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.v1f64"
        )]
        fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmaxnm_f64(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.v2f64"
        )]
        fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmaxnmq_f64(a, b) }
}
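// Editorial sketch (not generated) contrasting "Maximum" with "Maximum
// Number": `vmax_f64` propagates a NaN operand, while `vmaxnm_f64` returns
// the non-NaN operand, following IEEE 754 maxNum. Assumes AArch64 with NEON;
// the names below are ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmaxnm_usage_sketch {
    use super::*;

    #[test]
    fn nan_handling() {
        unsafe {
            let a = vdup_n_f64(f64::NAN);
            let b = vdup_n_f64(3.0);
            assert!(vget_lane_f64::<0>(vmax_f64(a, b)).is_nan());
            assert_eq!(vget_lane_f64::<0>(vmaxnm_f64(a, b)), 3.0);
        }
    }
}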
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.f16"
        )]
        fn _vmaxnmh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmaxnmh_f16(a, b) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v4f16"
        )]
        fn _vmaxnmv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vmaxnmv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v8f16"
        )]
        fn _vmaxnmvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vmaxnmvq_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vmaxnmv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vmaxnmv_f32(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vmaxnmvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vmaxnmvq_f64(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32"
        )]
        fn _vmaxnmvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vmaxnmvq_f32(a) }
}
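// Editorial sketch (not generated): `vmaxnmvq_f32` reduces across lanes with
// maxNum semantics, so a single quiet NaN lane is ignored in favour of the
// numeric maximum. Assumes AArch64 with NEON; names are ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmaxnmv_usage_sketch {
    use super::*;

    #[test]
    fn reduction_skips_nan() {
        unsafe {
            let v = vld1q_f32([1.0f32, f32::NAN, 5.0, 2.0].as_ptr());
            assert_eq!(vmaxnmvq_f32(v), 5.0);
        }
    }
}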
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vmaxvq_f64(a) }
}
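// Editorial sketch (not generated): the horizontal forms collapse a whole
// vector to one scalar, e.g. `vmaxvq_f32` over four lanes. Assumes AArch64
// with NEON; names are ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmaxv_float_usage_sketch {
    use super::*;

    #[test]
    fn horizontal_float_max() {
        unsafe {
            let v = vld1q_f32([0.5f32, 4.25, -1.0, 2.0].as_ptr());
            assert_eq!(vmaxvq_f32(v), 4.25);
        }
    }
}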
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i8.v8i8"
        )]
        fn _vmaxv_s8(a: int8x8_t) -> i8;
    }
    unsafe { _vmaxv_s8(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i8.v16i8"
        )]
        fn _vmaxvq_s8(a: int8x16_t) -> i8;
    }
    unsafe { _vmaxvq_s8(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i16.v4i16"
        )]
        fn _vmaxv_s16(a: int16x4_t) -> i16;
    }
    unsafe { _vmaxv_s16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i16.v8i16"
        )]
        fn _vmaxvq_s16(a: int16x8_t) -> i16;
    }
    unsafe { _vmaxvq_s16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i32.v2i32"
        )]
        fn _vmaxv_s32(a: int32x2_t) -> i32;
    }
    unsafe { _vmaxv_s32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxv.i32.v4i32"
        )]
        fn _vmaxvq_s32(a: int32x4_t) -> i32;
    }
    unsafe { _vmaxvq_s32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i8.v8i8"
        )]
        fn _vmaxv_u8(a: uint8x8_t) -> u8;
    }
    unsafe { _vmaxv_u8(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i8.v16i8"
        )]
        fn _vmaxvq_u8(a: uint8x16_t) -> u8;
    }
    unsafe { _vmaxvq_u8(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i16.v4i16"
        )]
        fn _vmaxv_u16(a: uint16x4_t) -> u16;
    }
    unsafe { _vmaxv_u16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i16.v8i16"
        )]
        fn _vmaxvq_u16(a: uint16x8_t) -> u16;
    }
    unsafe { _vmaxvq_u16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i32.v2i32"
        )]
        fn _vmaxv_u32(a: uint32x2_t) -> u32;
    }
    unsafe { _vmaxv_u32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxv.i32.v4i32"
        )]
        fn _vmaxvq_u32(a: uint32x4_t) -> u32;
    }
    unsafe { _vmaxvq_u32(a) }
}
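// Editorial sketch (not generated): the signed and unsigned reductions pick
// the element maximum across all lanes. Assumes AArch64 with NEON; names are
// ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmaxv_int_usage_sketch {
    use super::*;

    #[test]
    fn horizontal_integer_max() {
        unsafe {
            let bytes: [u8; 16] = [3, 9, 1, 250, 7, 0, 42, 5, 8, 8, 8, 8, 13, 21, 34, 55];
            assert_eq!(vmaxvq_u8(vld1q_u8(bytes.as_ptr())), 250);
            let words: [i32; 4] = [-7, 3, -1, 2];
            assert_eq!(vmaxvq_s32(vld1q_s32(words.as_ptr())), 3);
        }
    }
}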
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminh_f16(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.v1f64"
        )]
        fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vminnm_f64(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.v2f64"
        )]
        fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminnmq_f64(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.f16"
        )]
        fn _vminnmh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminnmh_f16(a, b) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f16.v4f16"
        )]
        fn _vminnmv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vminnmv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f16.v8f16"
        )]
        fn _vminnmvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminnmvq_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vminnmv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminnmv_f32(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vminnmvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminnmvq_f64(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32"
        )]
        fn _vminnmvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminnmvq_f32(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminvq_f64(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i8.v8i8"
        )]
        fn _vminv_s8(a: int8x8_t) -> i8;
    }
    unsafe { _vminv_s8(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i8.v16i8"
        )]
        fn _vminvq_s8(a: int8x16_t) -> i8;
    }
    unsafe { _vminvq_s8(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i16.v4i16"
        )]
        fn _vminv_s16(a: int16x4_t) -> i16;
    }
    unsafe { _vminv_s16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i16.v8i16"
        )]
        fn _vminvq_s16(a: int16x8_t) -> i16;
    }
    unsafe { _vminvq_s16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i32.v2i32"
        )]
        fn _vminv_s32(a: int32x2_t) -> i32;
    }
    unsafe { _vminv_s32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminv.i32.v4i32"
        )]
        fn _vminvq_s32(a: int32x4_t) -> i32;
    }
    unsafe { _vminvq_s32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i8.v8i8"
        )]
        fn _vminv_u8(a: uint8x8_t) -> u8;
    }
    unsafe { _vminv_u8(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i8.v16i8"
        )]
        fn _vminvq_u8(a: uint8x16_t) -> u8;
    }
    unsafe { _vminvq_u8(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i16.v4i16"
        )]
        fn _vminv_u16(a: uint16x4_t) -> u16;
    }
    unsafe { _vminv_u16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i16.v8i16"
        )]
        fn _vminvq_u16(a: uint16x8_t) -> u16;
    }
    unsafe { _vminvq_u16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i32.v2i32"
        )]
        fn _vminv_u32(a: uint32x2_t) -> u32;
    }
    unsafe { _vminv_u32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminv.i32.v4i32"
        )]
        fn _vminvq_u32(a: uint32x4_t) -> u32;
    }
    unsafe { _vminvq_u32(a) }
}
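// Editorial sketch (not generated): the `vminv*` reductions mirror the
// `vmaxv*` family, returning the smallest lane. Assumes AArch64 with NEON;
// names are ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vminv_usage_sketch {
    use super::*;

    #[test]
    fn horizontal_min() {
        unsafe {
            let v = vld1q_u16([9u16, 4, 1000, 7, 2, 2, 65535, 11].as_ptr());
            assert_eq!(vminvq_u16(v), 2);
        }
    }
}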
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
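// Editorial sketch (not generated): `vmlaq_f64` computes a + b * c lane-wise.
// As the `assert_instr(fmul)` above suggests, it lowers to a separate
// multiply and add rather than a fused FMLA, so each lane rounds twice.
// Assumes AArch64 with NEON; names are ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmla_usage_sketch {
    use super::*;

    #[test]
    fn multiply_accumulate() {
        unsafe {
            let a = vld1q_f64([1.0f64, 2.0].as_ptr());
            let b = vld1q_f64([3.0f64, 4.0].as_ptr());
            let c = vdupq_n_f64(10.0);
            let r = vmlaq_f64(a, b, c);
            assert_eq!(vgetq_lane_f64::<0>(r), 31.0);
            assert_eq!(vgetq_lane_f64::<1>(r), 42.0);
        }
    }
}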
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
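// Editorial sketch (not generated): the `_lane` form broadcasts lane `LANE`
// of `c` before the widening multiply-add over the upper half of `b`. Here
// lane 1 of `c` (value 3) multiplies b[4..8]. Assumes AArch64 with NEON;
// names are ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmlal_lane_usage_sketch {
    use super::*;

    #[test]
    fn broadcast_lane_then_widen() {
        unsafe {
            let a = vdupq_n_s32(100);
            let b = vld1q_s16([0i16, 0, 0, 0, 10, 20, 30, 40].as_ptr());
            let c = vld1_s16([1i16, 3, 5, 7].as_ptr());
            let r = vmlal_high_lane_s16::<1>(a, b, c);
            assert_eq!(vgetq_lane_s32::<0>(r), 130); // 100 + 10 * 3
            assert_eq!(vgetq_lane_s32::<3>(r), 220); // 100 + 40 * 3
        }
    }
}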
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
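// Editorial sketch (not generated): the `_n` forms are shorthand for
// duplicating a scalar across all lanes of `c`; `vmlal_high_n_s16(a, b, k)`
// behaves like `vmlal_high_s16(a, b, vdupq_n_s16(k))`. Assumes AArch64 with
// NEON; names are ours.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmlal_n_usage_sketch {
    use super::*;

    #[test]
    fn scalar_multiplier() {
        unsafe {
            let a = vdupq_n_s32(0);
            let b = vld1q_s16([0i16, 0, 0, 0, 1, 2, 3, 4].as_ptr());
            let r = vmlal_high_n_s16(a, b, -5);
            assert_eq!(vgetq_lane_s32::<0>(r), -5);
            assert_eq!(vgetq_lane_s32::<3>(r), -20);
        }
    }
}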
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_u32(a, b, c)
    }
}
14417#[doc = "Floating-point multiply-subtract from accumulator"]
14418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
14419#[inline]
14420#[target_feature(enable = "neon")]
14421#[cfg_attr(test, assert_instr(fmul))]
14422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14423pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
14424    unsafe { simd_sub(a, simd_mul(b, c)) }
14425}
14426#[doc = "Floating-point multiply-subtract from accumulator"]
14427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
14428#[inline]
14429#[target_feature(enable = "neon")]
14430#[cfg_attr(test, assert_instr(fmul))]
14431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14432pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
14433    unsafe { simd_sub(a, simd_mul(b, c)) }
14434}
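// Editorial illustration, not emitted by stdarch-gen-arm: vmls_f64 and
// vmlsq_f64 compute `a - b * c` as a separate multiply and subtract (hence
// the `fmul` in the assert_instr above; no fused multiply-add is involved).
// A minimal sketch, assuming the `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmlsq_f64_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn subtracts_the_product_from_the_accumulator() {
        let a = vdupq_n_f64(10.0);
        let b = vdupq_n_f64(2.0);
        let c = vdupq_n_f64(3.0);
        let r = vmlsq_f64(a, b, c);
        // Every lane: 10.0 - 2.0 * 3.0 = 4.0.
        assert_eq!(vgetq_lane_f64::<0>(r), 4.0);
        assert_eq!(vgetq_lane_f64::<1>(r), 4.0);
    }
}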
14435#[doc = "Multiply-subtract long"]
14436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
14437#[inline]
14438#[target_feature(enable = "neon")]
14439#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14440#[rustc_legacy_const_generics(3)]
14441#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14442pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
14443    static_assert_uimm_bits!(LANE, 2);
14444    unsafe {
14445        vmlsl_high_s16(
14446            a,
14447            b,
14448            simd_shuffle!(
14449                c,
14450                c,
14451                [
14452                    LANE as u32,
14453                    LANE as u32,
14454                    LANE as u32,
14455                    LANE as u32,
14456                    LANE as u32,
14457                    LANE as u32,
14458                    LANE as u32,
14459                    LANE as u32
14460                ]
14461            ),
14462        )
14463    }
14464}
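// Editorial illustration, not emitted by stdarch-gen-arm: the `_lane_`
// variants broadcast one compile-time-checked lane of `c` as the multiplier.
// A minimal sketch, assuming the `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmlsl_high_lane_s16_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn the_const_lane_selects_the_multiplier() {
        let acc = vdupq_n_s32(100);
        let b = vdupq_n_s16(5);
        let lanes: [i16; 4] = [1, 2, 3, 4];
        let c = vld1_s16(lanes.as_ptr());
        // LANE = 2 broadcasts c[2] == 3: every lane is 100 - 5 * 3 = 85.
        let r = vmlsl_high_lane_s16::<2>(acc, b, c);
        assert_eq!(vgetq_lane_s32::<0>(r), 85);
    }
}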
14465#[doc = "Multiply-subtract long"]
14466#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
14467#[inline]
14468#[target_feature(enable = "neon")]
14469#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14470#[rustc_legacy_const_generics(3)]
14471#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14472pub fn vmlsl_high_laneq_s16<const LANE: i32>(
14473    a: int32x4_t,
14474    b: int16x8_t,
14475    c: int16x8_t,
14476) -> int32x4_t {
14477    static_assert_uimm_bits!(LANE, 3);
14478    unsafe {
14479        vmlsl_high_s16(
14480            a,
14481            b,
14482            simd_shuffle!(
14483                c,
14484                c,
14485                [
14486                    LANE as u32,
14487                    LANE as u32,
14488                    LANE as u32,
14489                    LANE as u32,
14490                    LANE as u32,
14491                    LANE as u32,
14492                    LANE as u32,
14493                    LANE as u32
14494                ]
14495            ),
14496        )
14497    }
14498}
14499#[doc = "Multiply-subtract long"]
14500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
14501#[inline]
14502#[target_feature(enable = "neon")]
14503#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14504#[rustc_legacy_const_generics(3)]
14505#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14506pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
14507    static_assert_uimm_bits!(LANE, 1);
14508    unsafe {
14509        vmlsl_high_s32(
14510            a,
14511            b,
14512            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14513        )
14514    }
14515}
14516#[doc = "Multiply-subtract long"]
14517#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
14518#[inline]
14519#[target_feature(enable = "neon")]
14520#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
14521#[rustc_legacy_const_generics(3)]
14522#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14523pub fn vmlsl_high_laneq_s32<const LANE: i32>(
14524    a: int64x2_t,
14525    b: int32x4_t,
14526    c: int32x4_t,
14527) -> int64x2_t {
14528    static_assert_uimm_bits!(LANE, 2);
14529    unsafe {
14530        vmlsl_high_s32(
14531            a,
14532            b,
14533            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14534        )
14535    }
14536}
14537#[doc = "Multiply-subtract long"]
14538#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
14539#[inline]
14540#[target_feature(enable = "neon")]
14541#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14542#[rustc_legacy_const_generics(3)]
14543#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14544pub fn vmlsl_high_lane_u16<const LANE: i32>(
14545    a: uint32x4_t,
14546    b: uint16x8_t,
14547    c: uint16x4_t,
14548) -> uint32x4_t {
14549    static_assert_uimm_bits!(LANE, 2);
14550    unsafe {
14551        vmlsl_high_u16(
14552            a,
14553            b,
14554            simd_shuffle!(
14555                c,
14556                c,
14557                [
14558                    LANE as u32,
14559                    LANE as u32,
14560                    LANE as u32,
14561                    LANE as u32,
14562                    LANE as u32,
14563                    LANE as u32,
14564                    LANE as u32,
14565                    LANE as u32
14566                ]
14567            ),
14568        )
14569    }
14570}
14571#[doc = "Multiply-subtract long"]
14572#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
14573#[inline]
14574#[target_feature(enable = "neon")]
14575#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14576#[rustc_legacy_const_generics(3)]
14577#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14578pub fn vmlsl_high_laneq_u16<const LANE: i32>(
14579    a: uint32x4_t,
14580    b: uint16x8_t,
14581    c: uint16x8_t,
14582) -> uint32x4_t {
14583    static_assert_uimm_bits!(LANE, 3);
14584    unsafe {
14585        vmlsl_high_u16(
14586            a,
14587            b,
14588            simd_shuffle!(
14589                c,
14590                c,
14591                [
14592                    LANE as u32,
14593                    LANE as u32,
14594                    LANE as u32,
14595                    LANE as u32,
14596                    LANE as u32,
14597                    LANE as u32,
14598                    LANE as u32,
14599                    LANE as u32
14600                ]
14601            ),
14602        )
14603    }
14604}
14605#[doc = "Multiply-subtract long"]
14606#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
14607#[inline]
14608#[target_feature(enable = "neon")]
14609#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14610#[rustc_legacy_const_generics(3)]
14611#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14612pub fn vmlsl_high_lane_u32<const LANE: i32>(
14613    a: uint64x2_t,
14614    b: uint32x4_t,
14615    c: uint32x2_t,
14616) -> uint64x2_t {
14617    static_assert_uimm_bits!(LANE, 1);
14618    unsafe {
14619        vmlsl_high_u32(
14620            a,
14621            b,
14622            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14623        )
14624    }
14625}
14626#[doc = "Multiply-subtract long"]
14627#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
14628#[inline]
14629#[target_feature(enable = "neon")]
14630#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
14631#[rustc_legacy_const_generics(3)]
14632#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14633pub fn vmlsl_high_laneq_u32<const LANE: i32>(
14634    a: uint64x2_t,
14635    b: uint32x4_t,
14636    c: uint32x4_t,
14637) -> uint64x2_t {
14638    static_assert_uimm_bits!(LANE, 2);
14639    unsafe {
14640        vmlsl_high_u32(
14641            a,
14642            b,
14643            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14644        )
14645    }
14646}
14647#[doc = "Multiply-subtract long"]
14648#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
14649#[inline]
14650#[target_feature(enable = "neon")]
14651#[cfg_attr(test, assert_instr(smlsl2))]
14652#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14653pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
14654    vmlsl_high_s16(a, b, vdupq_n_s16(c))
14655}
14656#[doc = "Multiply-subtract long"]
14657#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
14658#[inline]
14659#[target_feature(enable = "neon")]
14660#[cfg_attr(test, assert_instr(smlsl2))]
14661#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14662pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
14663    vmlsl_high_s32(a, b, vdupq_n_s32(c))
14664}
14665#[doc = "Multiply-subtract long"]
14666#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
14667#[inline]
14668#[target_feature(enable = "neon")]
14669#[cfg_attr(test, assert_instr(umlsl2))]
14670#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14671pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
14672    vmlsl_high_u16(a, b, vdupq_n_u16(c))
14673}
14674#[doc = "Multiply-subtract long"]
14675#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
14676#[inline]
14677#[target_feature(enable = "neon")]
14678#[cfg_attr(test, assert_instr(umlsl2))]
14679#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14680pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
14681    vmlsl_high_u32(a, b, vdupq_n_u32(c))
14682}
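// Editorial illustration, not emitted by stdarch-gen-arm: the `_n_` variants
// above are thin wrappers that duplicate a scalar across a vector before
// delegating, so vmlsl_high_n_s16(a, b, c) equals
// vmlsl_high_s16(a, b, vdupq_n_s16(c)). A minimal sketch, assuming the
// `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmlsl_high_n_s16_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn broadcasts_the_scalar_multiplier() {
        let acc = vdupq_n_s32(50);
        let b = vdupq_n_s16(6);
        let r = vmlsl_high_n_s16(acc, b, 7);
        // Every lane: 50 - 6 * 7 = 8.
        assert_eq!(vgetq_lane_s32::<0>(r), 8);
        assert_eq!(vgetq_lane_s32::<3>(r), 8);
    }
}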
14683#[doc = "Signed multiply-subtract long"]
14684#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
14685#[inline]
14686#[target_feature(enable = "neon")]
14687#[cfg_attr(test, assert_instr(smlsl2))]
14688#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14689pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
14690    unsafe {
14691        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14692        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14693        vmlsl_s8(a, b, c)
14694    }
14695}
14696#[doc = "Signed multiply-subtract long"]
14697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
14698#[inline]
14699#[target_feature(enable = "neon")]
14700#[cfg_attr(test, assert_instr(smlsl2))]
14701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14702pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
14703    unsafe {
14704        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14705        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14706        vmlsl_s16(a, b, c)
14707    }
14708}
14709#[doc = "Signed multiply-subtract long"]
14710#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
14711#[inline]
14712#[target_feature(enable = "neon")]
14713#[cfg_attr(test, assert_instr(smlsl2))]
14714#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14715pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
14716    unsafe {
14717        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
14718        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
14719        vmlsl_s32(a, b, c)
14720    }
14721}
14722#[doc = "Unsigned multiply-subtract long"]
14723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
14724#[inline]
14725#[target_feature(enable = "neon")]
14726#[cfg_attr(test, assert_instr(umlsl2))]
14727#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14728pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
14729    unsafe {
14730        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14731        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14732        vmlsl_u8(a, b, c)
14733    }
14734}
14735#[doc = "Unsigned multiply-subtract long"]
14736#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
14737#[inline]
14738#[target_feature(enable = "neon")]
14739#[cfg_attr(test, assert_instr(umlsl2))]
14740#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14741pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
14742    unsafe {
14743        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14744        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14745        vmlsl_u16(a, b, c)
14746    }
14747}
14748#[doc = "Unsigned multiply-subtract long"]
14749#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
14750#[inline]
14751#[target_feature(enable = "neon")]
14752#[cfg_attr(test, assert_instr(umlsl2))]
14753#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14754pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
14755    unsafe {
14756        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
14757        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
14758        vmlsl_u32(a, b, c)
14759    }
14760}
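// Editorial illustration, not emitted by stdarch-gen-arm: the widening
// multiply-subtract reads only the upper half of each 128-bit source vector.
// A minimal sketch, assuming the `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmlsl_high_u16_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn only_the_upper_halves_participate() {
        // Lanes 0..4 of `b` (the 9s) are ignored by the `_high_` form.
        let data: [u16; 8] = [9, 9, 9, 9, 1, 2, 3, 4];
        let b = vld1q_u16(data.as_ptr());
        let c = vdupq_n_u16(10);
        let acc = vdupq_n_u32(100);
        let r = vmlsl_high_u16(acc, b, c);
        assert_eq!(vgetq_lane_u32::<0>(r), 90); // 100 - 1 * 10
        assert_eq!(vgetq_lane_u32::<3>(r), 60); // 100 - 4 * 10
    }
}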
14761#[doc = "Vector move"]
14762#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
14763#[inline]
14764#[target_feature(enable = "neon")]
14765#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14766#[cfg_attr(test, assert_instr(sxtl2))]
14767pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
14768    unsafe {
14769        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14770        vmovl_s8(a)
14771    }
14772}
14773#[doc = "Vector move"]
14774#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
14775#[inline]
14776#[target_feature(enable = "neon")]
14777#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14778#[cfg_attr(test, assert_instr(sxtl2))]
14779pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
14780    unsafe {
14781        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14782        vmovl_s16(a)
14783    }
14784}
14785#[doc = "Vector move"]
14786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
14787#[inline]
14788#[target_feature(enable = "neon")]
14789#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14790#[cfg_attr(test, assert_instr(sxtl2))]
14791pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
14792    unsafe {
14793        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
14794        vmovl_s32(a)
14795    }
14796}
14797#[doc = "Vector move"]
14798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
14799#[inline]
14800#[target_feature(enable = "neon")]
14801#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14802#[cfg_attr(test, assert_instr(uxtl2))]
14803pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
14804    unsafe {
14805        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14806        vmovl_u8(a)
14807    }
14808}
14809#[doc = "Vector move"]
14810#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
14811#[inline]
14812#[target_feature(enable = "neon")]
14813#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14814#[cfg_attr(test, assert_instr(uxtl2))]
14815pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
14816    unsafe {
14817        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14818        vmovl_u16(a)
14819    }
14820}
14821#[doc = "Vector move"]
14822#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
14823#[inline]
14824#[target_feature(enable = "neon")]
14825#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14826#[cfg_attr(test, assert_instr(uxtl2))]
14827pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
14828    unsafe {
14829        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
14830        vmovl_u32(a)
14831    }
14832}
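// Editorial illustration, not emitted by stdarch-gen-arm: vmovl_high_* widen
// the upper half of a vector, sign-extending in the signed forms (sxtl2) and
// zero-extending in the unsigned ones (uxtl2). A minimal sketch, assuming
// the `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmovl_high_s8_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sign_extends_the_upper_half() {
        let data: [i8; 16] = [0, 0, 0, 0, 0, 0, 0, 0, -1, 2, -3, 4, -5, 6, -7, 8];
        let a = vld1q_s8(data.as_ptr());
        let r = vmovl_high_s8(a);
        assert_eq!(vgetq_lane_s16::<0>(r), -1); // source lane 8, sign-extended
        assert_eq!(vgetq_lane_s16::<7>(r), 8); // source lane 15
    }
}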
14833#[doc = "Extract narrow"]
14834#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
14835#[inline]
14836#[target_feature(enable = "neon")]
14837#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14838#[cfg_attr(test, assert_instr(xtn2))]
14839pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
14840    unsafe {
14841        let c: int8x8_t = simd_cast(b);
14842        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14843    }
14844}
14845#[doc = "Extract narrow"]
14846#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
14847#[inline]
14848#[target_feature(enable = "neon")]
14849#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14850#[cfg_attr(test, assert_instr(xtn2))]
14851pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
14852    unsafe {
14853        let c: int16x4_t = simd_cast(b);
14854        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14855    }
14856}
14857#[doc = "Extract narrow"]
14858#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
14859#[inline]
14860#[target_feature(enable = "neon")]
14861#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14862#[cfg_attr(test, assert_instr(xtn2))]
14863pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
14864    unsafe {
14865        let c: int32x2_t = simd_cast(b);
14866        simd_shuffle!(a, c, [0, 1, 2, 3])
14867    }
14868}
14869#[doc = "Extract narrow"]
14870#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
14871#[inline]
14872#[target_feature(enable = "neon")]
14873#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14874#[cfg_attr(test, assert_instr(xtn2))]
14875pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
14876    unsafe {
14877        let c: uint8x8_t = simd_cast(b);
14878        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14879    }
14880}
14881#[doc = "Extract narrow"]
14882#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
14883#[inline]
14884#[target_feature(enable = "neon")]
14885#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14886#[cfg_attr(test, assert_instr(xtn2))]
14887pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
14888    unsafe {
14889        let c: uint16x4_t = simd_cast(b);
14890        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14891    }
14892}
14893#[doc = "Extract narrow"]
14894#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
14895#[inline]
14896#[target_feature(enable = "neon")]
14897#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14898#[cfg_attr(test, assert_instr(xtn2))]
14899pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
14900    unsafe {
14901        let c: uint32x2_t = simd_cast(b);
14902        simd_shuffle!(a, c, [0, 1, 2, 3])
14903    }
14904}
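// Editorial illustration, not emitted by stdarch-gen-arm: vmovn_high_*
// truncate each wide lane of `b` and pack the results into the upper half of
// the output, keeping `a` as the lower half (xtn2). A minimal sketch,
// assuming the `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmovn_high_s32_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn packs_truncated_lanes_into_the_upper_half() {
        let low = vdup_n_s16(7);
        let wide = vdupq_n_s32(0x0001_0002);
        let r = vmovn_high_s32(low, wide);
        assert_eq!(vgetq_lane_s16::<0>(r), 7); // carried over from `a`
        assert_eq!(vgetq_lane_s16::<4>(r), 2); // low 16 bits of 0x0001_0002
    }
}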
14905#[doc = "Multiply"]
14906#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
14907#[inline]
14908#[target_feature(enable = "neon")]
14909#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14910#[cfg_attr(test, assert_instr(fmul))]
14911pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
14912    unsafe { simd_mul(a, b) }
14913}
14914#[doc = "Multiply"]
14915#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
14916#[inline]
14917#[target_feature(enable = "neon")]
14918#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14919#[cfg_attr(test, assert_instr(fmul))]
14920pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
14921    unsafe { simd_mul(a, b) }
14922}
14923#[doc = "Floating-point multiply"]
14924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
14925#[inline]
14926#[target_feature(enable = "neon")]
14927#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14928#[rustc_legacy_const_generics(2)]
14929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14930pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
14931    static_assert!(LANE == 0);
14932    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
14933}
14934#[doc = "Floating-point multiply"]
14935#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
14936#[inline]
14937#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14938#[rustc_legacy_const_generics(2)]
14939#[target_feature(enable = "neon,fp16")]
14940#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14941pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
14942    static_assert_uimm_bits!(LANE, 3);
14943    unsafe {
14944        simd_mul(
14945            a,
14946            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14947        )
14948    }
14949}
14950#[doc = "Floating-point multiply"]
14951#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
14952#[inline]
14953#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14954#[rustc_legacy_const_generics(2)]
14955#[target_feature(enable = "neon,fp16")]
14956#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14957pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
14958    static_assert_uimm_bits!(LANE, 3);
14959    unsafe {
14960        simd_mul(
14961            a,
14962            simd_shuffle!(
14963                b,
14964                b,
14965                [
14966                    LANE as u32,
14967                    LANE as u32,
14968                    LANE as u32,
14969                    LANE as u32,
14970                    LANE as u32,
14971                    LANE as u32,
14972                    LANE as u32,
14973                    LANE as u32
14974                ]
14975            ),
14976        )
14977    }
14978}
14979#[doc = "Floating-point multiply"]
14980#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
14981#[inline]
14982#[target_feature(enable = "neon")]
14983#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14984#[rustc_legacy_const_generics(2)]
14985#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14986pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
14987    static_assert_uimm_bits!(LANE, 1);
14988    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
14989}
14990#[doc = "Vector multiply by scalar"]
14991#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
14992#[inline]
14993#[target_feature(enable = "neon")]
14994#[cfg_attr(test, assert_instr(fmul))]
14995#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14996pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
14997    unsafe { simd_mul(a, vdup_n_f64(b)) }
14998}
14999#[doc = "Vector multiply by scalar"]
15000#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
15001#[inline]
15002#[target_feature(enable = "neon")]
15003#[cfg_attr(test, assert_instr(fmul))]
15004#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15005pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
15006    unsafe { simd_mul(a, vdupq_n_f64(b)) }
15007}
15008#[doc = "Floating-point multiply"]
15009#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
15010#[inline]
15011#[target_feature(enable = "neon")]
15012#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15013#[rustc_legacy_const_generics(2)]
15014#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15015pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
15016    static_assert!(LANE == 0);
15017    unsafe {
15018        let b: f64 = simd_extract!(b, LANE as u32);
15019        a * b
15020    }
15021}
15022#[doc = "Floating-point multiply"]
15023#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
15024#[inline]
15025#[target_feature(enable = "neon,fp16")]
15026#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15027#[cfg_attr(test, assert_instr(nop))]
15028pub fn vmulh_f16(a: f16, b: f16) -> f16 {
15029    a * b
15030}
15031#[doc = "Floating-point multiply"]
15032#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
15033#[inline]
15034#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15035#[rustc_legacy_const_generics(2)]
15036#[target_feature(enable = "neon,fp16")]
15037#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15038pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
15039    static_assert_uimm_bits!(LANE, 2);
15040    unsafe {
15041        let b: f16 = simd_extract!(b, LANE as u32);
15042        a * b
15043    }
15044}
15045#[doc = "Floating-point multiply"]
15046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
15047#[inline]
15048#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15049#[rustc_legacy_const_generics(2)]
15050#[target_feature(enable = "neon,fp16")]
15051#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15052pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
15053    static_assert_uimm_bits!(LANE, 3);
15054    unsafe {
15055        let b: f16 = simd_extract!(b, LANE as u32);
15056        a * b
15057    }
15058}
15059#[doc = "Multiply long"]
15060#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
15061#[inline]
15062#[target_feature(enable = "neon")]
15063#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15064#[rustc_legacy_const_generics(2)]
15065#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15066pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
15067    static_assert_uimm_bits!(LANE, 2);
15068    unsafe {
15069        vmull_high_s16(
15070            a,
15071            simd_shuffle!(
15072                b,
15073                b,
15074                [
15075                    LANE as u32,
15076                    LANE as u32,
15077                    LANE as u32,
15078                    LANE as u32,
15079                    LANE as u32,
15080                    LANE as u32,
15081                    LANE as u32,
15082                    LANE as u32
15083                ]
15084            ),
15085        )
15086    }
15087}
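// Editorial illustration, not emitted by stdarch-gen-arm: the `_high_lane_`
// forms widen the upper half of `a` and multiply it by one broadcast lane of
// `b`. A minimal sketch, assuming the `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmull_high_lane_s16_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn multiplies_upper_lanes_by_a_selected_lane() {
        let data: [i16; 8] = [0, 0, 0, 0, 1, 2, 3, 4];
        let a = vld1q_s16(data.as_ptr());
        let lanes: [i16; 4] = [10, 20, 30, 40];
        let b = vld1_s16(lanes.as_ptr());
        // LANE = 1 broadcasts b[1] == 20 across the multiply.
        let r = vmull_high_lane_s16::<1>(a, b);
        assert_eq!(vgetq_lane_s32::<0>(r), 20); // 1 * 20
        assert_eq!(vgetq_lane_s32::<3>(r), 80); // 4 * 20
    }
}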
15088#[doc = "Multiply long"]
15089#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
15090#[inline]
15091#[target_feature(enable = "neon")]
15092#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15093#[rustc_legacy_const_generics(2)]
15094#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15095pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
15096    static_assert_uimm_bits!(LANE, 3);
15097    unsafe {
15098        vmull_high_s16(
15099            a,
15100            simd_shuffle!(
15101                b,
15102                b,
15103                [
15104                    LANE as u32,
15105                    LANE as u32,
15106                    LANE as u32,
15107                    LANE as u32,
15108                    LANE as u32,
15109                    LANE as u32,
15110                    LANE as u32,
15111                    LANE as u32
15112                ]
15113            ),
15114        )
15115    }
15116}
15117#[doc = "Multiply long"]
15118#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
15119#[inline]
15120#[target_feature(enable = "neon")]
15121#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15122#[rustc_legacy_const_generics(2)]
15123#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15124pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
15125    static_assert_uimm_bits!(LANE, 1);
15126    unsafe {
15127        vmull_high_s32(
15128            a,
15129            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15130        )
15131    }
15132}
15133#[doc = "Multiply long"]
15134#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
15135#[inline]
15136#[target_feature(enable = "neon")]
15137#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
15138#[rustc_legacy_const_generics(2)]
15139#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15140pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
15141    static_assert_uimm_bits!(LANE, 2);
15142    unsafe {
15143        vmull_high_s32(
15144            a,
15145            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15146        )
15147    }
15148}
15149#[doc = "Multiply long"]
15150#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
15151#[inline]
15152#[target_feature(enable = "neon")]
15153#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15154#[rustc_legacy_const_generics(2)]
15155#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15156pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
15157    static_assert_uimm_bits!(LANE, 2);
15158    unsafe {
15159        vmull_high_u16(
15160            a,
15161            simd_shuffle!(
15162                b,
15163                b,
15164                [
15165                    LANE as u32,
15166                    LANE as u32,
15167                    LANE as u32,
15168                    LANE as u32,
15169                    LANE as u32,
15170                    LANE as u32,
15171                    LANE as u32,
15172                    LANE as u32
15173                ]
15174            ),
15175        )
15176    }
15177}
15178#[doc = "Multiply long"]
15179#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
15180#[inline]
15181#[target_feature(enable = "neon")]
15182#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15183#[rustc_legacy_const_generics(2)]
15184#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15185pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
15186    static_assert_uimm_bits!(LANE, 3);
15187    unsafe {
15188        vmull_high_u16(
15189            a,
15190            simd_shuffle!(
15191                b,
15192                b,
15193                [
15194                    LANE as u32,
15195                    LANE as u32,
15196                    LANE as u32,
15197                    LANE as u32,
15198                    LANE as u32,
15199                    LANE as u32,
15200                    LANE as u32,
15201                    LANE as u32
15202                ]
15203            ),
15204        )
15205    }
15206}
15207#[doc = "Multiply long"]
15208#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
15209#[inline]
15210#[target_feature(enable = "neon")]
15211#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15212#[rustc_legacy_const_generics(2)]
15213#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15214pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
15215    static_assert_uimm_bits!(LANE, 1);
15216    unsafe {
15217        vmull_high_u32(
15218            a,
15219            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15220        )
15221    }
15222}
15223#[doc = "Multiply long"]
15224#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
15225#[inline]
15226#[target_feature(enable = "neon")]
15227#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
15228#[rustc_legacy_const_generics(2)]
15229#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15230pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
15231    static_assert_uimm_bits!(LANE, 2);
15232    unsafe {
15233        vmull_high_u32(
15234            a,
15235            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15236        )
15237    }
15238}
15239#[doc = "Multiply long"]
15240#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
15241#[inline]
15242#[target_feature(enable = "neon")]
15243#[cfg_attr(test, assert_instr(smull2))]
15244#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15245pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
15246    vmull_high_s16(a, vdupq_n_s16(b))
15247}
15248#[doc = "Multiply long"]
15249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
15250#[inline]
15251#[target_feature(enable = "neon")]
15252#[cfg_attr(test, assert_instr(smull2))]
15253#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15254pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
15255    vmull_high_s32(a, vdupq_n_s32(b))
15256}
15257#[doc = "Multiply long"]
15258#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
15259#[inline]
15260#[target_feature(enable = "neon")]
15261#[cfg_attr(test, assert_instr(umull2))]
15262#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15263pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
15264    vmull_high_u16(a, vdupq_n_u16(b))
15265}
15266#[doc = "Multiply long"]
15267#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
15268#[inline]
15269#[target_feature(enable = "neon")]
15270#[cfg_attr(test, assert_instr(umull2))]
15271#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15272pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
15273    vmull_high_u32(a, vdupq_n_u32(b))
15274}
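// Editorial illustration, not emitted by stdarch-gen-arm: the point of a
// "long" multiply is that the operands are widened first, so products that
// would wrap in the source width are exact in the doubled result width.
// A minimal sketch, assuming the `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmull_high_n_u32_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn widens_before_multiplying() {
        // u32::MAX * 2 overflows 32 bits but fits the 64-bit result lanes.
        let a = vdupq_n_u32(u32::MAX);
        let r = vmull_high_n_u32(a, 2);
        assert_eq!(vgetq_lane_u64::<0>(r), u32::MAX as u64 * 2);
    }
}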
15275#[doc = "Polynomial multiply long"]
15276#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
15277#[inline]
15278#[target_feature(enable = "neon,aes")]
15279#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15280#[cfg_attr(test, assert_instr(pmull))]
15281pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
15282    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
15283}
15284#[doc = "Polynomial multiply long"]
15285#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
15286#[inline]
15287#[target_feature(enable = "neon")]
15288#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15289#[cfg_attr(test, assert_instr(pmull))]
15290pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
15291    unsafe {
15292        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
15293        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
15294        vmull_p8(a, b)
15295    }
15296}
15297#[doc = "Signed multiply long"]
15298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
15299#[inline]
15300#[target_feature(enable = "neon")]
15301#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15302#[cfg_attr(test, assert_instr(smull2))]
15303pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
15304    unsafe {
15305        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
15306        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
15307        vmull_s8(a, b)
15308    }
15309}
15310#[doc = "Signed multiply long"]
15311#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
15312#[inline]
15313#[target_feature(enable = "neon")]
15314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15315#[cfg_attr(test, assert_instr(smull2))]
15316pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
15317    unsafe {
15318        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
15319        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
15320        vmull_s16(a, b)
15321    }
15322}
15323#[doc = "Signed multiply long"]
15324#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
15325#[inline]
15326#[target_feature(enable = "neon")]
15327#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15328#[cfg_attr(test, assert_instr(smull2))]
15329pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
15330    unsafe {
15331        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
15332        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
15333        vmull_s32(a, b)
15334    }
15335}
15336#[doc = "Unsigned multiply long"]
15337#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
15338#[inline]
15339#[target_feature(enable = "neon")]
15340#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15341#[cfg_attr(test, assert_instr(umull2))]
15342pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
15343    unsafe {
15344        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
15345        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
15346        vmull_u8(a, b)
15347    }
15348}
15349#[doc = "Unsigned multiply long"]
15350#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
15351#[inline]
15352#[target_feature(enable = "neon")]
15353#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15354#[cfg_attr(test, assert_instr(umull2))]
15355pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
15356    unsafe {
15357        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
15358        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
15359        vmull_u16(a, b)
15360    }
15361}
15362#[doc = "Unsigned multiply long"]
15363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
15364#[inline]
15365#[target_feature(enable = "neon")]
15366#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15367#[cfg_attr(test, assert_instr(umull2))]
15368pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
15369    unsafe {
15370        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
15371        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
15372        vmull_u32(a, b)
15373    }
15374}
15375#[doc = "Polynomial multiply long"]
15376#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
15377#[inline]
15378#[target_feature(enable = "neon,aes")]
15379#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15380#[cfg_attr(test, assert_instr(pmull))]
15381pub fn vmull_p64(a: p64, b: p64) -> p128 {
15382    unsafe extern "unadjusted" {
15383        #[cfg_attr(
15384            any(target_arch = "aarch64", target_arch = "arm64ec"),
15385            link_name = "llvm.aarch64.neon.pmull64"
15386        )]
15387        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
15388    }
15389    unsafe { transmute(_vmull_p64(a, b)) }
15390}
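// Editorial illustration, not emitted by stdarch-gen-arm: PMULL is a
// carry-less (polynomial, GF(2)) multiply, the building block of CRC and
// GHASH/AES-GCM kernels, so its result differs from an integer product.
// A minimal sketch, assuming the `stdarch_test::simd_test` harness.
#[cfg(test)]
mod vmull_p64_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon,aes")]
    unsafe fn multiplies_without_carries() {
        // 0b11 * 0b11 = 0b11 ^ (0b11 << 1) = 0b101: an XOR of shifted copies
        // with no carry propagation (contrast the integer product 3 * 3 = 9).
        assert_eq!(vmull_p64(3, 3), 5);
    }
}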
15391#[doc = "Floating-point multiply"]
15392#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
15393#[inline]
15394#[target_feature(enable = "neon")]
15395#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15396#[rustc_legacy_const_generics(2)]
15397#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15398pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
15399    static_assert!(LANE == 0);
15400    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15401}
15402#[doc = "Floating-point multiply"]
15403#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
15404#[inline]
15405#[target_feature(enable = "neon")]
15406#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15407#[rustc_legacy_const_generics(2)]
15408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15409pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15410    static_assert_uimm_bits!(LANE, 1);
15411    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15412}
15413#[doc = "Floating-point multiply"]
15414#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
15415#[inline]
15416#[target_feature(enable = "neon")]
15417#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15418#[rustc_legacy_const_generics(2)]
15419#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15420pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
15421    static_assert_uimm_bits!(LANE, 1);
15422    unsafe {
15423        let b: f32 = simd_extract!(b, LANE as u32);
15424        a * b
15425    }
15426}
15427#[doc = "Floating-point multiply"]
15428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
15429#[inline]
15430#[target_feature(enable = "neon")]
15431#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15432#[rustc_legacy_const_generics(2)]
15433#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15434pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
15435    static_assert_uimm_bits!(LANE, 2);
15436    unsafe {
15437        let b: f32 = simd_extract!(b, LANE as u32);
15438        a * b
15439    }
15440}
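// Editorial illustration, not emitted by stdarch-gen-arm: the scalar-suffixed
// forms (`vmuls_`, `vmuld_`) extract one lane and return a plain float rather
// than a vector. A minimal sketch, assuming the `stdarch_test::simd_test`
// harness.
#[cfg(test)]
mod vmuls_laneq_f32_example {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn multiplies_a_scalar_by_one_lane() {
        let lanes: [f32; 4] = [1.5, 2.5, 3.5, 4.5];
        let b = vld1q_f32(lanes.as_ptr());
        // LANE = 2 picks 3.5: 2.0 * 3.5 = 7.0, exactly representable.
        assert_eq!(vmuls_laneq_f32::<2>(2.0, b), 7.0);
    }
}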
15441#[doc = "Floating-point multiply"]
15442#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
15443#[inline]
15444#[target_feature(enable = "neon")]
15445#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15446#[rustc_legacy_const_generics(2)]
15447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15448pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
15449    static_assert_uimm_bits!(LANE, 1);
15450    unsafe {
15451        let b: f64 = simd_extract!(b, LANE as u32);
15452        a * b
15453    }
15454}
15455#[doc = "Floating-point multiply extended"]
15456#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
15457#[inline]
15458#[target_feature(enable = "neon,fp16")]
15459#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15460#[cfg_attr(test, assert_instr(fmulx))]
15461pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15462    unsafe extern "unadjusted" {
15463        #[cfg_attr(
15464            any(target_arch = "aarch64", target_arch = "arm64ec"),
15465            link_name = "llvm.aarch64.neon.fmulx.v4f16"
15466        )]
15467        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
15468    }
15469    unsafe { _vmulx_f16(a, b) }
15470}
15471#[doc = "Floating-point multiply extended"]
15472#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
15473#[inline]
15474#[target_feature(enable = "neon,fp16")]
15475#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15476#[cfg_attr(test, assert_instr(fmulx))]
15477pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15478    unsafe extern "unadjusted" {
15479        #[cfg_attr(
15480            any(target_arch = "aarch64", target_arch = "arm64ec"),
15481            link_name = "llvm.aarch64.neon.fmulx.v8f16"
15482        )]
15483        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15484    }
15485    unsafe { _vmulxq_f16(a, b) }
15486}
15487#[doc = "Floating-point multiply extended"]
15488#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
15489#[inline]
15490#[target_feature(enable = "neon")]
15491#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15492#[cfg_attr(test, assert_instr(fmulx))]
15493pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
15494    unsafe extern "unadjusted" {
15495        #[cfg_attr(
15496            any(target_arch = "aarch64", target_arch = "arm64ec"),
15497            link_name = "llvm.aarch64.neon.fmulx.v2f32"
15498        )]
15499        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
15500    }
15501    unsafe { _vmulx_f32(a, b) }
15502}
15503#[doc = "Floating-point multiply extended"]
15504#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
15505#[inline]
15506#[target_feature(enable = "neon")]
15507#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15508#[cfg_attr(test, assert_instr(fmulx))]
15509pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15510    unsafe extern "unadjusted" {
15511        #[cfg_attr(
15512            any(target_arch = "aarch64", target_arch = "arm64ec"),
15513            link_name = "llvm.aarch64.neon.fmulx.v4f32"
15514        )]
15515        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
15516    }
15517    unsafe { _vmulxq_f32(a, b) }
15518}
15519#[doc = "Floating-point multiply extended"]
15520#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
15521#[inline]
15522#[target_feature(enable = "neon")]
15523#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15524#[cfg_attr(test, assert_instr(fmulx))]
15525pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
15526    unsafe extern "unadjusted" {
15527        #[cfg_attr(
15528            any(target_arch = "aarch64", target_arch = "arm64ec"),
15529            link_name = "llvm.aarch64.neon.fmulx.v1f64"
15530        )]
15531        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
15532    }
15533    unsafe { _vmulx_f64(a, b) }
15534}
15535#[doc = "Floating-point multiply extended"]
15536#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
15537#[inline]
15538#[target_feature(enable = "neon")]
15539#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15540#[cfg_attr(test, assert_instr(fmulx))]
15541pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15542    unsafe extern "unadjusted" {
15543        #[cfg_attr(
15544            any(target_arch = "aarch64", target_arch = "arm64ec"),
15545            link_name = "llvm.aarch64.neon.fmulx.v2f64"
15546        )]
15547        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
15548    }
15549    unsafe { _vmulxq_f64(a, b) }
15550}
15551#[doc = "Floating-point multiply extended"]
15552#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
15553#[inline]
15554#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15555#[rustc_legacy_const_generics(2)]
15556#[target_feature(enable = "neon,fp16")]
15557#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15558pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15559    static_assert_uimm_bits!(LANE, 2);
15560    unsafe {
15561        vmulx_f16(
15562            a,
15563            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15564        )
15565    }
15566}
15567#[doc = "Floating-point multiply extended"]
15568#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
15569#[inline]
15570#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15571#[rustc_legacy_const_generics(2)]
15572#[target_feature(enable = "neon,fp16")]
15573#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15574pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
15575    static_assert_uimm_bits!(LANE, 3);
15576    unsafe {
15577        vmulx_f16(
15578            a,
15579            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15580        )
15581    }
15582}
15583#[doc = "Floating-point multiply extended"]
15584#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
15585#[inline]
15586#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15587#[rustc_legacy_const_generics(2)]
15588#[target_feature(enable = "neon,fp16")]
15589#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15590pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
15591    static_assert_uimm_bits!(LANE, 2);
15592    unsafe {
15593        vmulxq_f16(
15594            a,
15595            simd_shuffle!(
15596                b,
15597                b,
15598                [
15599                    LANE as u32,
15600                    LANE as u32,
15601                    LANE as u32,
15602                    LANE as u32,
15603                    LANE as u32,
15604                    LANE as u32,
15605                    LANE as u32,
15606                    LANE as u32
15607                ]
15608            ),
15609        )
15610    }
15611}
15612#[doc = "Floating-point multiply extended"]
15613#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
15614#[inline]
15615#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15616#[rustc_legacy_const_generics(2)]
15617#[target_feature(enable = "neon,fp16")]
15618#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15619pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15620    static_assert_uimm_bits!(LANE, 3);
15621    unsafe {
15622        vmulxq_f16(
15623            a,
15624            simd_shuffle!(
15625                b,
15626                b,
15627                [
15628                    LANE as u32,
15629                    LANE as u32,
15630                    LANE as u32,
15631                    LANE as u32,
15632                    LANE as u32,
15633                    LANE as u32,
15634                    LANE as u32,
15635                    LANE as u32
15636                ]
15637            ),
15638        )
15639    }
15640}
15641#[doc = "Floating-point multiply extended"]
15642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
15643#[inline]
15644#[target_feature(enable = "neon")]
15645#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15646#[rustc_legacy_const_generics(2)]
15647#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15648pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
15649    static_assert_uimm_bits!(LANE, 1);
15650    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15651}
15652#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
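// FMULX multiplies like FMUL except that 0.0 * ±infinity (in either order)
// returns ±2.0 instead of NaN. A minimal illustrative sketch, assuming an
// AArch64 target with the `neon` feature enabled (not part of the generated
// API surface):
//
//     let a = vdup_n_f64(0.0);
//     let b = vdup_n_f64(f64::INFINITY);
//     let r = vmulx_f64(a, b);
//     assert_eq!(vget_lane_f64::<0>(r), 2.0);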
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
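// The scalar `_lane`/`_laneq` forms multiply a scalar by one selected vector
// lane. Illustrative sketch, assuming an AArch64 target with `neon` enabled
// (not part of the generated API surface):
//
//     let v = unsafe { vld1q_f32([1.0f32, 2.0, 3.0, 4.0].as_ptr()) };
//     assert_eq!(vmulxs_laneq_f32::<2>(10.0, v), 30.0);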
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegd_s64(a: i64) -> i64 {
    a.wrapping_neg()
}
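// The scalar NEG wraps on overflow rather than panicking, which is why the
// implementation above uses `wrapping_neg`. Illustrative (not part of the
// generated API surface):
//
//     assert_eq!(vnegd_s64(5), -5);
//     assert_eq!(vnegd_s64(i64::MIN), i64::MIN); // wraps, no overflow panic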
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    unsafe {
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    unsafe {
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
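// The single-operand pairwise-add scalars reduce a two-lane vector to the
// sum of its lanes. Illustrative sketch, assuming `neon` is enabled (not
// part of the generated API surface):
//
//     let v = unsafe { vld1_f32([1.5f32, 2.5].as_ptr()) };
//     assert_eq!(vpadds_f32(v), 4.0);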
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    unsafe { transmute(vaddvq_u64(transmute(a))) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(vaddvq_u64(transmute(a))) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    vaddvq_u64(a)
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v8f16"
        )]
        fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpaddq_f16(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v4f32"
        )]
        fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpaddq_f32(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v2f64"
        )]
        fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpaddq_f64(a, b) }
}
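// Two-operand ADDP/FADDP sums adjacent pairs across the concatenation of the
// inputs: the low half of the result holds the pair sums of `a`, the high
// half those of `b`. Illustrative sketch (not part of the generated API
// surface):
//
//     let a = unsafe { vld1q_f64([1.0f64, 2.0].as_ptr()) };
//     let b = unsafe { vld1q_f64([10.0f64, 20.0].as_ptr()) };
//     let r = vpaddq_f64(a, b); // lanes: [3.0, 30.0]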
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v16i8"
        )]
        fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpaddq_s8(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v8i16"
        )]
        fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpaddq_s16(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v4i32"
        )]
        fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpaddq_s32(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v2i64"
        )]
        fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
    }
    unsafe { _vpaddq_s64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
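// Note on the big-endian variants above: lane order is reversed before and
// after the transmute-based call so that the signed implementation sees, and
// produces, lanes in the architectural order; the result is identical on
// both endiannesses, and the extra shuffles are expected to fold away during
// codegen.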
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
        )]
        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmax_f16(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
        )]
        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpmaxq_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
        )]
        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmaxnm_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
        )]
        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpmaxnmq_f16(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxnms_f32(a) }
}
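// FMAXNMP follows the IEEE 754 maxNum rule: a quiet NaN in one element is
// ignored when the other element is numeric. Illustrative sketch (not part
// of the generated API surface):
//
//     let v = unsafe { vld1_f32([f32::NAN, 3.0].as_ptr()) };
//     assert_eq!(vpmaxnms_f32(v), 3.0);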
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxq_f64(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxs_f32(a) }
}
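// Plain FMAXP, by contrast, propagates NaN operands. Illustrative (not part
// of the generated API surface):
//
//     let v = unsafe { vld1_f32([f32::NAN, 3.0].as_ptr()) };
//     assert!(vpmaxs_f32(v).is_nan());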
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f16"
        )]
        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmin_f16(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v8f16"
        )]
        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpminq_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
        )]
        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpminnm_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
        )]
        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpminnmq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vpminq_u16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v4i32"
        )]
        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vpminq_u32(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vpminqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpminqd_f64(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmins_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vpmins_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmins_f32(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    unsafe { _vqabsd_s64(a) }
}
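// SQABS saturates instead of wrapping, so the most negative value maps to
// the most positive one rather than to itself. Illustrative (not part of
// the generated API surface):
//
//     assert_eq!(vqabsd_s64(i64::MIN), i64::MAX);
//     assert_eq!(vqabsb_s8(-128), 127);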
16889#[doc = "Saturating add"]
16890#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
16891#[inline]
16892#[target_feature(enable = "neon")]
16893#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16894#[cfg_attr(test, assert_instr(sqadd))]
16895pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
16896    let a: int8x8_t = vdup_n_s8(a);
16897    let b: int8x8_t = vdup_n_s8(b);
16898    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
16899}
16900#[doc = "Saturating add"]
16901#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
16902#[inline]
16903#[target_feature(enable = "neon")]
16904#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16905#[cfg_attr(test, assert_instr(sqadd))]
16906pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
16907    let a: int16x4_t = vdup_n_s16(a);
16908    let b: int16x4_t = vdup_n_s16(b);
16909    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
16910}
16911#[doc = "Saturating add"]
16912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
16913#[inline]
16914#[target_feature(enable = "neon")]
16915#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16916#[cfg_attr(test, assert_instr(uqadd))]
16917pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
16918    let a: uint8x8_t = vdup_n_u8(a);
16919    let b: uint8x8_t = vdup_n_u8(b);
16920    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
16921}
16922#[doc = "Saturating add"]
16923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
16924#[inline]
16925#[target_feature(enable = "neon")]
16926#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16927#[cfg_attr(test, assert_instr(uqadd))]
16928pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
16929    let a: uint16x4_t = vdup_n_u16(a);
16930    let b: uint16x4_t = vdup_n_u16(b);
16931    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
16932}
16933#[doc = "Saturating add"]
16934#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
16935#[inline]
16936#[target_feature(enable = "neon")]
16937#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16938#[cfg_attr(test, assert_instr(sqadd))]
16939pub fn vqadds_s32(a: i32, b: i32) -> i32 {
16940    unsafe extern "unadjusted" {
16941        #[cfg_attr(
16942            any(target_arch = "aarch64", target_arch = "arm64ec"),
16943            link_name = "llvm.aarch64.neon.sqadd.i32"
16944        )]
16945        fn _vqadds_s32(a: i32, b: i32) -> i32;
16946    }
16947    unsafe { _vqadds_s32(a, b) }
16948}
16949#[doc = "Saturating add"]
16950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
16951#[inline]
16952#[target_feature(enable = "neon")]
16953#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16954#[cfg_attr(test, assert_instr(sqadd))]
16955pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
16956    unsafe extern "unadjusted" {
16957        #[cfg_attr(
16958            any(target_arch = "aarch64", target_arch = "arm64ec"),
16959            link_name = "llvm.aarch64.neon.sqadd.i64"
16960        )]
16961        fn _vqaddd_s64(a: i64, b: i64) -> i64;
16962    }
16963    unsafe { _vqaddd_s64(a, b) }
16964}
16965#[doc = "Saturating add"]
16966#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
16967#[inline]
16968#[target_feature(enable = "neon")]
16969#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16970#[cfg_attr(test, assert_instr(uqadd))]
16971pub fn vqadds_u32(a: u32, b: u32) -> u32 {
16972    unsafe extern "unadjusted" {
16973        #[cfg_attr(
16974            any(target_arch = "aarch64", target_arch = "arm64ec"),
16975            link_name = "llvm.aarch64.neon.uqadd.i32"
16976        )]
16977        fn _vqadds_u32(a: u32, b: u32) -> u32;
16978    }
16979    unsafe { _vqadds_u32(a, b) }
16980}
16981#[doc = "Saturating add"]
16982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
16983#[inline]
16984#[target_feature(enable = "neon")]
16985#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16986#[cfg_attr(test, assert_instr(uqadd))]
16987pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
16988    unsafe extern "unadjusted" {
16989        #[cfg_attr(
16990            any(target_arch = "aarch64", target_arch = "arm64ec"),
16991            link_name = "llvm.aarch64.neon.uqadd.i64"
16992        )]
16993        fn _vqaddd_u64(a: u64, b: u64) -> u64;
16994    }
16995    unsafe { _vqaddd_u64(a, b) }
16996}
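// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. It shows
// the clamping behaviour of the scalar saturating adds above; the helper name
// `_scalar_saturating_add_demo` is invented for this example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _scalar_saturating_add_demo() {
    // Signed overflow clamps at the type bounds instead of wrapping.
    assert_eq!(vqaddb_s8(i8::MAX, 1), i8::MAX);
    assert_eq!(vqaddd_s64(i64::MIN, -1), i64::MIN);
    // Unsigned overflow clamps at the maximum value.
    assert_eq!(vqaddb_u8(200, 100), u8::MAX);
}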
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
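// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. Absent
// saturation, vqdmlalh_s16 computes a + 2 * b * c; the lane variants first
// select one element of the vector operand. The helper name
// `_vqdmlal_scalar_demo` is invented for this example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vqdmlal_scalar_demo() {
    // 10 + 2 * 3 * 4 == 34
    assert_eq!(vqdmlalh_s16(10, 3, 4), 34);
    // Every lane of the vdup'ed vector is 4, so any LANE gives the same result.
    assert_eq!(vqdmlalh_lane_s16::<2>(10, 3, vdup_n_s16(4)), 34);
}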
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
    vqaddd_s64(a, vqdmulls_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    vqsubd_s64(a, vqdmulls_s32(b, c))
}
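// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. The
// multiply-subtract forms mirror vqdmlal*: without saturation they compute
// a - 2 * b * c. The helper name `_vqdmlsl_scalar_demo` is invented here.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vqdmlsl_scalar_demo() {
    // 10 - 2 * 3 * 4 == -14
    assert_eq!(vqdmlslh_s16(10, 3, 4), -14);
    // 100 - 2 * 5 * 2 == 80
    assert_eq!(vqdmlsls_s32(100, 5, 2), 80);
}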
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
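// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. sqdmulh
// returns the high half of the doubled product: for i16 that is
// (2 * a * b) >> 16, with the 32-bit intermediate saturated first. The helper
// name `_vqdmulh_scalar_demo` is invented for this example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vqdmulh_scalar_demo() {
    // 2 * 0x4000 * 0x4000 == 0x2000_0000; its high 16 bits are 0x2000.
    assert_eq!(vqdmulhh_s16(0x4000, 0x4000), 0x2000);
    // i16::MIN * i16::MIN doubles to 2^31, which saturates to i32::MAX,
    // so the returned high half is i16::MAX.
    assert_eq!(vqdmulhh_s16(i16::MIN, i16::MIN), i16::MAX);
}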
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    unsafe { _vqdmulls_s32(a, b) }
}
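// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. The
// widening forms return 2 * a * b in the double-width type; only
// 2 * i32::MIN * i32::MIN (== 2^63) overflows i64 and saturates. The helper
// name `_vqdmull_scalar_demo` is invented for this example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vqdmull_scalar_demo() {
    assert_eq!(vqdmulls_s32(3, 4), 24);
    assert_eq!(vqdmulls_s32(i32::MIN, i32::MIN), i64::MAX);
    // The same single saturating case exists one width down.
    assert_eq!(vqdmullh_s16(i16::MIN, i16::MIN), i32::MAX);
}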
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_u16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
}
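// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. The
// *_high forms narrow `b` with saturation into the upper half of the result
// while keeping `a` as the lower half. The helper name `_vqmovn_high_demo`
// is invented for this example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vqmovn_high_demo() {
    let lo = vdup_n_s8(7);
    let wide = vdupq_n_s16(1000); // narrows to i8::MAX in every lane
    let r = vqmovn_high_s16(lo, wide);
    unsafe {
        let kept: i8 = simd_extract!(r, 0);
        let narrowed: i8 = simd_extract!(r, 8);
        assert_eq!(kept, 7);
        assert_eq!(narrowed, i8::MAX);
    }
}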
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
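// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. The
// scalar narrows clamp to the bounds of the destination type. The helper
// name `_vqmovn_scalar_demo` is invented for this example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vqmovn_scalar_demo() {
    assert_eq!(vqmovnh_s16(-200), i8::MIN); // -200 < -128
    assert_eq!(vqmovns_s32(70_000), i16::MAX); // 70_000 > 32_767
    assert_eq!(vqmovnd_u64(u64::MAX), u32::MAX);
}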
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
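// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. The
// signed-to-unsigned narrows clamp negative inputs to zero and large inputs
// to the unsigned maximum. The helper name `_vqmovun_scalar_demo` is
// invented for this example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vqmovun_scalar_demo() {
    assert_eq!(vqmovunh_s16(-5), 0);
    assert_eq!(vqmovuns_s32(70_000), u16::MAX);
    assert_eq!(vqmovund_s64(-1), 0);
}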
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
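// NOTE(editor): illustrative sketch, not emitted by stdarch-gen-arm. Plain
// negation of i8::MIN would wrap back to i8::MIN; the saturating negate
// clamps to i8::MAX instead. The helper name `_vqneg_scalar_demo` is
// invented for this example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn _vqneg_scalar_demo() {
    assert_eq!(vqnegb_s8(i8::MIN), i8::MAX);
    assert_eq!(vqnegd_s64(5), -5);
}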
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
18142#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
18143#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
18144#[inline]
18145#[target_feature(enable = "rdm")]
18146#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
18147#[rustc_legacy_const_generics(3)]
18148#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18149pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
18150    static_assert_uimm_bits!(LANE, 2);
18151    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
18152}
18153#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
18154#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
18155#[inline]
18156#[target_feature(enable = "rdm")]
18157#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
18158#[rustc_legacy_const_generics(3)]
18159#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18160pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
18161    static_assert_uimm_bits!(LANE, 3);
18162    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
18163}
18164#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
18165#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
18166#[inline]
18167#[target_feature(enable = "rdm")]
18168#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
18169#[rustc_legacy_const_generics(3)]
18170#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18171pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
18172    static_assert_uimm_bits!(LANE, 1);
18173    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
18174}
18175#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
18176#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
18177#[inline]
18178#[target_feature(enable = "rdm")]
18179#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
18180#[rustc_legacy_const_generics(3)]
18181#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18182pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
18183    static_assert_uimm_bits!(LANE, 2);
18184    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
18185}
18186#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
18187#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
18188#[inline]
18189#[target_feature(enable = "rdm")]
18190#[cfg_attr(test, assert_instr(sqrdmlah))]
18191#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18192pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
18193    let a: int16x4_t = vdup_n_s16(a);
18194    let b: int16x4_t = vdup_n_s16(b);
18195    let c: int16x4_t = vdup_n_s16(c);
18196    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
18197}
18198#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
18199#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
18200#[inline]
18201#[target_feature(enable = "rdm")]
18202#[cfg_attr(test, assert_instr(sqrdmlah))]
18203#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18204pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
18205    let a: int32x2_t = vdup_n_s32(a);
18206    let b: int32x2_t = vdup_n_s32(b);
18207    let c: int32x2_t = vdup_n_s32(c);
18208    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
18209}
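// Editorial addition: a hypothetical usage sketch, not generator output. It
// assumes the scalar semantics sat16(a + ((2 * b * c + (1 << 15)) >> 16)) for
// the 16-bit form, per the Arm description above; the function name is
// illustrative only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "rdm")]
fn example_vqrdmlahh_s16_sketch() {
    // 2 * 16384 * 16384 = 2^29; rounding by 2^15 and shifting right by 16
    // contributes 8192, accumulated onto `a`.
    assert_eq!(vqrdmlahh_s16(1, 16384, 16384), 8193);
    // The accumulate saturates instead of wrapping.
    assert_eq!(vqrdmlahh_s16(i16::MAX, 16384, 16384), i16::MAX);
}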
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
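// Editorial addition: a hypothetical sketch mirroring the vqrdmlah one above;
// SQRDMLSH subtracts the rounded doubling high half, assumed here as
// sat16(a - ((2 * b * c + (1 << 15)) >> 16)) for the 16-bit form.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "rdm")]
fn example_vqrdmlshh_s16_sketch() {
    // The product term is 8192 (see the vqrdmlah sketch), subtracted from `a`.
    assert_eq!(vqrdmlshh_s16(10000, 16384, 16384), 1808);
    // Subtraction saturates at the negative end.
    assert_eq!(vqrdmlshh_s16(i16::MIN, 16384, 16384), i16::MIN);
}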
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
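// Editorial addition: a hypothetical sketch of the scalar SQRDMULH semantics,
// sat16((2 * a * b + (1 << 15)) >> 16) for the 16-bit form; the function name
// is illustrative only.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrdmulhh_s16_sketch() {
    // Q15: 0.5 * 0.5 = 0.25, i.e. 16384 * 16384 doubles and rounds to 8192.
    assert_eq!(vqrdmulhh_s16(16384, 16384), 8192);
    // The single overflow case: i16::MIN * i16::MIN saturates to i16::MAX.
    assert_eq!(vqrdmulhh_s16(i16::MIN, i16::MIN), i16::MAX);
}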
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned saturating rounding shift left (signed shift amount)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned saturating rounding shift left (signed shift amount)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
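// Editorial addition: a hypothetical sketch. In both the signed and unsigned
// variants the shift operand is signed: a positive `b` shifts left with
// saturation, a negative `b` becomes a rounding shift right.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshl_scalar_sketch() {
    // In-range left shift: 5 << 2 = 20.
    assert_eq!(vqrshlb_s8(5, 2), 20);
    // Overflowing left shift saturates: 100 << 1 -> i8::MAX.
    assert_eq!(vqrshlb_s8(100, 1), i8::MAX);
    // Negative shift rounds: (5 + 1) >> 1 = 3.
    assert_eq!(vqrshlh_s16(5, -1), 3);
    // Unsigned value, signed shift: (200 + 1) >> 1 = 100.
    assert_eq!(vqrshlb_u8(200, -1), 100);
}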
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left (signed shift amount)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left (signed shift amount)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqrshld_u64(a, b) }
}
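// Editorial addition: a hypothetical sketch for the 32/64-bit scalar forms,
// which bind directly to LLVM intrinsics instead of round-tripping through a
// vector; the same signed-shift convention is assumed.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshld_sketch() {
    assert_eq!(vqrshld_s64(3, 4), 48);
    // Rounding right shift: (7 + 1) >> 1 = 4.
    assert_eq!(vqrshld_s64(7, -1), 4);
    // The unsigned form saturates at u64::MAX.
    assert_eq!(vqrshld_u64(u64::MAX, 1), u64::MAX);
}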
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
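// Editorial addition: a hypothetical sketch of the `_high_` narrowing pattern:
// `a` keeps the low half of the result and the rounded, narrowed `b` fills the
// high half. The array comparison assumes a little-endian lane layout.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshrn_high_sketch() {
    let a: uint8x8_t = vdup_n_u8(1);
    let b: uint16x8_t = vdupq_n_u16(512);
    // (512 + 8) >> 4 = 32 in every high lane; the low lanes keep `a`.
    let r: uint8x16_t = vqrshrn_high_n_u16::<4>(a, b);
    let r: [u8; 16] = unsafe { core::mem::transmute(r) };
    assert_eq!(r, [1, 1, 1, 1, 1, 1, 1, 1, 32, 32, 32, 32, 32, 32, 32, 32]);
}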
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
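// Editorial addition: a hypothetical sketch of the scalar rounded narrows:
// shift right by N with rounding, then saturate into the narrower type.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshrn_scalar_sketch() {
    // (100 + 8) >> 4 = 6.
    assert_eq!(vqrshrnh_n_s16::<4>(100), 6);
    // 70000 >> 1 = 35000 does not fit in i16, so the result saturates.
    assert_eq!(vqrshrns_n_s32::<1>(70000), i16::MAX);
    // Unsigned variant: (1000 + 4) >> 3 = 125 fits in u8.
    assert_eq!(vqrshrnh_n_u16::<3>(1000), 125);
}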
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
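// Editorial addition: a hypothetical sketch of the signed-to-unsigned rounded
// narrow: negative inputs clamp to zero, oversized positives clamp to the
// unsigned maximum.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshrun_scalar_sketch() {
    assert_eq!(vqrshrunh_n_s16::<4>(-100), 0);
    // (4000 + 8) >> 4 = 250 fits in u8.
    assert_eq!(vqrshrunh_n_s16::<4>(4000), 250);
    // (32767 + 1) >> 1 = 16384 fits in u16.
    assert_eq!(vqrshruns_n_s32::<1>(32767), 16384);
}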
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
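// Editorial addition: a hypothetical sketch of the scalar immediate shifts:
// the count N is a const generic validated at compile time, and results
// saturate rather than wrap.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshl_n_scalar_sketch() {
    assert_eq!(vqshlb_n_s8::<2>(5), 20);
    // 64 << 2 = 256 overflows i8 and saturates.
    assert_eq!(vqshlb_n_s8::<2>(64), i8::MAX);
    assert_eq!(vqshld_n_u64::<8>(1), 256);
}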
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
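// Editorial addition: a hypothetical sketch of the register-operand forms;
// unlike the rounding variants above, a negative shift count is assumed to
// perform a truncating right shift.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshl_scalar_sketch() {
    assert_eq!(vqshlb_s8(5, 2), 20);
    // Truncating right shift: 5 >> 1 = 2 (no rounding term).
    assert_eq!(vqshlh_s16(5, -1), 2);
    assert_eq!(vqshld_u64(u64::MAX, 1), u64::MAX);
}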
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
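// Editorial addition: a hypothetical sketch of the signed-to-unsigned
// immediate shift: the input is signed, the result unsigned, so negative
// inputs saturate to zero.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshlu_scalar_sketch() {
    assert_eq!(vqshlub_n_s8::<3>(10), 80);
    assert_eq!(vqshlub_n_s8::<3>(-1), 0);
    // 127 << 2 = 508 overflows u8 and saturates.
    assert_eq!(vqshlub_n_s8::<2>(127), u8::MAX);
}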
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
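// Editorial addition: a hypothetical sketch of the truncating `_high_` narrow;
// it differs from vqrshrn_high_n_* above only in dropping the rounding term.
// The array comparison assumes a little-endian lane layout.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshrn_high_sketch() {
    let a: int16x4_t = vdup_n_s16(-1);
    let b: int32x4_t = vdupq_n_s32(1000);
    // 1000 >> 4 truncates to 62 in the high lanes.
    let r: int16x8_t = vqshrn_high_n_s32::<4>(a, b);
    let r: [i16; 8] = unsafe { core::mem::transmute(r) };
    assert_eq!(r, [-1, -1, -1, -1, 62, 62, 62, 62]);
}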
19181#[doc = "Signed saturating shift right narrow"]
19182#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
19183#[inline]
19184#[target_feature(enable = "neon")]
19185#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
19186#[rustc_legacy_const_generics(1)]
19187#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19188pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
19189    static_assert!(N >= 1 && N <= 32);
19190    unsafe extern "unadjusted" {
19191        #[cfg_attr(
19192            any(target_arch = "aarch64", target_arch = "arm64ec"),
19193            link_name = "llvm.aarch64.neon.sqshrn.i32"
19194        )]
19195        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
19196    }
19197    unsafe { _vqshrnd_n_s64(a, N) }
19198}
19199#[doc = "Unsigned saturating shift right narrow"]
19200#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
19201#[inline]
19202#[target_feature(enable = "neon")]
19203#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
19204#[rustc_legacy_const_generics(1)]
19205#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19206pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
19207    static_assert!(N >= 1 && N <= 32);
19208    unsafe extern "unadjusted" {
19209        #[cfg_attr(
19210            any(target_arch = "aarch64", target_arch = "arm64ec"),
19211            link_name = "llvm.aarch64.neon.uqshrn.i32"
19212        )]
19213        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
19214    }
19215    unsafe { _vqshrnd_n_u64(a, N) }
19216}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
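// Note: the 16- and 32-bit scalar forms above are implemented by
// broadcasting the scalar, running the vector narrowing shift, and
// extracting lane 0, rather than by calling a scalar LLVM intrinsic.
// Illustrative result:
//
//     let b: i8 = vqshrnh_n_s16::<4>(0x7FF0); // 0x7FF0 >> 4 = 0x7FF, saturates to i8::MAX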
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
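// Usage sketch (illustrative only): the `shrun` forms take signed input
// but saturate to an unsigned result, so negative lanes clamp to zero and
// large positive lanes clamp to the unsigned maximum:
//
//     let r: uint8x16_t = vqshrun_high_n_s16::<1>(vdup_n_u8(0), vdupq_n_s16(512));
//     // upper eight lanes: 512 >> 1 = 256, saturates to 255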
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
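// Illustrative scalar case of the signed-to-unsigned clamping:
//
//     let z: u16 = vqshruns_n_s32::<1>(-64); // negative input, result is 0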
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqsubd_u64(a, b) }
}
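// Usage sketch (illustrative only): unlike wrapping subtraction, the
// saturating forms clamp at the bounds of the result type:
//
//     assert_eq!(vqsubb_s8(-100, 100), i8::MIN); // -200 clamps to -128
//     assert_eq!(vqsubb_u8(3, 5), 0);            // would underflow, clamps to 0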
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
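// Usage sketch (illustrative only): TBL zeroes any lane whose index falls
// outside the 16-byte table:
//
//     let table = vdupq_n_u8(0x42);
//     let hit = vqtbl1_u8(table, vdup_n_u8(3));   // every lane is 0x42
//     let miss = vqtbl1_u8(table, vdup_n_u8(20)); // 20 >= 16, every lane is 0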
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
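// Note on the big-endian variants: the underlying TBL/TBX intrinsics work
// on register lane order, which big-endian targets see reversed relative
// to the public NEON lane numbering, so the table registers, the indices,
// and the result are lane-reversed around the raw call to keep behaviour
// identical on both endiannesses. The same pattern repeats for every
// multi-table lookup below.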
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
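// Usage sketch (illustrative only): the four-register forms index a
// contiguous 64-byte table, so index 0x25 (37) selects byte 5 of the third
// register:
//
//     let t = uint8x16x4_t(vdupq_n_u8(1), vdupq_n_u8(2), vdupq_n_u8(3), vdupq_n_u8(4));
//     let r = vqtbl4_u8(t, vdup_n_u8(0x25)); // every lane is 3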
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    vqtbx1q(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
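// Usage sketch (illustrative only): TBX differs from TBL only for
// out-of-range indices, which leave the corresponding lane of `a`
// unchanged instead of zeroing it:
//
//     let a = vdup_n_u8(0xAA);
//     let r = vqtbx1_u8(a, vdupq_n_u8(7), vdup_n_u8(0xFF)); // every lane stays 0xAA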
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    vqtbx2q(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x2_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
20578#[doc = "Extended table look-up"]
20579#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
20580#[inline]
20581#[cfg(target_endian = "little")]
20582#[target_feature(enable = "neon")]
20583#[cfg_attr(test, assert_instr(tbx))]
20584#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20585pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
20586    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
20587}
20588#[doc = "Extended table look-up"]
20589#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
20590#[inline]
20591#[cfg(target_endian = "big")]
20592#[target_feature(enable = "neon")]
20593#[cfg_attr(test, assert_instr(tbx))]
20594#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20595pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
20596    let mut b: poly8x16x2_t = b;
20597    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
20598    b.0 = unsafe {
20599        simd_shuffle!(
20600            b.0,
20601            b.0,
20602            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20603        )
20604    };
20605    b.1 = unsafe {
20606        simd_shuffle!(
20607            b.1,
20608            b.1,
20609            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20610        )
20611    };
20612    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
20613    unsafe {
20614        let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
20615        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20616    }
20617}
20618#[doc = "Extended table look-up"]
20619#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
20620#[inline]
20621#[cfg(target_endian = "little")]
20622#[target_feature(enable = "neon")]
20623#[cfg_attr(test, assert_instr(tbx))]
20624#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20625pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
20626    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
20627}
20628#[doc = "Extended table look-up"]
20629#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
20630#[inline]
20631#[cfg(target_endian = "big")]
20632#[target_feature(enable = "neon")]
20633#[cfg_attr(test, assert_instr(tbx))]
20634#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20635pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
20636    let mut b: poly8x16x2_t = b;
20637    let a: poly8x16_t =
20638        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20639    b.0 = unsafe {
20640        simd_shuffle!(
20641            b.0,
20642            b.0,
20643            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20644        )
20645    };
20646    b.1 = unsafe {
20647        simd_shuffle!(
20648            b.1,
20649            b.1,
20650            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20651        )
20652    };
20653    let c: uint8x16_t =
20654        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20655    unsafe {
20656        let ret_val: poly8x16_t =
20657            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
20658        simd_shuffle!(
20659            ret_val,
20660            ret_val,
20661            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20662        )
20663    }
20664}
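// --- Editorial usage sketch (not generated; illustration only). ---
// `vqtbx2q_u8(a, b, c)` treats `b` as a 32-byte table: each lane of `c`
// selects one table byte, and any lane whose index is >= 32 keeps the
// corresponding lane of `a` (TBX semantics; TBL would zero that lane
// instead). The helper below assumes `vld1q_u8` and `vdupq_n_u8` from this
// crate.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn vqtbx2q_u8_usage_sketch() -> uint8x16_t {
    let lo: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    let hi: [u8; 16] = [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31];
    let idx: [u8; 16] = [5, 40, 31, 0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13];
    unsafe {
        // Table bytes are 0..=31; index 40 is out of range, so that lane
        // keeps the fallback value 0xFF from `a`.
        let table = uint8x16x2_t(vld1q_u8(lo.as_ptr()), vld1q_u8(hi.as_ptr()));
        vqtbx2q_u8(vdupq_n_u8(0xFF), table, vld1q_u8(idx.as_ptr()))
    }
}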
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
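// Editorial note (not generated): `vqtbx3`/`vqtbx3q` above are private shims
// bound straight to the `llvm.aarch64.neon.tbx3.*` intrinsics; the public
// wrappers that follow only unpack the `int8x16x3_t` table into the three
// separate table registers the LLVM intrinsic expects.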
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x3_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x3_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
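// Editorial note (not generated): the pattern in every big-endian `u8`/`p8`
// variant above is mechanical: reverse the lanes of each input, call the
// shared little-endian shim, then reverse the result back. The reversals
// appear to compensate for the lane order that `transmute` observes on
// big-endian targets, keeping source-level lane numbering consistent across
// both endiannesses.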
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x4_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x4_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
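// --- Editorial usage sketch (not generated; illustration only). ---
// With four 16-byte table registers, `vqtbx4*` resolves indices 0..=63; any
// index lane >= 64 keeps the fallback lane from `a`. Assumes `vld1q_s8`,
// `vld1_u8`, and `vdup_n_s8` from this crate.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn vqtbx4_s8_usage_sketch() -> int8x8_t {
    // 64-byte table holding 0..=63.
    let mut bytes = [0i8; 64];
    for (i, b) in bytes.iter_mut().enumerate() {
        *b = i as i8;
    }
    let idx: [u8; 8] = [63, 200, 0, 1, 2, 3, 4, 5];
    unsafe {
        let table = int8x16x4_t(
            vld1q_s8(bytes.as_ptr()),
            vld1q_s8(bytes.as_ptr().add(16)),
            vld1q_s8(bytes.as_ptr().add(32)),
            vld1q_s8(bytes.as_ptr().add(48)),
        );
        // Lane 1 uses index 200 (>= 64), so it keeps the fallback value -1.
        vqtbx4_s8(vdup_n_s8(-1), table, vld1_u8(idx.as_ptr()))
    }
}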
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vrax1q_u64(a, b) }
}
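// Editorial note (not generated): per Arm's description of RAX1 (part of the
// SHA-3 extension), the instruction computes `a ^ rotate_left(b, 1)`
// independently on each 64-bit lane. A scalar model of one lane, written
// here purely for illustration:
#[cfg(test)]
fn rax1_lane_model(a: u64, b: u64) -> u64 {
    a ^ b.rotate_left(1)
}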
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
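// Editorial note (not generated): RBIT reverses the bit order within each
// byte lane independently, e.g. 0b0000_0001 becomes 0b1000_0000. A scalar
// model of one lane, for illustration:
#[cfg(test)]
fn rbit_lane_model(x: u8) -> u8 {
    x.reverse_bits()
}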
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpeh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrecpsh_f16(a, b) }
}
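// --- Editorial usage sketch (not generated; illustration only). ---
// FRECPE alone yields only a coarse reciprocal estimate (roughly 8 bits of
// precision); FRECPS computes the Newton-Raphson step `2 - a * x`, so
// multiplying the estimate by the step refines it. Each iteration below
// roughly doubles the number of correct bits.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
fn reciprocal_refined_sketch(a: f32) -> f32 {
    let mut x = vrecpes_f32(a); // initial estimate of 1.0 / a
    x *= vrecpss_f32(a, x);
    x *= vrecpss_f32(a, x);
    x
}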
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpxh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpxh_f16(a) }
}
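// Editorial note (not generated): FRECPX does not approximate `1.0 / x`. Per
// Arm's description it returns a value with the input's sign, a zeroed
// fraction, and (roughly) the bitwise inverse of the input's exponent; it is
// typically used to scale operands away from overflow or underflow before a
// divide or normalisation step.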
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
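// --- Editorial usage sketch (not generated; illustration only). ---
// Every `vreinterpret*` is a pure bit-level cast (a `transmute`): no value
// conversion happens and no instruction is emitted, hence the `nop` in the
// test assertions above. Assumes `vld1q_f32` and `vgetq_lane_f64` from this
// crate; the comment assumes little-endian lane layout.
#[cfg(all(test, target_arch = "aarch64", target_endian = "little"))]
#[target_feature(enable = "neon")]
fn reinterpret_bits_sketch() -> u64 {
    let vals: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
    unsafe {
        let f = vld1q_f32(vals.as_ptr());
        let d: float64x2_t = vreinterpretq_f64_f32(f);
        // Lane 0 of the f64 view holds the raw bits of the first two f32
        // lanes: low 32 bits are 1.0f32, high 32 bits are 2.0f32.
        vgetq_lane_f64::<0>(d).to_bits()
    }
}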
21877#[doc = "Vector reinterpret cast operation"]
21878#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
21879#[inline]
21880#[cfg(target_endian = "little")]
21881#[target_feature(enable = "neon")]
21882#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21883#[cfg_attr(test, assert_instr(nop))]
21884pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
21885    unsafe { transmute(a) }
21886}
21887#[doc = "Vector reinterpret cast operation"]
21888#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
21889#[inline]
21890#[cfg(target_endian = "big")]
21891#[target_feature(enable = "neon")]
21892#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21893#[cfg_attr(test, assert_instr(nop))]
21894pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
21895    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
21896    unsafe {
21897        let ret_val: poly64x2_t = transmute(a);
21898        simd_shuffle!(ret_val, ret_val, [1, 0])
21899    }
21900}
21901#[doc = "Vector reinterpret cast operation"]
21902#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
21903#[inline]
21904#[cfg(target_endian = "little")]
21905#[target_feature(enable = "neon")]
21906#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21907#[cfg_attr(test, assert_instr(nop))]
21908pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
21909    unsafe { transmute(a) }
21910}
21911#[doc = "Vector reinterpret cast operation"]
21912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
21913#[inline]
21914#[cfg(target_endian = "big")]
21915#[target_feature(enable = "neon")]
21916#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21917#[cfg_attr(test, assert_instr(nop))]
21918pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
21919    unsafe {
21920        let ret_val: float32x2_t = transmute(a);
21921        simd_shuffle!(ret_val, ret_val, [1, 0])
21922    }
21923}
21924#[doc = "Vector reinterpret cast operation"]
21925#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
21926#[inline]
21927#[cfg(target_endian = "little")]
21928#[target_feature(enable = "neon")]
21929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21930#[cfg_attr(test, assert_instr(nop))]
21931pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
21932    unsafe { transmute(a) }
21933}
21934#[doc = "Vector reinterpret cast operation"]
21935#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
21936#[inline]
21937#[cfg(target_endian = "big")]
21938#[target_feature(enable = "neon")]
21939#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21940#[cfg_attr(test, assert_instr(nop))]
21941pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
21942    unsafe {
21943        let ret_val: int8x8_t = transmute(a);
21944        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
21945    }
21946}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
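// Editor's note: a hedged sketch (hypothetical helper, not generated code)
// of vreinterpretq_p128_f64: the whole 128-bit Q register comes back as one
// p128 scalar. Assumes a little-endian aarch64 target, where lane 0 occupies
// the low 64 bits of the result.
#[cfg(all(test, target_endian = "little"))]
fn p128_demo() {
    let lo: float64x1_t = vdup_n_f64(f64::from_bits(1));
    let hi: float64x1_t = vdup_n_f64(f64::from_bits(2));
    let v: float64x2_t = vcombine_f64(lo, hi);
    let p: p128 = vreinterpretq_p128_f64(v);
    // Lane 0 (bits 0x1) in the low half, lane 1 (bits 0x2) in the high half.
    assert_eq!(p, (2u128 << 64) | 1);
}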
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
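// Editor's note: a hedged illustration (hypothetical helper, not generated
// code) of why the big-endian variants shuffle both the input and the result:
// with those shuffles in place, reinterpret casts compose and round-trip
// bit-exactly on either endianness, e.g. f64x2 -> u8x16 -> f64x2 returns the
// original value.
#[cfg(test)]
fn roundtrip_demo(a: float64x2_t) -> float64x2_t {
    let bytes: uint8x16_t = vreinterpretq_u8_f64(a);
    vreinterpretq_f64_u8(bytes) // bit-identical to `a`
}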
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
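// Editor's note: a hedged sketch (hypothetical test, not generated code) of
// the FRINT32X semantics above: each lane is rounded to an integral value in
// the signed 32-bit range, using the current FPCR rounding mode, which
// defaults to round-to-nearest with ties-to-even. Requires the `frintts`
// feature at runtime.
#[cfg(test)]
#[target_feature(enable = "neon,frintts")]
fn frint32x_demo() {
    let r = vrnd32xq_f32(vdupq_n_f32(2.5));
    // Ties-to-even under the default mode: 2.5 rounds to 2.0, not 3.0.
    assert_eq!(vgetq_lane_f32::<0>(r), 2.0);
}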
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
23225#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
23226#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
23227#[inline]
23228#[target_feature(enable = "neon,frintts")]
23229#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23230#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
23231pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
23232    unsafe extern "unadjusted" {
23233        #[cfg_attr(
23234            any(target_arch = "aarch64", target_arch = "arm64ec"),
23235            link_name = "llvm.aarch64.neon.frint64x.v2f32"
23236        )]
23237        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
23238    }
23239    unsafe { _vrnd64x_f32(a) }
23240}
23241#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
23242#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
23243#[inline]
23244#[target_feature(enable = "neon,frintts")]
23245#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23246#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
23247pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
23248    unsafe extern "unadjusted" {
23249        #[cfg_attr(
23250            any(target_arch = "aarch64", target_arch = "arm64ec"),
23251            link_name = "llvm.aarch64.neon.frint64x.v4f32"
23252        )]
23253        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
23254    }
23255    unsafe { _vrnd64xq_f32(a) }
23256}
23257#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
23258#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
23259#[inline]
23260#[target_feature(enable = "neon,frintts")]
23261#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23262#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
23263pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
23264    unsafe extern "unadjusted" {
23265        #[cfg_attr(
23266            any(target_arch = "aarch64", target_arch = "arm64ec"),
23267            link_name = "llvm.aarch64.neon.frint64x.v2f64"
23268        )]
23269        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
23270    }
23271    unsafe { _vrnd64xq_f64(a) }
23272}
23273#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
23274#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
23275#[inline]
23276#[target_feature(enable = "neon,frintts")]
23277#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23278#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
23279pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
23280    unsafe extern "unadjusted" {
23281        #[cfg_attr(
23282            any(target_arch = "aarch64", target_arch = "arm64ec"),
23283            link_name = "llvm.aarch64.frint64x.f64"
23284        )]
23285        fn _vrnd64x_f64(a: f64) -> f64;
23286    }
23287    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
23288}
23289#[doc = "Floating-point round to 64-bit integer toward zero"]
23290#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
23291#[inline]
23292#[target_feature(enable = "neon,frintts")]
23293#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23294#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23295pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
23296    unsafe extern "unadjusted" {
23297        #[cfg_attr(
23298            any(target_arch = "aarch64", target_arch = "arm64ec"),
23299            link_name = "llvm.aarch64.neon.frint64z.v2f32"
23300        )]
23301        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
23302    }
23303    unsafe { _vrnd64z_f32(a) }
23304}
23305#[doc = "Floating-point round to 64-bit integer toward zero"]
23306#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
23307#[inline]
23308#[target_feature(enable = "neon,frintts")]
23309#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23310#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23311pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
23312    unsafe extern "unadjusted" {
23313        #[cfg_attr(
23314            any(target_arch = "aarch64", target_arch = "arm64ec"),
23315            link_name = "llvm.aarch64.neon.frint64z.v4f32"
23316        )]
23317        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
23318    }
23319    unsafe { _vrnd64zq_f32(a) }
23320}
23321#[doc = "Floating-point round to 64-bit integer toward zero"]
23322#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
23323#[inline]
23324#[target_feature(enable = "neon,frintts")]
23325#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23326#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23327pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
23328    unsafe extern "unadjusted" {
23329        #[cfg_attr(
23330            any(target_arch = "aarch64", target_arch = "arm64ec"),
23331            link_name = "llvm.aarch64.neon.frint64z.v2f64"
23332        )]
23333        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
23334    }
23335    unsafe { _vrnd64zq_f64(a) }
23336}
23337#[doc = "Floating-point round to 64-bit integer toward zero"]
23338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
23339#[inline]
23340#[target_feature(enable = "neon,frintts")]
23341#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
23342#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
23343pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
23344    unsafe extern "unadjusted" {
23345        #[cfg_attr(
23346            any(target_arch = "aarch64", target_arch = "arm64ec"),
23347            link_name = "llvm.aarch64.frint64z.f64"
23348        )]
23349        fn _vrnd64z_f64(a: f64) -> f64;
23350    }
23351    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
23352}
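// Illustrative sketch, not produced by stdarch-gen-arm: exercising the
// FRINT32/FRINT64 intrinsics behind runtime feature detection, assuming a
// test environment where `std` and `is_aarch64_feature_detected!` are
// available. The saturation value for out-of-range inputs (the most
// negative 32-bit integer) follows Arm's description of FRINT32X; treat
// the exact constant as an assumption.
#[cfg(all(test, target_arch = "aarch64"))]
mod frint32_usage_sketch {
    use super::*;

    #[test]
    fn saturates_out_of_range_values() {
        if std::arch::is_aarch64_feature_detected!("frintts") {
            let big = vdupq_n_f64(1e20);
            // SAFETY: `frintts` availability was verified at runtime above.
            let r = unsafe { vrnd32xq_f64(big) };
            assert_eq!(vgetq_lane_f64::<0>(r), i32::MIN as f64);
        }
    }
}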
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_round(a) }
}
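// Illustrative sketch, not produced by stdarch-gen-arm: contrasting the
// truncating vrnd family (frintz) with the ties-away vrnda family (frinta),
// assuming a test environment where NEON is in the baseline feature set.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrnd_vs_vrnda_sketch {
    use super::*;

    #[test]
    fn trunc_and_round_away_differ_on_halves() {
        let half = vdup_n_f32(2.5);
        // frintz truncates toward zero ...
        assert_eq!(vget_lane_f32::<0>(vrnd_f32(half)), 2.0);
        // ... while frinta rounds ties away from zero.
        assert_eq!(vget_lane_f32::<0>(vrnda_f32(half)), 3.0);
    }
}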
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    unsafe { roundf16(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndh_f16(a: f16) -> f16 {
    unsafe { truncf16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    unsafe { _vrndih_f16(a) }
}
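// Illustrative sketch, not produced by stdarch-gen-arm: vrndi follows the
// rounding mode currently set in FPCR, so this assumes the process default
// of round-to-nearest, ties-to-even has not been altered.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrndi_usage_sketch {
    use super::*;

    #[test]
    fn default_mode_rounds_ties_to_even() {
        let r = vrndi_f32(vdup_n_f32(2.5));
        // Under the default mode, 2.5 rounds to the even neighbour 2.0.
        assert_eq!(vget_lane_f32::<0>(r), 2.0);
    }
}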
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    unsafe { floorf16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndnq_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f16"
        )]
        fn _vrndnh_f16(a: f16) -> f16;
    }
    unsafe { _vrndnh_f16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    unsafe { _vrndns_f32(a) }
}
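// Illustrative sketch, not produced by stdarch-gen-arm: the scalar
// ties-to-even behaviour of vrndns_f32, which is fixed regardless of the
// FPCR rounding mode.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrndns_usage_sketch {
    use super::*;

    #[test]
    fn ties_go_to_even() {
        assert_eq!(vrndns_f32(0.5), 0.0); // tie resolves to the even 0
        assert_eq!(vrndns_f32(1.5), 2.0); // tie resolves to the even 2
        assert_eq!(vrndns_f32(2.5), 2.0); // tie resolves to the even 2
    }
}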
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    unsafe { ceilf16(a) }
}
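// Illustrative sketch, not produced by stdarch-gen-arm: vrndp (frintp)
// rounds toward plus infinity while vrndm (frintm) rounds toward minus
// infinity, matching ceil and floor respectively.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrndp_vs_vrndm_sketch {
    use super::*;

    #[test]
    fn ceil_and_floor() {
        let v = vdup_n_f32(-1.2);
        assert_eq!(vget_lane_f32::<0>(vrndp_f32(v)), -1.0); // ceil
        assert_eq!(vget_lane_f32::<0>(vrndm_f32(v)), -2.0); // floor
    }
}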
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v4f16"
        )]
        fn _vrndx_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndx_f16(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v8f16"
        )]
        fn _vrndxq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndxq_f16(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v2f32"
        )]
        fn _vrndx_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndx_f32(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v4f32"
        )]
        fn _vrndxq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndxq_f32(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v1f64"
        )]
        fn _vrndx_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndx_f64(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.rint.v2f64"
        )]
        fn _vrndxq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndxq_f64(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxh_f16(a: f16) -> f16 {
    round_ties_even_f16(a)
}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_u64(a, -N as i64)
}
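// Illustrative sketch, not produced by stdarch-gen-arm: the rounding shift
// right adds 2^(N-1) before shifting, so it rounds rather than truncates.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrshrd_usage_sketch {
    use super::*;

    #[test]
    fn rounds_instead_of_truncating() {
        // (5 + 1) >> 1 = 3, where a plain shift would give 2.
        assert_eq!(vrshrd_n_s64::<1>(5), 3);
        // (6 + 2) >> 2 = 2 for the unsigned variant.
        assert_eq!(vrshrd_n_u64::<2>(6), 2);
    }
}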
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
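// Illustrative sketch, not produced by stdarch-gen-arm: the "_high" narrow
// variants keep `a` in the low half of the result and append the rounded,
// narrowed shift of `b` in the high half.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrshrn_high_usage_sketch {
    use super::*;

    #[test]
    fn low_half_kept_high_half_narrowed() {
        let low = vdup_n_s8(7);
        let wide = vdupq_n_s16(384);
        let r = vrshrn_high_n_s16::<8>(low, wide);
        // Lane 0 comes from `low`; lane 15 is (384 + 128) >> 8 = 2.
        assert_eq!(vgetq_lane_s8::<0>(r), 7);
        assert_eq!(vgetq_lane_s8::<15>(r), 2);
    }
}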
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
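// Illustrative sketch, not produced by stdarch-gen-arm: the intended
// pairing of the reciprocal square-root estimate (frsqrte) with the step
// instruction (frsqrts), which computes (3 - a*b)/2 and so drives a
// Newton-Raphson refinement.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrsqrte_refinement_sketch {
    use super::*;

    #[test]
    fn one_newton_step_tightens_the_estimate() {
        let x = 2.0f32;
        let mut e = vrsqrtes_f32(x); // rough 1/sqrt(x)
        e *= vrsqrtss_f32(x * e, e); // e' = e * (3 - x*e*e) / 2
        assert!((e - core::f32::consts::FRAC_1_SQRT_2).abs() < 1e-3);
    }
}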
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
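// Illustrative sketch, not produced by stdarch-gen-arm: rounding shift
// right and accumulate is the rounding shift followed by a wrapping add
// into the accumulator.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrsrad_usage_sketch {
    use super::*;

    #[test]
    fn shifts_then_accumulates() {
        // (5 + 1) >> 1 = 3, accumulated onto 10.
        assert_eq!(vrsrad_n_s64::<1>(10, 5), 13);
    }
}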
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
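// Illustrative sketch, not produced by stdarch-gen-arm: the rounding
// subtract-high-narrow keeps `a` as the low half and appends the rounded
// top halves of the lane-wise differences.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrsubhn_high_usage_sketch {
    use super::*;

    #[test]
    fn narrows_the_rounded_difference() {
        let a = vdup_n_s8(0);
        let b = vdupq_n_s16(384);
        let c = vdupq_n_s16(0);
        let r = vrsubhn_high_s16(a, b, c);
        // Lane 15 holds (384 - 0 + 128) >> 8 = 2.
        assert_eq!(vgetq_lane_s8::<15>(r), 2);
    }
}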
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
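// Illustrative sketch, not produced by stdarch-gen-arm: inserting a scalar
// into one lane leaves the other lanes of `b` untouched.
#[cfg(all(test, target_arch = "aarch64"))]
mod vsetq_lane_usage_sketch {
    use super::*;

    #[test]
    fn only_the_selected_lane_changes() {
        let v = vdupq_n_f64(0.0);
        let v = vsetq_lane_f64::<1>(2.5, v);
        assert_eq!(vgetq_lane_f64::<0>(v), 0.0);
        assert_eq!(vgetq_lane_f64::<1>(v), 2.5);
    }
}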
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
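// How the four SHA512 intrinsics above combine in practice, per the editor's
// reading of common A64 SHA-512 implementations (editor's addition,
// illustrative only; `w0`..`w7` are hypothetical names for eight uint64x2_t
// registers holding the sixteen 64-bit message-schedule words):
//
//     // Message-schedule expansion: derive the next two schedule words.
//     let w0 = vsha512su1q_u64(vsha512su0q_u64(w0, w1), w7, vextq_u64::<1>(w4, w5));
//
// `vsha512hq_u64` and `vsha512h2q_u64` then consume schedule words plus round
// constants to advance the hash state two rounds per call.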
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
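// Like the A64 SSHL/USHL instructions they compile to, these scalar shifts
// take the shift count in a register, and a negative count shifts right
// (arithmetically for the signed form, logically for the unsigned one). Two
// spot checks (editor's addition, illustrative only):
//
//     assert_eq!(vshld_s64(1, 8), 256);     // positive count: shift left
//     assert_eq!(vshld_s64(-256, -4), -16); // negative count: shift right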
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
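// The `_high` widening shifts read only the upper half of their 128-bit
// input, so pairing a plain `vshll_n_*` with its `_high` counterpart widens
// all sixteen lanes (editor's addition, illustrative only; `widen_by_four`
// is a hypothetical wrapper):
//
//     #[target_feature(enable = "neon")]
//     fn widen_by_four(a: int8x16_t) -> (int16x8_t, int16x8_t) {
//         (vshll_n_s8::<2>(vget_low_s8(a)), vshll_high_n_s8::<2>(a))
//     }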
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
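// These narrow by truncating after a right shift, and the `_high` form
// appends the narrowed lanes above an existing low half. Shifting by the full
// destination lane width keeps only the high byte of each 16-bit lane
// (editor's addition, illustrative only; `high_bytes` is hypothetical):
//
//     #[target_feature(enable = "neon")]
//     fn high_bytes(lo_src: int16x8_t, hi_src: int16x8_t) -> int8x16_t {
//         let low = vshrn_n_s16::<8>(lo_src); // lanes 0..7
//         vshrn_high_n_s16::<8>(low, hi_src)  // lanes 8..15
//     }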
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 31);
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert_uimm_bits!(N, 3);
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert_uimm_bits!(N, 4);
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 0 && N <= 63);
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
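// Per lane, SLI computes `(b << N) | (a & ((1 << N) - 1))`: the shifted second
// operand is inserted while the low N bits of the first operand survive. A
// scalar spot check (editor's addition, illustrative only):
//
//     assert_eq!(vslid_n_s64::<4>(0x00ff, 0x0001), 0x001f);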
#[doc = "SM3PARTW1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw1"
        )]
        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm3partw1q_u32(a, b, c) }
}
#[doc = "SM3PARTW2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw2"
        )]
        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm3partw2q_u32(a, b, c) }
}
#[doc = "SM3SS1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3ss1"
        )]
        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm3ss1q_u32(a, b, c) }
}
#[doc = "SM3TT1A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT1B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
}
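// In the SM3 compression function, `vsm3ss1q_u32` forms the SS1 rotation and
// the TT intrinsics perform the TT1/TT2 state updates, with the `IMM2` const
// (0..=3) selecting which 32-bit word of the final operand feeds the round.
// A call-shape sketch only (editor's addition; `state`, `ss1`, and `wj` are
// hypothetical names, and the full operand wiring is omitted):
//
//     let state = vsm3tt1aq_u32::<0>(state, ss1, wj); // use word 0 of `wj`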
#[doc = "SM4 key"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4ekey"
        )]
        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm4ekeyq_u32(a, b) }
}
#[doc = "SM4 encode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4e"
        )]
        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vsm4eq_u32(a, b) }
}
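// SM4E performs four SM4 rounds per invocation, so a full 32-round block
// encryption is eight calls, each consuming four round keys; SM4EKEY likewise
// derives four round keys at a time. A sketch under those assumptions
// (editor's addition, illustrative only; `block` and `round_keys` are
// hypothetical):
//
//     let mut state = block;              // uint32x4_t plaintext block
//     for rk4 in round_keys {             // [uint32x4_t; 8] round-key groups
//         state = vsm4eq_u32(state, rk4); // four rounds per call
//     }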
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i8"
        )]
        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
    }
    unsafe { _vsqadd_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v16i8"
        )]
        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
    }
    unsafe { _vsqaddq_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i16"
        )]
        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
    }
    unsafe { _vsqadd_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i16"
        )]
        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
    }
    unsafe { _vsqaddq_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i32"
        )]
        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
    }
    unsafe { _vsqadd_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i32"
        )]
        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
    }
    unsafe { _vsqaddq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v1i64"
        )]
        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
    }
    unsafe { _vsqadd_u64(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i64"
        )]
        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
    }
    unsafe { _vsqaddq_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i64"
        )]
        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vsqaddd_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i32"
        )]
        fn _vsqadds_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vsqadds_u32(a, b) }
}
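// USQADD adds a signed value into an unsigned accumulator and saturates at
// both ends of the unsigned range. Scalar spot checks (editor's addition,
// illustrative only):
//
//     assert_eq!(vsqaddb_u8(250, 10), 255); // clamped at u8::MAX
//     assert_eq!(vsqaddb_u8(5, -10), 0);    // clamped at zero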
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn vsqrth_f16(a: f16) -> f16 {
    unsafe { sqrtf16(a) }
}
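// A one-line sketch of the lane-wise square root (editor's addition,
// illustrative only):
//
//     let r = vsqrtq_f32(vdupq_n_f32(9.0)); // every lane becomes 3.0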
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i8"
        )]
        fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    unsafe { _vsri_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v16i8"
        )]
        fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    unsafe { _vsriq_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i16"
        )]
        fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    unsafe { _vsri_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i16"
        )]
        fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    unsafe { _vsriq_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i32"
        )]
        fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    unsafe { _vsri_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i32"
        )]
        fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    unsafe { _vsriq_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v1i64"
        )]
        fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    unsafe { _vsri_n_s64(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i64"
        )]
        fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    unsafe { _vsriq_n_s64(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
25546#[doc = "Shift Right and Insert (immediate)"]
25547#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
25548#[inline]
25549#[target_feature(enable = "neon")]
25550#[cfg_attr(test, assert_instr(sri, N = 1))]
25551#[rustc_legacy_const_generics(2)]
25552#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25553pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
25554    static_assert!(N >= 1 && N <= 8);
25555    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
25556}
25557#[doc = "Shift Right and Insert (immediate)"]
25558#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
25559#[inline]
25560#[target_feature(enable = "neon")]
25561#[cfg_attr(test, assert_instr(sri, N = 1))]
25562#[rustc_legacy_const_generics(2)]
25563#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25564pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
25565    static_assert!(N >= 1 && N <= 8);
25566    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25567}
25568#[doc = "Shift Right and Insert (immediate)"]
25569#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
25570#[inline]
25571#[target_feature(enable = "neon")]
25572#[cfg_attr(test, assert_instr(sri, N = 1))]
25573#[rustc_legacy_const_generics(2)]
25574#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25575pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
25576    static_assert!(N >= 1 && N <= 16);
25577    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25578}
25579#[doc = "Shift Right and Insert (immediate)"]
25580#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
25581#[inline]
25582#[target_feature(enable = "neon")]
25583#[cfg_attr(test, assert_instr(sri, N = 1))]
25584#[rustc_legacy_const_generics(2)]
25585#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25586pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
25587    static_assert!(N >= 1 && N <= 16);
25588    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25589}
25590#[doc = "Shift Right and Insert (immediate)"]
25591#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
25592#[inline]
25593#[target_feature(enable = "neon,aes")]
25594#[cfg_attr(test, assert_instr(sri, N = 1))]
25595#[rustc_legacy_const_generics(2)]
25596#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25597pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
25598    static_assert!(N >= 1 && N <= 64);
25599    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25600}
25601#[doc = "Shift Right and Insert (immediate)"]
25602#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
25603#[inline]
25604#[target_feature(enable = "neon,aes")]
25605#[cfg_attr(test, assert_instr(sri, N = 1))]
25606#[rustc_legacy_const_generics(2)]
25607#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25608pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
25609    static_assert!(N >= 1 && N <= 64);
25610    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25611}
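// Editor's note: the commented sketch below is illustrative only and is not part of
// the generated output. It shows the SRI bit-insertion semantics shared by all the
// `vsri*` intrinsics above: each lane of the result keeps the top N bits of `a` and
// takes its remaining bits from `b >> N`. It assumes an aarch64 context where NEON
// is enabled (e.g. stdarch's own `#[simd_test(enable = "neon")]` harness).
//
// unsafe fn sri_sketch() {
//     let a = vdupq_n_u8(0xF0);
//     let b = vdupq_n_u8(0xFF);
//     // Per lane: keep the top 4 bits of `a`, insert `b >> 4` into the low 4 bits:
//     // (0xF0 & 0xF0) | (0xFF >> 4) == 0xFF.
//     let r = vsriq_n_u8::<4>(a, b);
//     assert_eq!(vgetq_lane_u8::<0>(r), 0xFF);
// }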
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
}
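// Editor's note: a commented sketch (not generated code) for the scalar `vsrid_n_*`
// forms, which apply the same rule to a single 64-bit value: keep the top N bits of
// `a`, fill the rest with `b >> N`. Same NEON-enabled assumption as above.
//
// unsafe fn srid_sketch() {
//     // Keep the top 8 bits of `a` (all ones); `b >> 8` contributes zeros below them.
//     let r = vsrid_n_u64::<8>(u64::MAX, 0);
//     assert_eq!(r, 0xFF00_0000_0000_0000);
// }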
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    crate::ptr::write_unaligned(ptr.cast(), a)
}
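// Editor's note: a commented usage sketch (not generated code). The `vst1*` stores
// above compile to a plain `write_unaligned`, so the destination pointer needs no
// particular alignment, only validity for the full vector width. Same NEON-enabled
// assumption as the earlier sketches.
//
// unsafe fn vst1_sketch() {
//     let mut buf = [0u8; 16];
//     vst1q_u8(buf.as_mut_ptr(), vdupq_n_u8(0x2A)); // one 16-byte store
//     assert_eq!(buf, [0x2A; 16]);
// }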
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
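// Editor's note: a commented sketch (not generated code). The `_x2`/`_x3`/`_x4`
// variants store their constituent vectors back-to-back, so the destination must be
// valid for two, three, or four vector widths respectively. Same NEON-enabled
// assumption as the earlier sketches.
//
// unsafe fn vst1_x2_sketch() {
//     let mut out = [0.0f64; 2];
//     let pair = float64x1x2_t(vdup_n_f64(1.0), vdup_n_f64(2.0));
//     vst1_f64_x2(out.as_mut_ptr(), pair); // writes out[0] = 1.0, out[1] = 2.0
//     assert_eq!(out, [1.0, 2.0]);
// }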
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    static_assert!(LANE == 0);
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    *a = simd_extract!(b, LANE as u32);
}
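// Editor's note: a commented sketch (not generated code). The `_lane` stores write a
// single selected element, so the destination only needs room for one scalar. Same
// NEON-enabled assumption as the earlier sketches.
//
// unsafe fn vst1_lane_sketch() {
//     let v = vsetq_lane_f64::<1>(42.0, vdupq_n_f64(0.0));
//     let mut out = 0.0f64;
//     vst1q_lane_f64::<1>(&mut out, v); // stores lane 1 only
//     assert_eq!(out, 42.0);
// }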
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st1))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v1f64.p0"
        )]
        fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    _vst2_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2f64.p0"
        )]
        fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    _vst2q_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2i64.p0"
        )]
        fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    _vst2q_s64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    vst2q_s64(transmute(a), transmute(b))
}
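// Editor's note: a commented sketch (not generated code) of the st2 interleaving
// performed by the `vst2*` stores above: elements are written in the order
// b.0[0], b.1[0], b.0[1], b.1[1], ... Same NEON-enabled assumption as before.
//
// unsafe fn vst2_sketch() {
//     let b = int64x2x2_t(vld1q_s64([1, 2].as_ptr()), vld1q_s64([10, 20].as_ptr()));
//     let mut out = [0i64; 4];
//     vst2q_s64(out.as_mut_ptr(), b);
//     assert_eq!(out, [1, 10, 2, 20]); // pairs interleaved lane by lane
// }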
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v1f64.p0"
        )]
        fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
    }
    _vst3_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    static_assert!(LANE == 0);
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    static_assert!(LANE == 0);
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2f64.p0"
        )]
        fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
    }
    _vst3q_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2i64.p0"
        )]
        fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
    }
    _vst3q_s64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    vst3q_s64(transmute(a), transmute(b))
}
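// Editor's note: a commented sketch (not generated code); st3 interleaves triples the
// same way st2 interleaves pairs: b.0[0], b.1[0], b.2[0], then the next lane's triple.
//
// unsafe fn vst3_sketch() {
//     let b = int64x2x3_t(
//         vld1q_s64([1, 2].as_ptr()),
//         vld1q_s64([10, 20].as_ptr()),
//         vld1q_s64([100, 200].as_ptr()),
//     );
//     let mut out = [0i64; 6];
//     vst3q_s64(out.as_mut_ptr(), b);
//     assert_eq!(out, [1, 10, 100, 2, 20, 200]);
// }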
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v1f64.p0"
        )]
        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    static_assert!(LANE == 0);
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    static_assert!(LANE == 0);
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
26722#[doc = "Store multiple 4-element structures from four registers"]
26723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
26724#[doc = "## Safety"]
26725#[doc = "  * Neon instrinsic unsafe"]
26726#[inline]
26727#[target_feature(enable = "neon")]
26728#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26729#[cfg_attr(test, assert_instr(st4))]
26730pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
26731    unsafe extern "unadjusted" {
26732        #[cfg_attr(
26733            any(target_arch = "aarch64", target_arch = "arm64ec"),
26734            link_name = "llvm.aarch64.neon.st4.v2f64.p0"
26735        )]
26736        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
26737    }
26738    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
26739}
26740#[doc = "Store multiple 4-element structures from four registers"]
26741#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
26742#[doc = "## Safety"]
26743#[doc = "  * Neon instrinsic unsafe"]
26744#[inline]
26745#[target_feature(enable = "neon")]
26746#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26747#[cfg_attr(test, assert_instr(st4))]
26748pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
26749    unsafe extern "unadjusted" {
26750        #[cfg_attr(
26751            any(target_arch = "aarch64", target_arch = "arm64ec"),
26752            link_name = "llvm.aarch64.neon.st4.v2i64.p0"
26753        )]
26754        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
26755    }
26756    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
26757}
26758#[doc = "Store multiple 4-element structures from four registers"]
26759#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
26760#[doc = "## Safety"]
26761#[doc = "  * Neon instrinsic unsafe"]
26762#[inline]
26763#[target_feature(enable = "neon")]
26764#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26765#[rustc_legacy_const_generics(2)]
26766#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26767pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
26768    static_assert_uimm_bits!(LANE, 1);
26769    unsafe extern "unadjusted" {
26770        #[cfg_attr(
26771            any(target_arch = "aarch64", target_arch = "arm64ec"),
26772            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
26773        )]
26774        fn _vst4q_lane_f64(
26775            a: float64x2_t,
26776            b: float64x2_t,
26777            c: float64x2_t,
26778            d: float64x2_t,
26779            n: i64,
26780            ptr: *mut i8,
26781        );
26782    }
26783    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
26784}
26785#[doc = "Store multiple 4-element structures from four registers"]
26786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
26787#[doc = "## Safety"]
26788#[doc = "  * Neon instrinsic unsafe"]
26789#[inline]
26790#[target_feature(enable = "neon")]
26791#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26792#[rustc_legacy_const_generics(2)]
26793#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26794pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
26795    static_assert_uimm_bits!(LANE, 4);
26796    unsafe extern "unadjusted" {
26797        #[cfg_attr(
26798            any(target_arch = "aarch64", target_arch = "arm64ec"),
26799            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
26800        )]
26801        fn _vst4q_lane_s8(
26802            a: int8x16_t,
26803            b: int8x16_t,
26804            c: int8x16_t,
26805            d: int8x16_t,
26806            n: i64,
26807            ptr: *mut i8,
26808        );
26809    }
26810    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
26811}
26812#[doc = "Store multiple 4-element structures from four registers"]
26813#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
26814#[doc = "## Safety"]
26815#[doc = "  * Neon instrinsic unsafe"]
26816#[inline]
26817#[target_feature(enable = "neon")]
26818#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26819#[rustc_legacy_const_generics(2)]
26820#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26821pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
26822    static_assert_uimm_bits!(LANE, 1);
26823    unsafe extern "unadjusted" {
26824        #[cfg_attr(
26825            any(target_arch = "aarch64", target_arch = "arm64ec"),
26826            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
26827        )]
26828        fn _vst4q_lane_s64(
26829            a: int64x2_t,
26830            b: int64x2_t,
26831            c: int64x2_t,
26832            d: int64x2_t,
26833            n: i64,
26834            ptr: *mut i8,
26835        );
26836    }
26837    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
26838}
26839#[doc = "Store multiple 4-element structures from four registers"]
26840#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
26841#[doc = "## Safety"]
26842#[doc = "  * Neon instrinsic unsafe"]
26843#[inline]
26844#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26845#[target_feature(enable = "neon,aes")]
26846#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26847#[rustc_legacy_const_generics(2)]
26848pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
26849    static_assert_uimm_bits!(LANE, 1);
26850    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
26851}
26852#[doc = "Store multiple 4-element structures from four registers"]
26853#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
26854#[doc = "## Safety"]
26855#[doc = "  * Neon instrinsic unsafe"]
26856#[inline]
26857#[target_feature(enable = "neon")]
26858#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26859#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26860#[rustc_legacy_const_generics(2)]
26861pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
26862    static_assert_uimm_bits!(LANE, 4);
26863    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
26864}
26865#[doc = "Store multiple 4-element structures from four registers"]
26866#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
26867#[doc = "## Safety"]
26868#[doc = "  * Neon instrinsic unsafe"]
26869#[inline]
26870#[target_feature(enable = "neon")]
26871#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26872#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26873#[rustc_legacy_const_generics(2)]
26874pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
26875    static_assert_uimm_bits!(LANE, 1);
26876    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
26877}
26878#[doc = "Store multiple 4-element structures from four registers"]
26879#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
26880#[doc = "## Safety"]
26881#[doc = "  * Neon instrinsic unsafe"]
26882#[inline]
26883#[target_feature(enable = "neon")]
26884#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26885#[cfg_attr(test, assert_instr(st4, LANE = 0))]
26886#[rustc_legacy_const_generics(2)]
26887pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
26888    static_assert_uimm_bits!(LANE, 4);
26889    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
26890}
26891#[doc = "Store multiple 4-element structures from four registers"]
26892#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
26893#[doc = "## Safety"]
26894#[doc = "  * Neon instrinsic unsafe"]
26895#[inline]
26896#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26897#[target_feature(enable = "neon,aes")]
26898#[cfg_attr(test, assert_instr(st4))]
26899pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
26900    vst4q_s64(transmute(a), transmute(b))
26901}
26902#[doc = "Store multiple 4-element structures from four registers"]
26903#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
26904#[doc = "## Safety"]
26905#[doc = "  * Neon instrinsic unsafe"]
26906#[inline]
26907#[target_feature(enable = "neon")]
26908#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26909#[cfg_attr(test, assert_instr(st4))]
26910pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
26911    vst4q_s64(transmute(a), transmute(b))
26912}
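// Editorial usage sketch, not generator output: the vst4 family performs an
// interleaved store. The helper below is hypothetical and assumes a NEON
// target and a `dst` valid for writing 8 u64 values (64 bytes).
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vst4q_u64_usage(dst: *mut u64) {
    // Four 2-lane registers {a0,a1} .. {d0,d1} are written interleaved as
    // a0, b0, c0, d0, a1, b1, c1, d1.
    let regs = uint64x2x4_t(
        vdupq_n_u64(0),
        vdupq_n_u64(1),
        vdupq_n_u64(2),
        vdupq_n_u64(3),
    );
    vst4q_u64(dst, regs);
}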
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_sub(b)
}
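// Editorial note, a sketch rather than generator output: the scalar forms
// above use `wrapping_sub`, so they compute the modular difference instead
// of panicking on overflow, e.g. vsubd_u64(0, 1) == u64::MAX and
// vsubd_s64(i64::MIN, 1) == i64::MAX.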
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl))]
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl))]
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl))]
pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint16x8_t = simd_cast(c);
        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl))]
pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint32x4_t = simd_cast(c);
        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: uint32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl))]
pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint64x2_t = simd_cast(c);
        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: uint64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
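// Editorial note, a sketch rather than generator output: every
// vsubl_high_* form above reads only the upper half of each input (the
// simd_shuffle! selecting lanes [8..16], [4..8] or [2..4]), widens those
// lanes to twice the element width, and subtracts at the wider width.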
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw))]
pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
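// Editorial note, a sketch rather than generator output: the wide
// (vsubw_high_*) forms differ from the long forms above in that the first
// operand is already at the wider element width; only the upper half of
// the second, narrower operand is widened before the subtraction.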
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, transmute(c), b)
    }
}
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, transmute(c), b)
    }
}
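// Editorial usage sketch, not generator output: the sudot lane forms reuse
// one 32-bit group of the unsigned operand for every accumulator lane. The
// helper below is hypothetical and assumes NEON and i8mm are available.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,i8mm")]
fn example_vsudotq_laneq_s32_usage(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
    // With LANE == 1, each result lane i is
    //   a[i] + dot(signed b[4*i..4*i+4], unsigned c[4..8]).
    vsudotq_laneq_s32::<1>(a, b, c)
}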
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        transmute(b)
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
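// Editorial note, a sketch rather than generator output: vtbl1 emulates the
// AArch32 8-byte-table look-up by zero-extending the table to 16 bytes, so
// indices 0..8 select table bytes while indices >= 8 land in the zeroed
// half and yield 0, matching the out-of-range behaviour of TBL.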
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
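// Editorial note, a sketch rather than generator output: on big-endian
// targets the in-register lane order is reversed relative to little-endian,
// so the big-endian variants below shuffle each operand into little-endian
// lane order, perform the same look-up, and shuffle the result back.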
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
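// Editorial note, a sketch rather than generator output: unlike the vtbl1
// forms, the extended look-ups above keep the destination lane when an
// index is out of range; the simd_select keeps lanes of `a` wherever the
// corresponding index in `c` is not below 8.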
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
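// Editorial usage sketch, not generator output: TRN1 interleaves the
// even-numbered lanes of its two inputs. The helper below is hypothetical
// and assumes a NEON target.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vtrn1_s16_usage(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // [a0, a1, a2, a3], [b0, b1, b2, b3] -> [a0, b0, a2, b2]
    vtrn1_s16(a, b)
}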
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
27966pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
27967    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
27968}
27969#[doc = "Transpose vectors"]
27970#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
27971#[inline]
27972#[target_feature(enable = "neon")]
27973#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27974#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
27975pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
27976    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
27977}
27978#[doc = "Transpose vectors"]
27979#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
27980#[inline]
27981#[target_feature(enable = "neon,fp16")]
27982#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
27983#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27984pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
27985    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
27986}
27987#[doc = "Transpose vectors"]
27988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
27989#[inline]
27990#[target_feature(enable = "neon,fp16")]
27991#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
27992#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
27993pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
27994    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
27995}
27996#[doc = "Transpose vectors"]
27997#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
27998#[inline]
27999#[target_feature(enable = "neon")]
28000#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28001#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28002pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
28003    unsafe { simd_shuffle!(a, b, [1, 3]) }
28004}
28005#[doc = "Transpose vectors"]
28006#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
28007#[inline]
28008#[target_feature(enable = "neon")]
28009#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28010#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28011pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
28012    unsafe { simd_shuffle!(a, b, [1, 3]) }
28013}
28014#[doc = "Transpose vectors"]
28015#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
28016#[inline]
28017#[target_feature(enable = "neon")]
28018#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28019#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28020pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
28021    unsafe { simd_shuffle!(a, b, [1, 3]) }
28022}
28023#[doc = "Transpose vectors"]
28024#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
28025#[inline]
28026#[target_feature(enable = "neon")]
28027#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28028#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28029pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
28030    unsafe { simd_shuffle!(a, b, [1, 3]) }
28031}
28032#[doc = "Transpose vectors"]
28033#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
28034#[inline]
28035#[target_feature(enable = "neon")]
28036#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28037#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28038pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
28039    unsafe { simd_shuffle!(a, b, [1, 3]) }
28040}
28041#[doc = "Transpose vectors"]
28042#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
28043#[inline]
28044#[target_feature(enable = "neon")]
28045#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28046#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28047pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
28048    unsafe { simd_shuffle!(a, b, [1, 3]) }
28049}
28050#[doc = "Transpose vectors"]
28051#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
28052#[inline]
28053#[target_feature(enable = "neon")]
28054#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28055#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
28056pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
28057    unsafe { simd_shuffle!(a, b, [1, 3]) }
28058}
28059#[doc = "Transpose vectors"]
28060#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
28061#[inline]
28062#[target_feature(enable = "neon")]
28063#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28064#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28065pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
28066    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28067}
28068#[doc = "Transpose vectors"]
28069#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
28070#[inline]
28071#[target_feature(enable = "neon")]
28072#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28073#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28074pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
28075    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28076}
28077#[doc = "Transpose vectors"]
28078#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
28079#[inline]
28080#[target_feature(enable = "neon")]
28081#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28082#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28083pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
28084    unsafe {
28085        simd_shuffle!(
28086            a,
28087            b,
28088            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
28089        )
28090    }
28091}
28092#[doc = "Transpose vectors"]
28093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
28094#[inline]
28095#[target_feature(enable = "neon")]
28096#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28097#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28098pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
28099    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28100}
28101#[doc = "Transpose vectors"]
28102#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
28103#[inline]
28104#[target_feature(enable = "neon")]
28105#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28106#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28107pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
28108    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28109}
28110#[doc = "Transpose vectors"]
28111#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
28112#[inline]
28113#[target_feature(enable = "neon")]
28114#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28115#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28116pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
28117    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28118}
28119#[doc = "Transpose vectors"]
28120#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
28121#[inline]
28122#[target_feature(enable = "neon")]
28123#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28124#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28125pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
28126    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28127}
28128#[doc = "Transpose vectors"]
28129#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
28130#[inline]
28131#[target_feature(enable = "neon")]
28132#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28133#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28134pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
28135    unsafe {
28136        simd_shuffle!(
28137            a,
28138            b,
28139            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
28140        )
28141    }
28142}
28143#[doc = "Transpose vectors"]
28144#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
28145#[inline]
28146#[target_feature(enable = "neon")]
28147#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28148#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28149pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
28150    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28151}
28152#[doc = "Transpose vectors"]
28153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
28154#[inline]
28155#[target_feature(enable = "neon")]
28156#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28157#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28158pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
28159    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28160}
28161#[doc = "Transpose vectors"]
28162#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
28163#[inline]
28164#[target_feature(enable = "neon")]
28165#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28166#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28167pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
28168    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28169}
28170#[doc = "Transpose vectors"]
28171#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
28172#[inline]
28173#[target_feature(enable = "neon")]
28174#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28175#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28176pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
28177    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28178}
28179#[doc = "Transpose vectors"]
28180#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
28181#[inline]
28182#[target_feature(enable = "neon")]
28183#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28184#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28185pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
28186    unsafe {
28187        simd_shuffle!(
28188            a,
28189            b,
28190            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
28191        )
28192    }
28193}
28194#[doc = "Transpose vectors"]
28195#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
28196#[inline]
28197#[target_feature(enable = "neon")]
28198#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28199#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28200pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
28201    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
28202}
28203#[doc = "Transpose vectors"]
28204#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
28205#[inline]
28206#[target_feature(enable = "neon")]
28207#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28208#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
28209pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
28210    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
28211}
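// NOTE (editorial comment, not generated code): the `vtrn1*` intrinsics above
// gather the even-numbered lanes of a 2x2 lane transposition of `a` and `b`,
// while the `vtrn2*` intrinsics gather the odd-numbered lanes. A minimal usage
// sketch, assuming a user crate on an AArch64 target with the `neon` feature:
//
//     use core::arch::aarch64::*;
//
//     let a: [i16; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
//     let b: [i16; 8] = [8, 9, 10, 11, 12, 13, 14, 15];
//     let mut even = [0i16; 8];
//     let mut odd = [0i16; 8];
//     unsafe {
//         let va = vld1q_s16(a.as_ptr());
//         let vb = vld1q_s16(b.as_ptr());
//         vst1q_s16(even.as_mut_ptr(), vtrn1q_s16(va, vb));
//         vst1q_s16(odd.as_mut_ptr(), vtrn2q_s16(va, vb));
//     }
//     assert_eq!(even, [0, 8, 2, 10, 4, 12, 6, 14]);
//     assert_eq!(odd, [1, 9, 3, 11, 5, 13, 7, 15]);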
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe {
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe {
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe {
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe {
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe {
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe {
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
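// NOTE (editorial comment, not generated code): `vtst*` computes `(a & b) != 0`
// per lane, producing an all-ones mask lane wherever the operands share at
// least one set bit and zero otherwise; `vtstd_*` are the scalar 64-bit forms.
// A minimal usage sketch, assuming AArch64 with the `neon` feature:
//
//     use core::arch::aarch64::*;
//
//     let a: [u64; 2] = [0b1010, 0];
//     let b: [u64; 2] = [0b0110, 7];
//     let mut mask = [0u64; 2];
//     unsafe {
//         let m = vtstq_u64(vld1q_u64(a.as_ptr()), vld1q_u64(b.as_ptr()));
//         vst1q_u64(mask.as_mut_ptr(), m);
//     }
//     // lane 0 overlaps on bit 1; lane 1 has no common set bit
//     assert_eq!(mask, [u64::MAX, 0]);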
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    unsafe { _vuqaddq_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    unsafe { _vuqadds_s32(a, b) }
}
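// NOTE (editorial comment, not generated code): the `vuqadd*` family (SUQADD)
// adds an unsigned operand into a signed accumulator with signed saturation,
// so out-of-range sums clamp to the signed maximum instead of wrapping. A
// minimal sketch of the scalar forms, assuming AArch64 with `neon`:
//
//     use core::arch::aarch64::*;
//
//     unsafe {
//         assert_eq!(vuqaddb_s8(100, 100), i8::MAX); // 200 saturates to 127
//         assert_eq!(vuqaddd_s64(-5, 3), -2);        // in-range sums are exact
//     }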
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, b, transmute(c))
    }
}
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, b, transmute(c))
    }
}
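// NOTE (editorial comment, not generated code): the `vusdot*_laneq_s32` forms
// broadcast one 32-bit group of four signed bytes from `c` (chosen by `LANE`),
// then accumulate into each 32-bit lane of `a` the dot product of that group
// with the corresponding four unsigned bytes of `b`. A minimal sketch,
// assuming AArch64 with the `neon` and `i8mm` features:
//
//     use core::arch::aarch64::*;
//
//     let a: [i32; 2] = [0, 0];
//     let b: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
//     let c: [i8; 16] = [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
//     let mut r = [0i32; 2];
//     unsafe {
//         let acc = vusdot_laneq_s32::<0>(
//             vld1_s32(a.as_ptr()),
//             vld1_u8(b.as_ptr()),
//             vld1q_s8(c.as_ptr()),
//         );
//         vst1_s32(r.as_mut_ptr(), acc);
//     }
//     assert_eq!(r, [1 + 2 + 3 + 4, 5 + 6 + 7 + 8]); // lane 0 of `c` is all ones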
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
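// NOTE (editorial comment, not generated code): `vuzp1*` de-interleaves two
// vectors by keeping the even-indexed elements of the concatenation `a:b`.
// A minimal usage sketch, assuming AArch64 with the `neon` feature:
//
//     use core::arch::aarch64::*;
//
//     let a: [i16; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
//     let b: [i16; 8] = [8, 9, 10, 11, 12, 13, 14, 15];
//     let mut evens = [0i16; 8];
//     unsafe {
//         vst1q_s16(
//             evens.as_mut_ptr(),
//             vuzp1q_s16(vld1q_s16(a.as_ptr()), vld1q_s16(b.as_ptr())),
//         );
//     }
//     assert_eq!(evens, [0, 2, 4, 6, 8, 10, 12, 14]);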
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
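// NOTE (editorial comment, not generated code): `vuzp2*` is the odd-index
// counterpart of `vuzp1*` above; with the same `a` and `b` as in that sketch,
// `vuzp2q_s16(va, vb)` would yield [1, 3, 5, 7, 9, 11, 13, 15].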
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
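// NOTE (editorial comment, not generated code): `vxarq_u64` (SHA3 XAR) rotates
// `a ^ b` right by the constant `IMM6` in each 64-bit lane. A minimal sketch,
// assuming AArch64 with the `neon` and `sha3` features:
//
//     use core::arch::aarch64::*;
//
//     let a: [u64; 2] = [1, 0b1000];
//     let b: [u64; 2] = [0, 0];
//     let mut r = [0u64; 2];
//     unsafe {
//         vst1q_u64(
//             r.as_mut_ptr(),
//             vxarq_u64::<1>(vld1q_u64(a.as_ptr()), vld1q_u64(b.as_ptr())),
//         );
//     }
//     assert_eq!(r, [1u64.rotate_right(1), 0b100]); // (a ^ 0) rotated right by 1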
29003#[doc = "Zip vectors"]
29004#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
29005#[inline]
29006#[target_feature(enable = "neon,fp16")]
29007#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
29008#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
29009pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
29010    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
29011}
29012#[doc = "Zip vectors"]
29013#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
29014#[inline]
29015#[target_feature(enable = "neon,fp16")]
29016#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
29017#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
29018pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
29019    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
29020}
29021#[doc = "Zip vectors"]
29022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
29023#[inline]
29024#[target_feature(enable = "neon")]
29025#[stable(feature = "neon_intrinsics", since = "1.59.0")]
29026#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
29027pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
29028    unsafe { simd_shuffle!(a, b, [0, 2]) }
29029}
29030#[doc = "Zip vectors"]
29031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
29032#[inline]
29033#[target_feature(enable = "neon")]
29034#[stable(feature = "neon_intrinsics", since = "1.59.0")]
29035#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
29036pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
29037    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
29038}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
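// --- Editorial example (not generated output) ------------------------------
// The same `zip1` pattern at byte granularity: the 16-lane shuffle above pairs
// bytes 0..=7 of `a` with bytes 0..=7 of `b`. Sketch only, assuming an AArch64
// test target; module and test names are hypothetical.
#[cfg(all(test, target_arch = "aarch64"))]
mod zip1q_s8_example {
    use super::*;
    use core::mem::transmute;

    #[test]
    fn interleaves_low_bytes() {
        unsafe {
            let a: int8x16_t =
                transmute([0i8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
            let b: int8x16_t =
                transmute([16i8, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]);
            let r: [i8; 16] = transmute(vzip1q_s8(a, b));
            assert_eq!(r, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
        }
    }
}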
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
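// --- Editorial example (not generated output) ------------------------------
// `zip2` is the complement of `zip1`: it interleaves the *high* halves, which
// is why its shuffle masks start at the midpoint lane. Hedged sketch, assuming
// an AArch64 test target; names are hypothetical.
#[cfg(all(test, target_arch = "aarch64"))]
mod zip2q_f32_example {
    use super::*;
    use core::mem::transmute;

    #[test]
    fn interleaves_high_halves() {
        unsafe {
            let a: float32x4_t = transmute([0.0f32, 1.0, 2.0, 3.0]);
            let b: float32x4_t = transmute([10.0f32, 11.0, 12.0, 13.0]);
            // Lanes 2 and 3 of each input, interleaved: [a2, b2, a3, b3].
            let r: [f32; 4] = transmute(vzip2q_f32(a, b));
            assert_eq!(r, [2.0, 12.0, 3.0, 13.0]);
        }
    }
}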
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
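// --- Editorial example (not generated output) ------------------------------
// Taken together, `zip1`/`zip2` produce the full interleave of two vectors:
// the low-half and high-half results concatenate to [a0, b0, a1, b1, ...].
// Hedged sketch under the same assumptions as the examples above.
#[cfg(all(test, target_arch = "aarch64"))]
mod zip_pair_example {
    use super::*;
    use core::mem::transmute;

    #[test]
    fn zip1_and_zip2_cover_the_full_interleave() {
        unsafe {
            let a: uint32x4_t = transmute([0u32, 1, 2, 3]);
            let b: uint32x4_t = transmute([10u32, 11, 12, 13]);
            let lo: [u32; 4] = transmute(vzip1q_u32(a, b));
            let hi: [u32; 4] = transmute(vzip2q_u32(a, b));
            // lo ++ hi is the element-wise interleave of `a` and `b`.
            assert_eq!(lo, [0, 10, 1, 11]);
            assert_eq!(hi, [2, 12, 3, 13]);
        }
    }
}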