1#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    // Thin shim over the LLVM intrinsic; lowers to a single CRC32CX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    // SAFETY: the `crc` target feature required by the instruction is
    // guaranteed by the #[target_feature] attribute above.
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    // Thin shim over the LLVM intrinsic; lowers to a single CRC32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    // SAFETY: the `crc` target feature required by the instruction is
    // guaranteed by the #[target_feature] attribute above.
    unsafe { ___crc32d(crc, data) }
}
#[doc = "Floating-point JavaScript convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__jcvt)"]
#[inline(always)]
#[target_feature(enable = "jsconv")]
#[cfg_attr(test, assert_instr(fjcvtzs))]
#[stable(feature = "stdarch_aarch64_jscvt", since = "1.95.0")]
pub fn __jcvt(a: f64) -> i32 {
    // Thin shim over the LLVM intrinsic; lowers to a single FJCVTZS instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.fjcvtzs"
        )]
        fn ___jcvt(a: f64) -> i32;
    }
    // SAFETY: the `jsconv` target feature is guaranteed by the attribute above.
    unsafe { ___jcvt(a) }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..16) of both 128-bit inputs.
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int8x8_t = vabd_s8(d, e);
        // The absolute difference is non-negative, so reinterpret as unsigned
        // to get zero-extension (not sign-extension) in the widening cast.
        let f: uint8x8_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..8) of both 128-bit inputs.
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: int16x4_t = vabd_s16(d, e);
        // The absolute difference is non-negative, so reinterpret as unsigned
        // to get zero-extension (not sign-extension) in the widening cast.
        let f: uint16x4_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..4) of both 128-bit inputs.
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: int32x2_t = vabd_s32(d, e);
        // The absolute difference is non-negative, so reinterpret as unsigned
        // to get zero-extension (not sign-extension) in the widening cast.
        let f: uint32x2_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..16) of both 128-bit inputs; the
        // result is already unsigned, so simd_cast zero-extends directly.
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint8x8_t = vabd_u8(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..8) of both 128-bit inputs; the
        // result is already unsigned, so simd_cast zero-extends directly.
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: uint16x4_t = vabd_u16(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..4) of both 128-bit inputs; the
        // result is already unsigned, so simd_cast zero-extends directly.
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: uint32x2_t = vabd_u32(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Thin shim over the LLVM intrinsic; lowers to a single FABD.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin shim over the LLVM intrinsic; lowers to a single FABD.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    // Scalar form: broadcast both operands into 1-lane vectors, run the
    // vector FABD, and pull lane 0 back out.
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    // Scalar form: broadcast both operands into vectors, run the vector
    // FABD, and pull lane 0 back out.
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision form: broadcast, vector FABD, extract lane 0.
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..8) of both inputs.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        // The absolute difference is non-negative; go through an unsigned
        // type so the widening cast zero-extends rather than sign-extends.
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..4) of both inputs.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        // The absolute difference is non-negative; go through an unsigned
        // type so the widening cast zero-extends rather than sign-extends.
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..16) of both inputs.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        // The absolute difference is non-negative; go through an unsigned
        // type so the widening cast zero-extends rather than sign-extends.
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Extract the high halves (lanes 8..16); already unsigned, so the
        // final simd_cast zero-extends directly.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Extract the high halves (lanes 4..8); already unsigned, so the
        // final simd_cast zero-extends directly.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Extract the high halves (lanes 2..4); already unsigned, so the
        // final simd_cast zero-extends directly.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    // Lowers to a single FABS via the portable SIMD intrinsic.
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    // Lowers to a single FABS via the portable SIMD intrinsic.
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe {
        // Per-lane select of max(a, -a). simd_neg wraps, so i64::MIN maps to
        // itself — matching the wrapping semantics of the ABS instruction.
        let neg: int64x1_t = simd_neg(a);
        let mask: int64x1_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe {
        // Per-lane select of max(a, -a). simd_neg wraps, so i64::MIN maps to
        // itself — matching the wrapping semantics of the ABS instruction.
        let neg: int64x2_t = simd_neg(a);
        let mask: int64x2_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    // Scalar form backed directly by the LLVM intrinsic (wrapping for
    // i64::MIN, matching the ABS instruction).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vabsd_s64(a) }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Plain wrapping scalar add; no dedicated instruction is required
    // (hence assert_instr(nop)).
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Plain wrapping scalar add; no dedicated instruction is required
    // (hence assert_instr(nop)).
    a.wrapping_add(b)
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    // Widening horizontal sum of all four i16 lanes into one i32 (SADDLV).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    // Widening horizontal sum of all eight i16 lanes into one i32 (SADDLV).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    // Widening horizontal sum of all four i32 lanes into one i64 (SADDLV).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// For a 2-lane vector the widening reduction is a single pairwise add, so
// the expected instruction is SADDLP rather than SADDLV.
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    // The LLVM intrinsic returns i32; the sum of eight i8 values is within
    // [-1024, 1016], so the truncation to i16 is always lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    // The LLVM intrinsic returns i32; the sum of sixteen i8 values is within
    // [-2048, 2032], so the truncation to i16 is always lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // Widening horizontal sum of all four u16 lanes into one u32 (UADDLV).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    // Widening horizontal sum of all eight u16 lanes into one u32 (UADDLV).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    // Widening horizontal sum of all four u32 lanes into one u64 (UADDLV).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// For a 2-lane vector the widening reduction is a single pairwise add, so
// the expected instruction is UADDLP rather than UADDLV.
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    // The LLVM intrinsic returns i32; the sum of eight u8 values is at most
    // 2040, so the truncation to u16 is always lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    // The LLVM intrinsic returns i32; the sum of sixteen u8 values is at
    // most 4080, so the truncation to u16 is always lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddlvq_u8(a) as u16 }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane float reduction is a single pairwise add (FADDP).
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// The 4-lane float reduction lowers to pairwise adds (FADDP).
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane double reduction is a single pairwise add (FADDP).
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: the `neon` target feature is guaranteed by the attribute above.
    unsafe { _vaddvq_f64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane integer reduction is a single pairwise add (ADDP).
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane integer reduction is a single pairwise add (ADDP).
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane 64-bit reduction is a single pairwise add (ADDP).
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A 2-lane 64-bit reduction is a single pairwise add (ADDP).
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    // Ordered reduction starting from 0 (wrapping integer addition).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Thin shim over the LLVM intrinsic; lowers to a single FAMAX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f16"
        )]
        fn _vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `faminmax` target feature is guaranteed by the attribute above.
    unsafe { _vamax_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Thin shim over the LLVM intrinsic; lowers to a single FAMAX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v8f16"
        )]
        fn _vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `faminmax` target feature is guaranteed by the attribute above.
    unsafe { _vamaxq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Thin shim over the LLVM intrinsic; lowers to a single FAMAX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `faminmax` target feature is guaranteed by the attribute above.
    unsafe { _vamax_f32(a, b) }
}
783#[doc = "Multi-vector floating-point absolute maximum"]
784#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
785#[inline(always)]
786#[target_feature(enable = "neon,faminmax")]
787#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
788#[unstable(feature = "faminmax", issue = "137933")]
789pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
790 unsafe extern "unadjusted" {
791 #[cfg_attr(
792 any(target_arch = "aarch64", target_arch = "arm64ec"),
793 link_name = "llvm.aarch64.neon.famax.v4f32"
794 )]
795 fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
796 }
797 unsafe { _vamaxq_f32(a, b) }
798}
799#[doc = "Multi-vector floating-point absolute maximum"]
800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
801#[inline(always)]
802#[target_feature(enable = "neon,faminmax")]
803#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
804#[unstable(feature = "faminmax", issue = "137933")]
805pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
806 unsafe extern "unadjusted" {
807 #[cfg_attr(
808 any(target_arch = "aarch64", target_arch = "arm64ec"),
809 link_name = "llvm.aarch64.neon.famax.v2f64"
810 )]
811 fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
812 }
813 unsafe { _vamaxq_f64(a, b) }
814}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f16"
        )]
        fn _vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vamin_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v8f16"
        )]
        fn _vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vaminq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vaminq_f64(a, b) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vbcaxq_u64(a, b, c) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcaddq_rot90_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above. The intrinsic
    // yields its mask widened to i32; the `as u16` cast truncates it back to
    // the 16-bit scalar result size.
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above; the Rust and
    // intrinsic signatures match exactly.
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: FFI call to the LLVM intrinsic declared above. The intrinsic
    // yields its mask widened to i32; the `as u16` cast truncates it back to
    // the 16-bit scalar result size.
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b| is computed as |b| >= |a|: delegate to the FACGE-based
    // intrinsic with the operands swapped.
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| <= |b| is computed as |b| >= |a|: delegate with swapped operands.
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    // |a| <= |b| is computed as |b| >= |a|: delegate with swapped operands.
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    // |a| <= |b| is computed as |b| >= |a|: delegate with swapped operands.
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    // |a| <= |b| is computed as |b| >= |a|: delegate with swapped operands.
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b| is computed as |b| > |a|: delegate to the FACGT-based
    // intrinsic with the operands swapped.
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| < |b| is computed as |b| > |a|: delegate with swapped operands.
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    // |a| < |b| is computed as |b| > |a|: delegate with swapped operands.
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    // |a| < |b| is computed as |b| > |a|: delegate with swapped operands.
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    // |a| < |b| is computed as |b| > |a|: delegate with swapped operands.
    vcagth_f16(b, a)
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise equality of identically shaped vectors; each output
    // lane is an all-ones/all-zeros mask.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise equality of identically shaped vectors.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise equality of identically shaped vectors.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise equality of identically shaped vectors.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise equality of identically shaped vectors.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise equality of identically shaped vectors.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise equality of identically shaped vectors.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise equality of identically shaped vectors.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    // Scalar form: broadcast both scalars into 1-lane vectors, compare with
    // the vector intrinsic, then pull the mask back out of lane 0.
    // SAFETY: `vceq_f64` produces a uint64x1_t, so lane index 0 is in bounds
    // for `simd_extract!`.
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
1526#[doc = "Floating-point compare equal"]
1527#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
1528#[inline(always)]
1529#[target_feature(enable = "neon")]
1530#[cfg_attr(test, assert_instr(fcmp))]
1531#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1532pub fn vceqs_f32(a: f32, b: f32) -> u32 {
1533 unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
1534}
1535#[doc = "Compare bitwise equal"]
1536#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
1537#[inline(always)]
1538#[target_feature(enable = "neon")]
1539#[cfg_attr(test, assert_instr(cmp))]
1540#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1541pub fn vceqd_s64(a: i64, b: i64) -> u64 {
1542 unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
1543}
1544#[doc = "Compare bitwise equal"]
1545#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
1546#[inline(always)]
1547#[target_feature(enable = "neon")]
1548#[cfg_attr(test, assert_instr(cmp))]
1549#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1550pub fn vceqd_u64(a: u64, b: u64) -> u64 {
1551 unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
1552}
1553#[doc = "Floating-point compare equal"]
1554#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
1555#[inline(always)]
1556#[cfg_attr(test, assert_instr(fcmp))]
1557#[target_feature(enable = "neon,fp16")]
1558#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1559#[cfg(not(target_arch = "arm64ec"))]
1560pub fn vceqh_f16(a: f16, b: f16) -> u16 {
1561 unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
1562}
1563#[doc = "Floating-point compare bitwise equal to zero"]
1564#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
1565#[inline(always)]
1566#[cfg_attr(test, assert_instr(fcmeq))]
1567#[target_feature(enable = "neon,fp16")]
1568#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
1569#[cfg(not(target_arch = "arm64ec"))]
1570pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
1571 let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
1572 unsafe { simd_eq(a, transmute(b)) }
1573}
1574#[doc = "Floating-point compare bitwise equal to zero"]
1575#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
1576#[inline(always)]
1577#[cfg_attr(test, assert_instr(fcmeq))]
1578#[target_feature(enable = "neon,fp16")]
1579#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
1580#[cfg(not(target_arch = "arm64ec"))]
1581pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
1582 let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
1583 unsafe { simd_eq(a, transmute(b)) }
1584}
1585#[doc = "Floating-point compare bitwise equal to zero"]
1586#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
1587#[inline(always)]
1588#[target_feature(enable = "neon")]
1589#[cfg_attr(test, assert_instr(fcmeq))]
1590#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1591pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
1592 let b: f32x2 = f32x2::new(0.0, 0.0);
1593 unsafe { simd_eq(a, transmute(b)) }
1594}
1595#[doc = "Floating-point compare bitwise equal to zero"]
1596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
1597#[inline(always)]
1598#[target_feature(enable = "neon")]
1599#[cfg_attr(test, assert_instr(fcmeq))]
1600#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1601pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
1602 let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
1603 unsafe { simd_eq(a, transmute(b)) }
1604}
1605#[doc = "Floating-point compare bitwise equal to zero"]
1606#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
1607#[inline(always)]
1608#[target_feature(enable = "neon")]
1609#[cfg_attr(test, assert_instr(fcmeq))]
1610#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1611pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
1612 let b: f64 = 0.0;
1613 unsafe { simd_eq(a, transmute(b)) }
1614}
1615#[doc = "Floating-point compare bitwise equal to zero"]
1616#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
1617#[inline(always)]
1618#[target_feature(enable = "neon")]
1619#[cfg_attr(test, assert_instr(fcmeq))]
1620#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1621pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
1622 let b: f64x2 = f64x2::new(0.0, 0.0);
1623 unsafe { simd_eq(a, transmute(b)) }
1624}
1625#[doc = "Signed compare bitwise equal to zero"]
1626#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
1627#[inline(always)]
1628#[target_feature(enable = "neon")]
1629#[cfg_attr(test, assert_instr(cmeq))]
1630#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1631pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
1632 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1633 unsafe { simd_eq(a, transmute(b)) }
1634}
1635#[doc = "Signed compare bitwise equal to zero"]
1636#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
1637#[inline(always)]
1638#[target_feature(enable = "neon")]
1639#[cfg_attr(test, assert_instr(cmeq))]
1640#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1641pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
1642 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
1643 unsafe { simd_eq(a, transmute(b)) }
1644}
1645#[doc = "Signed compare bitwise equal to zero"]
1646#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
1647#[inline(always)]
1648#[target_feature(enable = "neon")]
1649#[cfg_attr(test, assert_instr(cmeq))]
1650#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1651pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
1652 let b: i16x4 = i16x4::new(0, 0, 0, 0);
1653 unsafe { simd_eq(a, transmute(b)) }
1654}
1655#[doc = "Signed compare bitwise equal to zero"]
1656#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
1657#[inline(always)]
1658#[target_feature(enable = "neon")]
1659#[cfg_attr(test, assert_instr(cmeq))]
1660#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1661pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
1662 let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1663 unsafe { simd_eq(a, transmute(b)) }
1664}
1665#[doc = "Signed compare bitwise equal to zero"]
1666#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
1667#[inline(always)]
1668#[target_feature(enable = "neon")]
1669#[cfg_attr(test, assert_instr(cmeq))]
1670#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1671pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
1672 let b: i32x2 = i32x2::new(0, 0);
1673 unsafe { simd_eq(a, transmute(b)) }
1674}
1675#[doc = "Signed compare bitwise equal to zero"]
1676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
1677#[inline(always)]
1678#[target_feature(enable = "neon")]
1679#[cfg_attr(test, assert_instr(cmeq))]
1680#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1681pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
1682 let b: i32x4 = i32x4::new(0, 0, 0, 0);
1683 unsafe { simd_eq(a, transmute(b)) }
1684}
1685#[doc = "Signed compare bitwise equal to zero"]
1686#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
1687#[inline(always)]
1688#[target_feature(enable = "neon")]
1689#[cfg_attr(test, assert_instr(cmeq))]
1690#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1691pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
1692 let b: i64x1 = i64x1::new(0);
1693 unsafe { simd_eq(a, transmute(b)) }
1694}
1695#[doc = "Signed compare bitwise equal to zero"]
1696#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
1697#[inline(always)]
1698#[target_feature(enable = "neon")]
1699#[cfg_attr(test, assert_instr(cmeq))]
1700#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1701pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
1702 let b: i64x2 = i64x2::new(0, 0);
1703 unsafe { simd_eq(a, transmute(b)) }
1704}
1705#[doc = "Signed compare bitwise equal to zero"]
1706#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
1707#[inline(always)]
1708#[target_feature(enable = "neon")]
1709#[cfg_attr(test, assert_instr(cmeq))]
1710#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1711pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
1712 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1713 unsafe { simd_eq(a, transmute(b)) }
1714}
1715#[doc = "Signed compare bitwise equal to zero"]
1716#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
1717#[inline(always)]
1718#[target_feature(enable = "neon")]
1719#[cfg_attr(test, assert_instr(cmeq))]
1720#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1721pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
1722 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
1723 unsafe { simd_eq(a, transmute(b)) }
1724}
1725#[doc = "Signed compare bitwise equal to zero"]
1726#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
1727#[inline(always)]
1728#[target_feature(enable = "neon")]
1729#[cfg_attr(test, assert_instr(cmeq))]
1730#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1731pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
1732 let b: i64x1 = i64x1::new(0);
1733 unsafe { simd_eq(a, transmute(b)) }
1734}
1735#[doc = "Signed compare bitwise equal to zero"]
1736#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
1737#[inline(always)]
1738#[target_feature(enable = "neon")]
1739#[cfg_attr(test, assert_instr(cmeq))]
1740#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1741pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
1742 let b: i64x2 = i64x2::new(0, 0);
1743 unsafe { simd_eq(a, transmute(b)) }
1744}
1745#[doc = "Unsigned compare bitwise equal to zero"]
1746#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
1747#[inline(always)]
1748#[target_feature(enable = "neon")]
1749#[cfg_attr(test, assert_instr(cmeq))]
1750#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1751pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
1752 let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1753 unsafe { simd_eq(a, transmute(b)) }
1754}
1755#[doc = "Unsigned compare bitwise equal to zero"]
1756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
1757#[inline(always)]
1758#[target_feature(enable = "neon")]
1759#[cfg_attr(test, assert_instr(cmeq))]
1760#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1761pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
1762 let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
1763 unsafe { simd_eq(a, transmute(b)) }
1764}
1765#[doc = "Unsigned compare bitwise equal to zero"]
1766#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
1767#[inline(always)]
1768#[target_feature(enable = "neon")]
1769#[cfg_attr(test, assert_instr(cmeq))]
1770#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1771pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
1772 let b: u16x4 = u16x4::new(0, 0, 0, 0);
1773 unsafe { simd_eq(a, transmute(b)) }
1774}
1775#[doc = "Unsigned compare bitwise equal to zero"]
1776#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
1777#[inline(always)]
1778#[target_feature(enable = "neon")]
1779#[cfg_attr(test, assert_instr(cmeq))]
1780#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1781pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
1782 let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
1783 unsafe { simd_eq(a, transmute(b)) }
1784}
1785#[doc = "Unsigned compare bitwise equal to zero"]
1786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
1787#[inline(always)]
1788#[target_feature(enable = "neon")]
1789#[cfg_attr(test, assert_instr(cmeq))]
1790#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1791pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
1792 let b: u32x2 = u32x2::new(0, 0);
1793 unsafe { simd_eq(a, transmute(b)) }
1794}
1795#[doc = "Unsigned compare bitwise equal to zero"]
1796#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
1797#[inline(always)]
1798#[target_feature(enable = "neon")]
1799#[cfg_attr(test, assert_instr(cmeq))]
1800#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1801pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
1802 let b: u32x4 = u32x4::new(0, 0, 0, 0);
1803 unsafe { simd_eq(a, transmute(b)) }
1804}
1805#[doc = "Unsigned compare bitwise equal to zero"]
1806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
1807#[inline(always)]
1808#[target_feature(enable = "neon")]
1809#[cfg_attr(test, assert_instr(cmeq))]
1810#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1811pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
1812 let b: u64x1 = u64x1::new(0);
1813 unsafe { simd_eq(a, transmute(b)) }
1814}
1815#[doc = "Unsigned compare bitwise equal to zero"]
1816#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
1817#[inline(always)]
1818#[target_feature(enable = "neon")]
1819#[cfg_attr(test, assert_instr(cmeq))]
1820#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1821pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
1822 let b: u64x2 = u64x2::new(0, 0);
1823 unsafe { simd_eq(a, transmute(b)) }
1824}
1825#[doc = "Compare bitwise equal to zero"]
1826#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
1827#[inline(always)]
1828#[target_feature(enable = "neon")]
1829#[cfg_attr(test, assert_instr(cmp))]
1830#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1831pub fn vceqzd_s64(a: i64) -> u64 {
1832 unsafe { transmute(vceqz_s64(transmute(a))) }
1833}
1834#[doc = "Compare bitwise equal to zero"]
1835#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
1836#[inline(always)]
1837#[target_feature(enable = "neon")]
1838#[cfg_attr(test, assert_instr(cmp))]
1839#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1840pub fn vceqzd_u64(a: u64) -> u64 {
1841 unsafe { transmute(vceqz_u64(transmute(a))) }
1842}
1843#[doc = "Floating-point compare bitwise equal to zero"]
1844#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
1845#[inline(always)]
1846#[cfg_attr(test, assert_instr(fcmp))]
1847#[target_feature(enable = "neon,fp16")]
1848#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1849#[cfg(not(target_arch = "arm64ec"))]
1850pub fn vceqzh_f16(a: f16) -> u16 {
1851 unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
1852}
1853#[doc = "Floating-point compare bitwise equal to zero"]
1854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
1855#[inline(always)]
1856#[target_feature(enable = "neon")]
1857#[cfg_attr(test, assert_instr(fcmp))]
1858#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1859pub fn vceqzs_f32(a: f32) -> u32 {
1860 unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
1861}
1862#[doc = "Floating-point compare bitwise equal to zero"]
1863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
1864#[inline(always)]
1865#[target_feature(enable = "neon")]
1866#[cfg_attr(test, assert_instr(fcmp))]
1867#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1868pub fn vceqzd_f64(a: f64) -> u64 {
1869 unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
1870}
1871#[doc = "Floating-point compare greater than or equal"]
1872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
1873#[inline(always)]
1874#[target_feature(enable = "neon")]
1875#[cfg_attr(test, assert_instr(fcmge))]
1876#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1877pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1878 unsafe { simd_ge(a, b) }
1879}
1880#[doc = "Floating-point compare greater than or equal"]
1881#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
1882#[inline(always)]
1883#[target_feature(enable = "neon")]
1884#[cfg_attr(test, assert_instr(fcmge))]
1885#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1886pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1887 unsafe { simd_ge(a, b) }
1888}
1889#[doc = "Compare signed greater than or equal"]
1890#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
1891#[inline(always)]
1892#[target_feature(enable = "neon")]
1893#[cfg_attr(test, assert_instr(cmge))]
1894#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1895pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
1896 unsafe { simd_ge(a, b) }
1897}
1898#[doc = "Compare signed greater than or equal"]
1899#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
1900#[inline(always)]
1901#[target_feature(enable = "neon")]
1902#[cfg_attr(test, assert_instr(cmge))]
1903#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1904pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
1905 unsafe { simd_ge(a, b) }
1906}
1907#[doc = "Compare unsigned greater than or equal"]
1908#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
1909#[inline(always)]
1910#[target_feature(enable = "neon")]
1911#[cfg_attr(test, assert_instr(cmhs))]
1912#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1913pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
1914 unsafe { simd_ge(a, b) }
1915}
1916#[doc = "Compare unsigned greater than or equal"]
1917#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
1918#[inline(always)]
1919#[target_feature(enable = "neon")]
1920#[cfg_attr(test, assert_instr(cmhs))]
1921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1922pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
1923 unsafe { simd_ge(a, b) }
1924}
1925#[doc = "Floating-point compare greater than or equal"]
1926#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
1927#[inline(always)]
1928#[target_feature(enable = "neon")]
1929#[cfg_attr(test, assert_instr(fcmp))]
1930#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1931pub fn vcged_f64(a: f64, b: f64) -> u64 {
1932 unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
1933}
1934#[doc = "Floating-point compare greater than or equal"]
1935#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
1936#[inline(always)]
1937#[target_feature(enable = "neon")]
1938#[cfg_attr(test, assert_instr(fcmp))]
1939#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1940pub fn vcges_f32(a: f32, b: f32) -> u32 {
1941 unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
1942}
1943#[doc = "Compare greater than or equal"]
1944#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
1945#[inline(always)]
1946#[target_feature(enable = "neon")]
1947#[cfg_attr(test, assert_instr(cmp))]
1948#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1949pub fn vcged_s64(a: i64, b: i64) -> u64 {
1950 unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
1951}
1952#[doc = "Compare greater than or equal"]
1953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
1954#[inline(always)]
1955#[target_feature(enable = "neon")]
1956#[cfg_attr(test, assert_instr(cmp))]
1957#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1958pub fn vcged_u64(a: u64, b: u64) -> u64 {
1959 unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
1960}
1961#[doc = "Floating-point compare greater than or equal"]
1962#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
1963#[inline(always)]
1964#[cfg_attr(test, assert_instr(fcmp))]
1965#[target_feature(enable = "neon,fp16")]
1966#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1967#[cfg(not(target_arch = "arm64ec"))]
1968pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
1969 unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
1970}
1971#[doc = "Floating-point compare greater than or equal to zero"]
1972#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
1973#[inline(always)]
1974#[target_feature(enable = "neon")]
1975#[cfg_attr(test, assert_instr(fcmge))]
1976#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1977pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
1978 let b: f32x2 = f32x2::new(0.0, 0.0);
1979 unsafe { simd_ge(a, transmute(b)) }
1980}
1981#[doc = "Floating-point compare greater than or equal to zero"]
1982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
1983#[inline(always)]
1984#[target_feature(enable = "neon")]
1985#[cfg_attr(test, assert_instr(fcmge))]
1986#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1987pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
1988 let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
1989 unsafe { simd_ge(a, transmute(b)) }
1990}
1991#[doc = "Floating-point compare greater than or equal to zero"]
1992#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
1993#[inline(always)]
1994#[target_feature(enable = "neon")]
1995#[cfg_attr(test, assert_instr(fcmge))]
1996#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1997pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
1998 let b: f64 = 0.0;
1999 unsafe { simd_ge(a, transmute(b)) }
2000}
2001#[doc = "Floating-point compare greater than or equal to zero"]
2002#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
2003#[inline(always)]
2004#[target_feature(enable = "neon")]
2005#[cfg_attr(test, assert_instr(fcmge))]
2006#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2007pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
2008 let b: f64x2 = f64x2::new(0.0, 0.0);
2009 unsafe { simd_ge(a, transmute(b)) }
2010}
2011#[doc = "Compare signed greater than or equal to zero"]
2012#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
2013#[inline(always)]
2014#[target_feature(enable = "neon")]
2015#[cfg_attr(test, assert_instr(cmge))]
2016#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2017pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
2018 let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2019 unsafe { simd_ge(a, transmute(b)) }
2020}
2021#[doc = "Compare signed greater than or equal to zero"]
2022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
2023#[inline(always)]
2024#[target_feature(enable = "neon")]
2025#[cfg_attr(test, assert_instr(cmge))]
2026#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2027pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
2028 let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2029 unsafe { simd_ge(a, transmute(b)) }
2030}
2031#[doc = "Compare signed greater than or equal to zero"]
2032#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
2033#[inline(always)]
2034#[target_feature(enable = "neon")]
2035#[cfg_attr(test, assert_instr(cmge))]
2036#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2037pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
2038 let b: i16x4 = i16x4::new(0, 0, 0, 0);
2039 unsafe { simd_ge(a, transmute(b)) }
2040}
2041#[doc = "Compare signed greater than or equal to zero"]
2042#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
2043#[inline(always)]
2044#[target_feature(enable = "neon")]
2045#[cfg_attr(test, assert_instr(cmge))]
2046#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2047pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
2048 let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2049 unsafe { simd_ge(a, transmute(b)) }
2050}
2051#[doc = "Compare signed greater than or equal to zero"]
2052#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
2053#[inline(always)]
2054#[target_feature(enable = "neon")]
2055#[cfg_attr(test, assert_instr(cmge))]
2056#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2057pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
2058 let b: i32x2 = i32x2::new(0, 0);
2059 unsafe { simd_ge(a, transmute(b)) }
2060}
2061#[doc = "Compare signed greater than or equal to zero"]
2062#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
2063#[inline(always)]
2064#[target_feature(enable = "neon")]
2065#[cfg_attr(test, assert_instr(cmge))]
2066#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2067pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
2068 let b: i32x4 = i32x4::new(0, 0, 0, 0);
2069 unsafe { simd_ge(a, transmute(b)) }
2070}
2071#[doc = "Compare signed greater than or equal to zero"]
2072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
2073#[inline(always)]
2074#[target_feature(enable = "neon")]
2075#[cfg_attr(test, assert_instr(cmge))]
2076#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2077pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
2078 let b: i64x1 = i64x1::new(0);
2079 unsafe { simd_ge(a, transmute(b)) }
2080}
2081#[doc = "Compare signed greater than or equal to zero"]
2082#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
2083#[inline(always)]
2084#[target_feature(enable = "neon")]
2085#[cfg_attr(test, assert_instr(cmge))]
2086#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2087pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
2088 let b: i64x2 = i64x2::new(0, 0);
2089 unsafe { simd_ge(a, transmute(b)) }
2090}
2091#[doc = "Floating-point compare greater than or equal to zero"]
2092#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
2093#[inline(always)]
2094#[target_feature(enable = "neon")]
2095#[cfg_attr(test, assert_instr(fcmp))]
2096#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2097pub fn vcgezd_f64(a: f64) -> u64 {
2098 unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
2099}
2100#[doc = "Floating-point compare greater than or equal to zero"]
2101#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
2102#[inline(always)]
2103#[target_feature(enable = "neon")]
2104#[cfg_attr(test, assert_instr(fcmp))]
2105#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2106pub fn vcgezs_f32(a: f32) -> u32 {
2107 unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
2108}
2109#[doc = "Compare signed greater than or equal to zero"]
2110#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
2111#[inline(always)]
2112#[target_feature(enable = "neon")]
2113#[cfg_attr(test, assert_instr(nop))]
2114#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2115pub fn vcgezd_s64(a: i64) -> u64 {
2116 unsafe { transmute(vcgez_s64(transmute(a))) }
2117}
2118#[doc = "Floating-point compare greater than or equal to zero"]
2119#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
2120#[inline(always)]
2121#[cfg_attr(test, assert_instr(fcmp))]
2122#[target_feature(enable = "neon,fp16")]
2123#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2124#[cfg(not(target_arch = "arm64ec"))]
2125pub fn vcgezh_f16(a: f16) -> u16 {
2126 unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
2127}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise `a > b`: each result lane is all ones when true, all zeros when
// false. Expected to lower to a single FCMGT (see assert_instr above).
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) variant of vcgt_f64.
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise signed `a > b` (CMGT).
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) signed variant.
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise unsigned `a > b` (CMHI is the unsigned compare-higher instruction).
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) unsigned variant.
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form: broadcast both operands, use the vector compare, extract
// lane 0. assert_instr(fcmp) indicates this folds to a scalar FP compare.
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// f32 scalar form, same broadcast/compare/extract pattern.
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar i64 form: transmute i64 <-> int64x1_t (same size, bit-identical)
// so the 1-lane vector compare can be reused.
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar u64 form, same transmute-based reuse of the vector compare.
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// f16 scalar form; requires the fp16 feature and is excluded on arm64ec.
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Each lane of `a` is compared against an all-zero vector; the zero constant
// is built as a portable SIMD tuple and transmuted to the NEON vector type.
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 f32 lanes vs zero.
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A single f64 is bit-compatible with the 1-lane vector, so a plain scalar
// zero is transmuted directly.
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 2 f64 lanes vs zero.
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 8 signed 8-bit lanes vs zero (CMGT).
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 16 signed 8-bit lanes vs zero.
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 signed 16-bit lanes vs zero.
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 8 signed 16-bit lanes vs zero.
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 2 signed 32-bit lanes vs zero.
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 signed 32-bit lanes vs zero.
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Single signed 64-bit lane vs zero.
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 2 signed 64-bit lanes vs zero.
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form: broadcast, vector vs-zero compare, extract lane 0.
pub fn vcgtzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// f32 scalar form of the vs-zero compare.
pub fn vcgtzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar i64 form via transmute to/from the 1-lane vector type.
pub fn vcgtzd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// f16 scalar form; requires the fp16 feature and is excluded on arm64ec.
pub fn vcgtzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise `a <= b`. Per the assert_instr, this is expected to lower to
// FCMGE (presumably with operands swapped, since `a <= b` == `b >= a`).
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) variant.
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise signed `a <= b` (CMGE per assert_instr).
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) signed variant.
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise unsigned `a <= b` (CMHS per assert_instr).
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) unsigned variant.
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form: broadcast both operands, use the vector compare, extract
// lane 0. assert_instr(fcmp) indicates this folds to a scalar FP compare.
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// f32 scalar form, same broadcast/compare/extract pattern.
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar u64 form via transmute to/from the 1-lane vector types.
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar i64 form via transmute to/from the 1-lane vector types.
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// f16 scalar form; requires the fp16 feature and is excluded on arm64ec.
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Each lane of `a` is compared against an all-zero vector (FCMLE has a
// dedicated vs-zero form, per the assert_instr).
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 f32 lanes vs zero.
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A single f64 is bit-compatible with the 1-lane vector, so a plain scalar
// zero is transmuted directly.
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 2 f64 lanes vs zero.
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 8 signed 8-bit lanes vs zero (CMLE).
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 16 signed 8-bit lanes vs zero.
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 signed 16-bit lanes vs zero.
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 8 signed 16-bit lanes vs zero.
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 2 signed 32-bit lanes vs zero.
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 signed 32-bit lanes vs zero.
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Single signed 64-bit lane vs zero.
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 2 signed 64-bit lanes vs zero.
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form: broadcast, vector vs-zero compare, extract lane 0.
pub fn vclezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// f32 scalar form of the vs-zero compare.
pub fn vclezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar i64 form via transmute to/from the 1-lane vector type.
pub fn vclezd_s64(a: i64) -> u64 {
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// f16 scalar form; requires the fp16 feature and is excluded on arm64ec.
pub fn vclezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise `a < b`. Per the assert_instr, this is expected to lower to
// FCMGT (presumably with operands swapped, since `a < b` == `b > a`).
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) variant.
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise signed `a < b` (CMGT per assert_instr).
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) signed variant.
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Lane-wise unsigned `a < b` (CMHI per assert_instr).
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Q-register (2-lane) unsigned variant.
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar u64 form: transmute u64 <-> uint64x1_t (same size, bit-identical)
// so the 1-lane vector compare can be reused.
pub fn vcltd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar i64 form, same transmute-based reuse of the vector compare.
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// f16 scalar form; requires the fp16 feature and is excluded on arm64ec.
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// f32 scalar form: broadcast/compare/extract lane 0.
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// f64 scalar form: broadcast/compare/extract lane 0.
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Each lane of `a` is compared against an all-zero vector (FCMLT has a
// dedicated vs-zero form, per the assert_instr).
pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 f32 lanes vs zero.
pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// A single f64 is bit-compatible with the 1-lane vector, so a plain scalar
// zero is transmuted directly.
pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 2 f64 lanes vs zero.
pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 8 signed 8-bit lanes vs zero (CMLT).
pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 16 signed 8-bit lanes vs zero.
pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 signed 16-bit lanes vs zero.
pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 8 signed 16-bit lanes vs zero.
pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 2 signed 32-bit lanes vs zero.
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 4 signed 32-bit lanes vs zero.
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
2842#[doc = "Compare signed less than zero"]
2843#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
2844#[inline(always)]
2845#[target_feature(enable = "neon")]
2846#[cfg_attr(test, assert_instr(cmlt))]
2847#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2848pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
2849 let b: i64x1 = i64x1::new(0);
2850 unsafe { simd_lt(a, transmute(b)) }
2851}
2852#[doc = "Compare signed less than zero"]
2853#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
2854#[inline(always)]
2855#[target_feature(enable = "neon")]
2856#[cfg_attr(test, assert_instr(cmlt))]
2857#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2858pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
2859 let b: i64x2 = i64x2::new(0, 0);
2860 unsafe { simd_lt(a, transmute(b)) }
2861}
2862#[doc = "Floating-point compare less than zero"]
2863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
2864#[inline(always)]
2865#[target_feature(enable = "neon")]
2866#[cfg_attr(test, assert_instr(fcmp))]
2867#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2868pub fn vcltzd_f64(a: f64) -> u64 {
2869 unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
2870}
2871#[doc = "Floating-point compare less than zero"]
2872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
2873#[inline(always)]
2874#[target_feature(enable = "neon")]
2875#[cfg_attr(test, assert_instr(fcmp))]
2876#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2877pub fn vcltzs_f32(a: f32) -> u32 {
2878 unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
2879}
2880#[doc = "Compare less than zero"]
2881#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
2882#[inline(always)]
2883#[target_feature(enable = "neon")]
2884#[cfg_attr(test, assert_instr(asr))]
2885#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2886pub fn vcltzd_s64(a: i64) -> u64 {
2887 unsafe { transmute(vcltz_s64(transmute(a))) }
2888}
2889#[doc = "Floating-point compare less than zero"]
2890#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
2891#[inline(always)]
2892#[cfg_attr(test, assert_instr(fcmp))]
2893#[target_feature(enable = "neon,fp16")]
2894#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2895#[cfg(not(target_arch = "arm64ec"))]
2896pub fn vcltzh_f16(a: f16) -> u16 {
2897 unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
2898}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 0, 4 x f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the required `fcma`+`fp16` target features make FCMLA available.
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 0, 8 x f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the required `fcma`+`fp16` target features make FCMLA available.
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 0, 2 x f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 0, 4 x f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 0, 2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmlaq_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast pair LANE (elements 2*LANE, 2*LANE+1) across every pair of `c`.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` to 8 lanes by repeating pair LANE in all four pair positions.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds a single complex (real, imag) pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Select pair LANE of `c` (a no-op reshuffle given LANE == 0).
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` holds a single complex (real, imag) pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` to 4 lanes by repeating pair LANE in both pair positions.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` holds 4 complex (real, imag) pairs; LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` to 4 lanes by repeating pair LANE in both pair positions.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex (real, imag) pairs; LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast pair LANE across all four pair positions of `c`.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to 2 lanes by selecting pair LANE (elements 2*LANE, 2*LANE+1).
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast pair LANE across both pair positions of `c`.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 180, 4 x f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the required `fcma`+`fp16` target features make FCMLA available.
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 180, 8 x f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the required `fcma`+`fp16` target features make FCMLA available.
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 180, 2 x f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 180, 4 x f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 180, 2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast pair LANE (elements 2*LANE, 2*LANE+1) across every pair of `c`.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` to 8 lanes by repeating pair LANE in all four pair positions.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds a single complex (real, imag) pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Select pair LANE of `c` (a no-op reshuffle given LANE == 0).
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` holds a single complex (real, imag) pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` to 4 lanes by repeating pair LANE in both pair positions.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` holds 4 complex (real, imag) pairs; LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` to 4 lanes by repeating pair LANE in both pair positions.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex (real, imag) pairs; LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast pair LANE across all four pair positions of `c`.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to 2 lanes by selecting pair LANE (elements 2*LANE, 2*LANE+1).
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast pair LANE across both pair positions of `c`.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 270, 4 x f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the required `fcma`+`fp16` target features make FCMLA available.
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 270, 8 x f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the required `fcma`+`fp16` target features make FCMLA available.
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 270, 2 x f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 270, 4 x f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM FCMLA intrinsic (rotation 270, 2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `fcma` target feature makes FCMLA available.
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast pair LANE (elements 2*LANE, 2*LANE+1) across every pair of `c`.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds 2 complex (real, imag) pairs; LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` to 8 lanes by repeating pair LANE in all four pair positions.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds a single complex (real, imag) pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Select pair LANE of `c` (a no-op reshuffle given LANE == 0).
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
3659#[doc = "Floating-point complex multiply accumulate"]
3660#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
3661#[inline(always)]
3662#[target_feature(enable = "neon,fcma")]
3663#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3664#[rustc_legacy_const_generics(3)]
3665#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3666pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
3667 a: float32x4_t,
3668 b: float32x4_t,
3669 c: float32x2_t,
3670) -> float32x4_t {
3671 static_assert!(LANE == 0);
3672 unsafe {
3673 let c: float32x4_t = simd_shuffle!(
3674 c,
3675 c,
3676 [
3677 2 * LANE as u32,
3678 2 * LANE as u32 + 1,
3679 2 * LANE as u32,
3680 2 * LANE as u32 + 1
3681 ]
3682 );
3683 vcmlaq_rot270_f32(a, b, c)
3684 }
3685}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` holds four complex (re, im) element pairs, so the pair index needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair of `c` (elements 2*LANE and 2*LANE + 1)
        // across both pairs of a 64-bit vector, then defer to the vector-wide
        // rot270 intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds four complex (re, im) element pairs, so the pair index needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair of `c` (elements 2*LANE and 2*LANE + 1)
        // across all four pairs of a 128-bit vector, then defer to the
        // vector-wide rot270 intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
3748#[doc = "Floating-point complex multiply accumulate"]
3749#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
3750#[inline(always)]
3751#[target_feature(enable = "neon,fcma")]
3752#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3753#[rustc_legacy_const_generics(3)]
3754#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3755pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
3756 a: float32x2_t,
3757 b: float32x2_t,
3758 c: float32x4_t,
3759) -> float32x2_t {
3760 static_assert_uimm_bits!(LANE, 1);
3761 unsafe {
3762 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3763 vcmla_rot270_f32(a, b, c)
3764 }
3765}
3766#[doc = "Floating-point complex multiply accumulate"]
3767#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
3768#[inline(always)]
3769#[target_feature(enable = "neon,fcma")]
3770#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3771#[rustc_legacy_const_generics(3)]
3772#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3773pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
3774 a: float32x4_t,
3775 b: float32x4_t,
3776 c: float32x4_t,
3777) -> float32x4_t {
3778 static_assert_uimm_bits!(LANE, 1);
3779 unsafe {
3780 let c: float32x4_t = simd_shuffle!(
3781 c,
3782 c,
3783 [
3784 2 * LANE as u32,
3785 2 * LANE as u32 + 1,
3786 2 * LANE as u32,
3787 2 * LANE as u32 + 1
3788 ]
3789 );
3790 vcmlaq_rot270_f32(a, b, c)
3791 }
3792}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin binding to the LLVM `vcmla.rot90` intrinsic for 4 x f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic needs only the `fcma`/`fp16` target features,
    // which this function already enables.
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin binding to the LLVM `vcmla.rot90` intrinsic for 8 x f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic needs only the `fcma`/`fp16` target features,
    // which this function already enables.
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin binding to the LLVM `vcmla.rot90` intrinsic for 2 x f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic needs only the `fcma` target feature, which this
    // function already enables.
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin binding to the LLVM `vcmla.rot90` intrinsic for 4 x f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic needs only the `fcma` target feature, which this
    // function already enables.
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin binding to the LLVM `vcmla.rot90` intrinsic for 2 x f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic needs only the `fcma` target feature, which this
    // function already enables.
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds two complex (re, im) element pairs, so the pair index fits in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair of `c` (elements 2*LANE and 2*LANE + 1)
        // across both pairs of a 64-bit vector, then defer to the vector-wide
        // rot90 intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` holds two complex (re, im) element pairs, so the pair index fits in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair of `c` (elements 2*LANE and 2*LANE + 1)
        // across all four pairs of a 128-bit vector, then defer to the
        // vector-wide rot90 intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
3939#[doc = "Floating-point complex multiply accumulate"]
3940#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
3941#[inline(always)]
3942#[target_feature(enable = "neon,fcma")]
3943#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3944#[rustc_legacy_const_generics(3)]
3945#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3946pub fn vcmla_rot90_lane_f32<const LANE: i32>(
3947 a: float32x2_t,
3948 b: float32x2_t,
3949 c: float32x2_t,
3950) -> float32x2_t {
3951 static_assert!(LANE == 0);
3952 unsafe {
3953 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3954 vcmla_rot90_f32(a, b, c)
3955 }
3956}
3957#[doc = "Floating-point complex multiply accumulate"]
3958#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
3959#[inline(always)]
3960#[target_feature(enable = "neon,fcma")]
3961#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3962#[rustc_legacy_const_generics(3)]
3963#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3964pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
3965 a: float32x4_t,
3966 b: float32x4_t,
3967 c: float32x2_t,
3968) -> float32x4_t {
3969 static_assert!(LANE == 0);
3970 unsafe {
3971 let c: float32x4_t = simd_shuffle!(
3972 c,
3973 c,
3974 [
3975 2 * LANE as u32,
3976 2 * LANE as u32 + 1,
3977 2 * LANE as u32,
3978 2 * LANE as u32 + 1
3979 ]
3980 );
3981 vcmlaq_rot90_f32(a, b, c)
3982 }
3983}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` holds four complex (re, im) element pairs, so the pair index needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair of `c` (elements 2*LANE and 2*LANE + 1)
        // across both pairs of a 64-bit vector, then defer to the vector-wide
        // rot90 intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds four complex (re, im) element pairs, so the pair index needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair of `c` (elements 2*LANE and 2*LANE + 1)
        // across all four pairs of a 128-bit vector, then defer to the
        // vector-wide rot90 intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
4046#[doc = "Floating-point complex multiply accumulate"]
4047#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
4048#[inline(always)]
4049#[target_feature(enable = "neon,fcma")]
4050#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
4051#[rustc_legacy_const_generics(3)]
4052#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
4053pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
4054 a: float32x2_t,
4055 b: float32x2_t,
4056 c: float32x4_t,
4057) -> float32x2_t {
4058 static_assert_uimm_bits!(LANE, 1);
4059 unsafe {
4060 let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
4061 vcmla_rot90_f32(a, b, c)
4062 }
4063}
4064#[doc = "Floating-point complex multiply accumulate"]
4065#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
4066#[inline(always)]
4067#[target_feature(enable = "neon,fcma")]
4068#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
4069#[rustc_legacy_const_generics(3)]
4070#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
4071pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
4072 a: float32x4_t,
4073 b: float32x4_t,
4074 c: float32x4_t,
4075) -> float32x4_t {
4076 static_assert_uimm_bits!(LANE, 1);
4077 unsafe {
4078 let c: float32x4_t = simd_shuffle!(
4079 c,
4080 c,
4081 [
4082 2 * LANE as u32,
4083 2 * LANE as u32 + 1,
4084 2 * LANE as u32,
4085 2 * LANE as u32 + 1
4086 ]
4087 );
4088 vcmlaq_rot90_f32(a, b, c)
4089 }
4090}
4091#[doc = "Insert vector element from another vector element"]
4092#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
4093#[inline(always)]
4094#[target_feature(enable = "neon")]
4095#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4096#[rustc_legacy_const_generics(1, 3)]
4097#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4098pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
4099 a: float32x2_t,
4100 b: float32x2_t,
4101) -> float32x2_t {
4102 static_assert_uimm_bits!(LANE1, 1);
4103 static_assert_uimm_bits!(LANE2, 1);
4104 unsafe {
4105 match LANE1 & 0b1 {
4106 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
4107 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
4108 _ => unreachable_unchecked(),
4109 }
4110 }
4111}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Shuffle indices 0..8 select from `a` and 8..16 from `b`: every arm
        // keeps `a` intact except destination lane LANE1, which is replaced
        // by `b[LANE2]` (index 8 + LANE2). One arm per destination lane is
        // required because the index array must be a compile-time constant.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 & 0b111 is always in 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Shuffle indices 0..4 select from `a` and 4..8 from `b`: keep `a`
        // intact except destination lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 & 0b11 is always in 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
4156#[doc = "Insert vector element from another vector element"]
4157#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
4158#[inline(always)]
4159#[target_feature(enable = "neon")]
4160#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4161#[rustc_legacy_const_generics(1, 3)]
4162#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4163pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
4164 static_assert_uimm_bits!(LANE1, 1);
4165 static_assert_uimm_bits!(LANE2, 1);
4166 unsafe {
4167 match LANE1 & 0b1 {
4168 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
4169 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
4170 _ => unreachable_unchecked(),
4171 }
4172 }
4173}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Shuffle indices 0..8 select from `a` and 8..16 from `b`: every arm
        // keeps `a` intact except destination lane LANE1, replaced by
        // `b[LANE2]` (index 8 + LANE2). One arm per destination lane is
        // required because the index array must be a compile-time constant.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 & 0b111 is always in 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Shuffle indices 0..4 select from `a` and 4..8 from `b`: keep `a`
        // intact except destination lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 & 0b11 is always in 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
4221#[doc = "Insert vector element from another vector element"]
4222#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
4223#[inline(always)]
4224#[target_feature(enable = "neon")]
4225#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4226#[rustc_legacy_const_generics(1, 3)]
4227#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4228pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
4229 a: uint32x2_t,
4230 b: uint32x2_t,
4231) -> uint32x2_t {
4232 static_assert_uimm_bits!(LANE1, 1);
4233 static_assert_uimm_bits!(LANE2, 1);
4234 unsafe {
4235 match LANE1 & 0b1 {
4236 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
4237 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
4238 _ => unreachable_unchecked(),
4239 }
4240 }
4241}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Shuffle indices 0..8 select from `a` and 8..16 from `b`: every arm
        // keeps `a` intact except destination lane LANE1, replaced by
        // `b[LANE2]` (index 8 + LANE2). One arm per destination lane is
        // required because the index array must be a compile-time constant.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 & 0b111 is always in 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Shuffle indices 0..4 select from `a` and 4..8 from `b`: keep `a`
        // intact except destination lane LANE1, replaced by `b[LANE2]`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 & 0b11 is always in 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
4289#[doc = "Insert vector element from another vector element"]
4290#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
4291#[inline(always)]
4292#[target_feature(enable = "neon")]
4293#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4294#[rustc_legacy_const_generics(1, 3)]
4295#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4296pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
4297 a: float32x2_t,
4298 b: float32x4_t,
4299) -> float32x2_t {
4300 static_assert_uimm_bits!(LANE1, 1);
4301 static_assert_uimm_bits!(LANE2, 2);
4302 let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
4303 unsafe {
4304 match LANE1 & 0b1 {
4305 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
4306 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
4307 _ => unreachable_unchecked(),
4308 }
4309 }
4310}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes (the upper eight repeat `a` and are never selected
    // below) so the insertion shuffle can index `a` (0..16) and `b` (16..32).
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Keep the low eight lanes of `a` intact except destination lane
        // LANE1, which is replaced by `b[LANE2]` (index 16 + LANE2).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // LANE1 & 0b111 is always in 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to eight lanes (the upper four repeat `a` and are never
    // selected below) so the shuffle can index `a` (0..8) and `b` (8..16).
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Keep the low four lanes of `a` intact except destination lane
        // LANE1, which is replaced by `b[LANE2]` (index 8 + LANE2).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // LANE1 & 0b11 is always in 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
4361#[doc = "Insert vector element from another vector element"]
4362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
4363#[inline(always)]
4364#[target_feature(enable = "neon")]
4365#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4366#[rustc_legacy_const_generics(1, 3)]
4367#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4368pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
4369 a: int32x2_t,
4370 b: int32x4_t,
4371) -> int32x2_t {
4372 static_assert_uimm_bits!(LANE1, 1);
4373 static_assert_uimm_bits!(LANE2, 2);
4374 let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
4375 unsafe {
4376 match LANE1 & 0b1 {
4377 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
4378 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
4379 _ => unreachable_unchecked(),
4380 }
4381 }
4382}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes (the upper eight repeat `a` and are never selected
    // below) so the insertion shuffle can index `a` (0..16) and `b` (16..32).
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Keep the low eight lanes of `a` intact except destination lane
        // LANE1, which is replaced by `b[LANE2]` (index 16 + LANE2).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // LANE1 & 0b111 is always in 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to eight lanes (the upper four repeat `a` and are never
    // selected below) so the shuffle can index `a` (0..8) and `b` (8..16).
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Keep the low four lanes of `a` intact except destination lane
        // LANE1, which is replaced by `b[LANE2]` (index 8 + LANE2).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // LANE1 & 0b11 is always in 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    // LANE1: destination lane in `a` (0..=1); LANE2: source lane in `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes (by duplicating it) so both shuffle operands have the
    // same lane count; only lanes 0..=1 of the widened value are read below.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..=3 select from `a`, 4..=7 from `b`; `4 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b1` mask keeps the match
        // value in 0..=1, so the catch-all arm is unreachable.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    // LANE1: destination lane in `a` (0..=7); LANE2: source lane in `b` (0..=15).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes (by duplicating it) so both shuffle operands have
    // the same lane count; only lanes 0..=7 of the widened value are read below.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..=15 select from `a`, 16..=31 from `b`; `16 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b111` mask keeps the
        // match value in 0..=7, so the catch-all arm is unreachable.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    // LANE1: destination lane in `a` (0..=3); LANE2: source lane in `b` (0..=7).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes (by duplicating it) so both shuffle operands have the
    // same lane count; only lanes 0..=3 of the widened value are read below.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..=7 select from `a`, 8..=15 from `b`; `8 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b11` mask keeps the
        // match value in 0..=3, so the catch-all arm is unreachable.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    // LANE1: destination lane in `a` (0..=3); LANE2: source lane in `b` (0..=1).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes (by duplicating it) so both shuffle operands have the
    // same lane count; only lanes 0..=1 of the widened value are read below.
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..=3 select from `a`, 4..=7 from `b`; `4 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b11` mask keeps the
        // match value in 0..=3, so the catch-all arm is unreachable.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    // LANE1: destination lane in `a` (0..=1). `b` has a single lane, so the
    // only valid source lane is 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen the one-lane `b` to 2 lanes (by duplicating it) so both shuffle
    // operands have the same lane count.
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..=1 select from `a`, 2..=3 from `b`; `2 + LANE2` inserts
        // lane LANE2 (always 0) of `b` at position LANE1. The `& 0b1` mask
        // keeps the match value in 0..=1, so the catch-all arm is unreachable.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    // LANE1: destination lane in `a` (0..=1). `b` has a single lane, so the
    // only valid source lane is 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen the one-lane `b` to 2 lanes (by duplicating it) so both shuffle
    // operands have the same lane count.
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..=1 select from `a`, 2..=3 from `b`; `2 + LANE2` inserts
        // lane LANE2 (always 0) of `b` at position LANE1. The `& 0b1` mask
        // keeps the match value in 0..=1, so the catch-all arm is unreachable.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    // LANE1: destination lane in `a` (0..=1). `b` has a single lane, so the
    // only valid source lane is 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen the one-lane `b` to 2 lanes (by duplicating it) so both shuffle
    // operands have the same lane count.
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..=1 select from `a`, 2..=3 from `b`; `2 + LANE2` inserts
        // lane LANE2 (always 0) of `b` at position LANE1. The `& 0b1` mask
        // keeps the match value in 0..=1, so the catch-all arm is unreachable.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    // LANE1: destination lane in `a` (0..=1). `b` has a single lane, so the
    // only valid source lane is 0.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    // Widen the one-lane `b` to 2 lanes (by duplicating it) so both shuffle
    // operands have the same lane count.
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // Indices 0..=1 select from `a`, 2..=3 from `b`; `2 + LANE2` inserts
        // lane LANE2 (always 0) of `b` at position LANE1. The `& 0b1` mask
        // keeps the match value in 0..=1, so the catch-all arm is unreachable.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
    // LANE1: destination lane in `a` (0..=15); LANE2: source lane in `b` (0..=7).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` to 16 lanes (by duplicating it) so both shuffle operands have
    // the same lane count; only lanes 0..=7 of the widened value are read below.
    let b: int8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..=15 select from `a`, 16..=31 from `b`. Each arm keeps `a`
        // intact except at position LANE1, which takes lane LANE2 of `b`.
        // The `& 0b1111` mask keeps the match value in 0..=15, so the
        // catch-all arm is unreachable.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    // LANE1: destination lane in `a` (0..=7); LANE2: source lane in `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes (by duplicating it) so both shuffle operands have the
    // same lane count; only lanes 0..=3 of the widened value are read below.
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..=7 select from `a`, 8..=15 from `b`; `8 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b111` mask keeps the
        // match value in 0..=7, so the catch-all arm is unreachable.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    // LANE1: destination lane in `a` (0..=3); LANE2: source lane in `b` (0..=1).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes (by duplicating it) so both shuffle operands have the
    // same lane count; only lanes 0..=1 of the widened value are read below.
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..=3 select from `a`, 4..=7 from `b`; `4 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b11` mask keeps the
        // match value in 0..=3, so the catch-all arm is unreachable.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x16_t,
    b: uint8x8_t,
) -> uint8x16_t {
    // LANE1: destination lane in `a` (0..=15); LANE2: source lane in `b` (0..=7).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` to 16 lanes (by duplicating it) so both shuffle operands have
    // the same lane count; only lanes 0..=7 of the widened value are read below.
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..=15 select from `a`, 16..=31 from `b`. Each arm keeps `a`
        // intact except at position LANE1, which takes lane LANE2 of `b`.
        // The `& 0b1111` mask keeps the match value in 0..=15, so the
        // catch-all arm is unreachable.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x4_t,
) -> uint16x8_t {
    // LANE1: destination lane in `a` (0..=7); LANE2: source lane in `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes (by duplicating it) so both shuffle operands have the
    // same lane count; only lanes 0..=3 of the widened value are read below.
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..=7 select from `a`, 8..=15 from `b`; `8 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b111` mask keeps the
        // match value in 0..=7, so the catch-all arm is unreachable.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x2_t,
) -> uint32x4_t {
    // LANE1: destination lane in `a` (0..=3); LANE2: source lane in `b` (0..=1).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes (by duplicating it) so both shuffle operands have the
    // same lane count; only lanes 0..=1 of the widened value are read below.
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // Indices 0..=3 select from `a`, 4..=7 from `b`; `4 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b11` mask keeps the
        // match value in 0..=3, so the catch-all arm is unreachable.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x8_t,
) -> poly8x16_t {
    // LANE1: destination lane in `a` (0..=15); LANE2: source lane in `b` (0..=7).
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `b` to 16 lanes (by duplicating it) so both shuffle operands have
    // the same lane count; only lanes 0..=7 of the widened value are read below.
    let b: poly8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // Indices 0..=15 select from `a`, 16..=31 from `b`. Each arm keeps `a`
        // intact except at position LANE1, which takes lane LANE2 of `b`.
        // The `& 0b1111` mask keeps the match value in 0..=15, so the
        // catch-all arm is unreachable.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x4_t,
) -> poly16x8_t {
    // LANE1: destination lane in `a` (0..=7); LANE2: source lane in `b` (0..=3).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes (by duplicating it) so both shuffle operands have the
    // same lane count; only lanes 0..=3 of the widened value are read below.
    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // Indices 0..=7 select from `a`, 8..=15 from `b`; `8 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b111` mask keeps the
        // match value in 0..=7, so the catch-all arm is unreachable.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x4_t,
) -> float32x4_t {
    // LANE1: destination lane in `a` (0..=3); LANE2: source lane in `b` (0..=3).
    // Both vectors are already 4 lanes, so no widening step is needed.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Indices 0..=3 select from `a`, 4..=7 from `b`; `4 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b11` mask keeps the
        // match value in 0..=3, so the catch-all arm is unreachable.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x2_t,
) -> float64x2_t {
    // LANE1: destination lane in `a` (0..=1); LANE2: source lane in `b` (0..=1).
    // Both vectors are already 2 lanes, so no widening step is needed.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Indices 0..=1 select from `a`, 2..=3 from `b`; `2 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b1` mask keeps the
        // match value in 0..=1, so the catch-all arm is unreachable.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
    a: int8x16_t,
    b: int8x16_t,
) -> int8x16_t {
    // LANE1: destination lane in `a` (0..=15); LANE2: source lane in `b` (0..=15).
    // Both vectors are already 16 lanes, so no widening step is needed.
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        // Indices 0..=15 select from `a`, 16..=31 from `b`. Each arm keeps `a`
        // intact except at position LANE1, which takes lane LANE2 of `b`.
        // The `& 0b1111` mask keeps the match value in 0..=15, so the
        // catch-all arm is unreachable.
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x8_t,
) -> int16x8_t {
    // LANE1: destination lane in `a` (0..=7); LANE2: source lane in `b` (0..=7).
    // Both vectors are already 8 lanes, so no widening step is needed.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Indices 0..=7 select from `a`, 8..=15 from `b`; `8 + LANE2` inserts
        // lane LANE2 of `b` at position LANE1. The `& 0b111` mask keeps the
        // match value in 0..=7, so the catch-all arm is unreachable.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x4_t,
) -> int32x4_t {
    // Both lane indices must fit in 2 bits: each vector has 4 lanes.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 0..4 select from `a`, 4..8 from `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 was masked to 2 bits, so 0..=3 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x2_t,
) -> int64x2_t {
    // Both lane indices must fit in 1 bit: each vector has 2 lanes.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 0..2 select from `a`, 2..4 from `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked to 1 bit, so 0..=1 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
6357#[doc = "Insert vector element from another vector element"]
6358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6359#[inline(always)]
6360#[target_feature(enable = "neon")]
6361#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6362#[rustc_legacy_const_generics(1, 3)]
6363#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6364pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6365 a: uint8x16_t,
6366 b: uint8x16_t,
6367) -> uint8x16_t {
6368 static_assert_uimm_bits!(LANE1, 4);
6369 static_assert_uimm_bits!(LANE2, 4);
6370 unsafe {
6371 match LANE1 & 0b1111 {
6372 0 => simd_shuffle!(
6373 a,
6374 b,
6375 [
6376 16 + LANE2 as u32,
6377 1,
6378 2,
6379 3,
6380 4,
6381 5,
6382 6,
6383 7,
6384 8,
6385 9,
6386 10,
6387 11,
6388 12,
6389 13,
6390 14,
6391 15
6392 ]
6393 ),
6394 1 => simd_shuffle!(
6395 a,
6396 b,
6397 [
6398 0,
6399 16 + LANE2 as u32,
6400 2,
6401 3,
6402 4,
6403 5,
6404 6,
6405 7,
6406 8,
6407 9,
6408 10,
6409 11,
6410 12,
6411 13,
6412 14,
6413 15
6414 ]
6415 ),
6416 2 => simd_shuffle!(
6417 a,
6418 b,
6419 [
6420 0,
6421 1,
6422 16 + LANE2 as u32,
6423 3,
6424 4,
6425 5,
6426 6,
6427 7,
6428 8,
6429 9,
6430 10,
6431 11,
6432 12,
6433 13,
6434 14,
6435 15
6436 ]
6437 ),
6438 3 => simd_shuffle!(
6439 a,
6440 b,
6441 [
6442 0,
6443 1,
6444 2,
6445 16 + LANE2 as u32,
6446 4,
6447 5,
6448 6,
6449 7,
6450 8,
6451 9,
6452 10,
6453 11,
6454 12,
6455 13,
6456 14,
6457 15
6458 ]
6459 ),
6460 4 => simd_shuffle!(
6461 a,
6462 b,
6463 [
6464 0,
6465 1,
6466 2,
6467 3,
6468 16 + LANE2 as u32,
6469 5,
6470 6,
6471 7,
6472 8,
6473 9,
6474 10,
6475 11,
6476 12,
6477 13,
6478 14,
6479 15
6480 ]
6481 ),
6482 5 => simd_shuffle!(
6483 a,
6484 b,
6485 [
6486 0,
6487 1,
6488 2,
6489 3,
6490 4,
6491 16 + LANE2 as u32,
6492 6,
6493 7,
6494 8,
6495 9,
6496 10,
6497 11,
6498 12,
6499 13,
6500 14,
6501 15
6502 ]
6503 ),
6504 6 => simd_shuffle!(
6505 a,
6506 b,
6507 [
6508 0,
6509 1,
6510 2,
6511 3,
6512 4,
6513 5,
6514 16 + LANE2 as u32,
6515 7,
6516 8,
6517 9,
6518 10,
6519 11,
6520 12,
6521 13,
6522 14,
6523 15
6524 ]
6525 ),
6526 7 => simd_shuffle!(
6527 a,
6528 b,
6529 [
6530 0,
6531 1,
6532 2,
6533 3,
6534 4,
6535 5,
6536 6,
6537 16 + LANE2 as u32,
6538 8,
6539 9,
6540 10,
6541 11,
6542 12,
6543 13,
6544 14,
6545 15
6546 ]
6547 ),
6548 8 => simd_shuffle!(
6549 a,
6550 b,
6551 [
6552 0,
6553 1,
6554 2,
6555 3,
6556 4,
6557 5,
6558 6,
6559 7,
6560 16 + LANE2 as u32,
6561 9,
6562 10,
6563 11,
6564 12,
6565 13,
6566 14,
6567 15
6568 ]
6569 ),
6570 9 => simd_shuffle!(
6571 a,
6572 b,
6573 [
6574 0,
6575 1,
6576 2,
6577 3,
6578 4,
6579 5,
6580 6,
6581 7,
6582 8,
6583 16 + LANE2 as u32,
6584 10,
6585 11,
6586 12,
6587 13,
6588 14,
6589 15
6590 ]
6591 ),
6592 10 => simd_shuffle!(
6593 a,
6594 b,
6595 [
6596 0,
6597 1,
6598 2,
6599 3,
6600 4,
6601 5,
6602 6,
6603 7,
6604 8,
6605 9,
6606 16 + LANE2 as u32,
6607 11,
6608 12,
6609 13,
6610 14,
6611 15
6612 ]
6613 ),
6614 11 => simd_shuffle!(
6615 a,
6616 b,
6617 [
6618 0,
6619 1,
6620 2,
6621 3,
6622 4,
6623 5,
6624 6,
6625 7,
6626 8,
6627 9,
6628 10,
6629 16 + LANE2 as u32,
6630 12,
6631 13,
6632 14,
6633 15
6634 ]
6635 ),
6636 12 => simd_shuffle!(
6637 a,
6638 b,
6639 [
6640 0,
6641 1,
6642 2,
6643 3,
6644 4,
6645 5,
6646 6,
6647 7,
6648 8,
6649 9,
6650 10,
6651 11,
6652 16 + LANE2 as u32,
6653 13,
6654 14,
6655 15
6656 ]
6657 ),
6658 13 => simd_shuffle!(
6659 a,
6660 b,
6661 [
6662 0,
6663 1,
6664 2,
6665 3,
6666 4,
6667 5,
6668 6,
6669 7,
6670 8,
6671 9,
6672 10,
6673 11,
6674 12,
6675 16 + LANE2 as u32,
6676 14,
6677 15
6678 ]
6679 ),
6680 14 => simd_shuffle!(
6681 a,
6682 b,
6683 [
6684 0,
6685 1,
6686 2,
6687 3,
6688 4,
6689 5,
6690 6,
6691 7,
6692 8,
6693 9,
6694 10,
6695 11,
6696 12,
6697 13,
6698 16 + LANE2 as u32,
6699 15
6700 ]
6701 ),
6702 15 => simd_shuffle!(
6703 a,
6704 b,
6705 [
6706 0,
6707 1,
6708 2,
6709 3,
6710 4,
6711 5,
6712 6,
6713 7,
6714 8,
6715 9,
6716 10,
6717 11,
6718 12,
6719 13,
6720 14,
6721 16 + LANE2 as u32
6722 ]
6723 ),
6724 _ => unreachable_unchecked(),
6725 }
6726 }
6727}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x8_t,
) -> uint16x8_t {
    // Both lane indices must fit in 3 bits: each vector has 8 lanes.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 0..8 select from `a`, 8..16 from `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 was masked to 3 bits, so 0..=7 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    // Both lane indices must fit in 2 bits: each vector has 4 lanes.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 0..4 select from `a`, 4..8 from `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 was masked to 2 bits, so 0..=3 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    // Both lane indices must fit in 1 bit: each vector has 2 lanes.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 0..2 select from `a`, 2..4 from `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked to 1 bit, so 0..=1 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
6799#[doc = "Insert vector element from another vector element"]
6800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
6801#[inline(always)]
6802#[target_feature(enable = "neon")]
6803#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6804#[rustc_legacy_const_generics(1, 3)]
6805#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6806pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
6807 a: poly8x16_t,
6808 b: poly8x16_t,
6809) -> poly8x16_t {
6810 static_assert_uimm_bits!(LANE1, 4);
6811 static_assert_uimm_bits!(LANE2, 4);
6812 unsafe {
6813 match LANE1 & 0b1111 {
6814 0 => simd_shuffle!(
6815 a,
6816 b,
6817 [
6818 16 + LANE2 as u32,
6819 1,
6820 2,
6821 3,
6822 4,
6823 5,
6824 6,
6825 7,
6826 8,
6827 9,
6828 10,
6829 11,
6830 12,
6831 13,
6832 14,
6833 15
6834 ]
6835 ),
6836 1 => simd_shuffle!(
6837 a,
6838 b,
6839 [
6840 0,
6841 16 + LANE2 as u32,
6842 2,
6843 3,
6844 4,
6845 5,
6846 6,
6847 7,
6848 8,
6849 9,
6850 10,
6851 11,
6852 12,
6853 13,
6854 14,
6855 15
6856 ]
6857 ),
6858 2 => simd_shuffle!(
6859 a,
6860 b,
6861 [
6862 0,
6863 1,
6864 16 + LANE2 as u32,
6865 3,
6866 4,
6867 5,
6868 6,
6869 7,
6870 8,
6871 9,
6872 10,
6873 11,
6874 12,
6875 13,
6876 14,
6877 15
6878 ]
6879 ),
6880 3 => simd_shuffle!(
6881 a,
6882 b,
6883 [
6884 0,
6885 1,
6886 2,
6887 16 + LANE2 as u32,
6888 4,
6889 5,
6890 6,
6891 7,
6892 8,
6893 9,
6894 10,
6895 11,
6896 12,
6897 13,
6898 14,
6899 15
6900 ]
6901 ),
6902 4 => simd_shuffle!(
6903 a,
6904 b,
6905 [
6906 0,
6907 1,
6908 2,
6909 3,
6910 16 + LANE2 as u32,
6911 5,
6912 6,
6913 7,
6914 8,
6915 9,
6916 10,
6917 11,
6918 12,
6919 13,
6920 14,
6921 15
6922 ]
6923 ),
6924 5 => simd_shuffle!(
6925 a,
6926 b,
6927 [
6928 0,
6929 1,
6930 2,
6931 3,
6932 4,
6933 16 + LANE2 as u32,
6934 6,
6935 7,
6936 8,
6937 9,
6938 10,
6939 11,
6940 12,
6941 13,
6942 14,
6943 15
6944 ]
6945 ),
6946 6 => simd_shuffle!(
6947 a,
6948 b,
6949 [
6950 0,
6951 1,
6952 2,
6953 3,
6954 4,
6955 5,
6956 16 + LANE2 as u32,
6957 7,
6958 8,
6959 9,
6960 10,
6961 11,
6962 12,
6963 13,
6964 14,
6965 15
6966 ]
6967 ),
6968 7 => simd_shuffle!(
6969 a,
6970 b,
6971 [
6972 0,
6973 1,
6974 2,
6975 3,
6976 4,
6977 5,
6978 6,
6979 16 + LANE2 as u32,
6980 8,
6981 9,
6982 10,
6983 11,
6984 12,
6985 13,
6986 14,
6987 15
6988 ]
6989 ),
6990 8 => simd_shuffle!(
6991 a,
6992 b,
6993 [
6994 0,
6995 1,
6996 2,
6997 3,
6998 4,
6999 5,
7000 6,
7001 7,
7002 16 + LANE2 as u32,
7003 9,
7004 10,
7005 11,
7006 12,
7007 13,
7008 14,
7009 15
7010 ]
7011 ),
7012 9 => simd_shuffle!(
7013 a,
7014 b,
7015 [
7016 0,
7017 1,
7018 2,
7019 3,
7020 4,
7021 5,
7022 6,
7023 7,
7024 8,
7025 16 + LANE2 as u32,
7026 10,
7027 11,
7028 12,
7029 13,
7030 14,
7031 15
7032 ]
7033 ),
7034 10 => simd_shuffle!(
7035 a,
7036 b,
7037 [
7038 0,
7039 1,
7040 2,
7041 3,
7042 4,
7043 5,
7044 6,
7045 7,
7046 8,
7047 9,
7048 16 + LANE2 as u32,
7049 11,
7050 12,
7051 13,
7052 14,
7053 15
7054 ]
7055 ),
7056 11 => simd_shuffle!(
7057 a,
7058 b,
7059 [
7060 0,
7061 1,
7062 2,
7063 3,
7064 4,
7065 5,
7066 6,
7067 7,
7068 8,
7069 9,
7070 10,
7071 16 + LANE2 as u32,
7072 12,
7073 13,
7074 14,
7075 15
7076 ]
7077 ),
7078 12 => simd_shuffle!(
7079 a,
7080 b,
7081 [
7082 0,
7083 1,
7084 2,
7085 3,
7086 4,
7087 5,
7088 6,
7089 7,
7090 8,
7091 9,
7092 10,
7093 11,
7094 16 + LANE2 as u32,
7095 13,
7096 14,
7097 15
7098 ]
7099 ),
7100 13 => simd_shuffle!(
7101 a,
7102 b,
7103 [
7104 0,
7105 1,
7106 2,
7107 3,
7108 4,
7109 5,
7110 6,
7111 7,
7112 8,
7113 9,
7114 10,
7115 11,
7116 12,
7117 16 + LANE2 as u32,
7118 14,
7119 15
7120 ]
7121 ),
7122 14 => simd_shuffle!(
7123 a,
7124 b,
7125 [
7126 0,
7127 1,
7128 2,
7129 3,
7130 4,
7131 5,
7132 6,
7133 7,
7134 8,
7135 9,
7136 10,
7137 11,
7138 12,
7139 13,
7140 16 + LANE2 as u32,
7141 15
7142 ]
7143 ),
7144 15 => simd_shuffle!(
7145 a,
7146 b,
7147 [
7148 0,
7149 1,
7150 2,
7151 3,
7152 4,
7153 5,
7154 6,
7155 7,
7156 8,
7157 9,
7158 10,
7159 11,
7160 12,
7161 13,
7162 14,
7163 16 + LANE2 as u32
7164 ]
7165 ),
7166 _ => unreachable_unchecked(),
7167 }
7168 }
7169}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    // Both lane indices must fit in 3 bits: each vector has 8 lanes.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 0..8 select from `a`, 8..16 from `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 was masked to 3 bits, so 0..=7 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    // Both lane indices must fit in 1 bit: each vector has 2 lanes.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Result is `a` with lane LANE1 replaced by lane LANE2 of `b`;
        // shuffle indices 0..2 select from `a`, 2..4 from `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked to 1 bit, so 0..=1 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Create a new vector from a 64-bit bit pattern"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcreate_f64(a: u64) -> float64x1_t {
    // SAFETY: `u64` and `float64x1_t` are both exactly 64 bits wide; this is
    // a plain bit reinterpretation (hence `assert_instr(nop)` above).
    unsafe { transmute(a) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    // SAFETY: element-wise f64 -> f32 narrowing on NEON vector types; the
    // `neon` target feature required by this function guarantees support.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    // SAFETY: element-wise f32 -> f64 widening (exact, no rounding) on NEON
    // vector types; guarded by the required `neon` target feature.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    // SAFETY: element-wise signed-integer -> f64 conversion on NEON vector
    // types; guarded by the required `neon` target feature.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    // SAFETY: element-wise signed-integer -> f64 conversion on NEON vector
    // types; guarded by the required `neon` target feature.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    // SAFETY: element-wise unsigned-integer -> f64 conversion on NEON vector
    // types; guarded by the required `neon` target feature.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // SAFETY: element-wise unsigned-integer -> f64 conversion on NEON vector
    // types; guarded by the required `neon` target feature.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    // Narrow `b` to f16 and combine with `a`: `a` supplies the low half of
    // the result and the narrowed `b` the high half (FCVTN2 semantics).
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    // Take the high half (upper 4 lanes) of `a` and widen it to f32
    // (FCVTL2 semantics).
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // SAFETY: `simd_cast(b)` narrows f64x2 -> f32x2; the shuffle then places
    // `a` in lanes 0-1 and the narrowed `b` in lanes 2-3 (FCVTN2 semantics).
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    // SAFETY: the shuffle extracts the high half (lanes 2-3) of `a`, and
    // `simd_cast` widens those two f32 lanes to f64 (FCVTL2 semantics).
    unsafe {
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        simd_cast(b)
    }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // N is the number of fraction bits; valid range for 64-bit lanes is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Private binding to the LLVM intrinsic backing the fixed-point SCVTF.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: the intrinsic is available whenever `neon` is enabled, which
    // this function's `target_feature` attribute requires; N was validated.
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // N is the number of fraction bits; valid range for 64-bit lanes is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Private binding to the LLVM intrinsic backing the fixed-point SCVTF.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: the intrinsic is available whenever `neon` is enabled, which
    // this function's `target_feature` attribute requires; N was validated.
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // N is the number of fraction bits; valid range for 64-bit lanes is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Private binding to the LLVM intrinsic backing the fixed-point UCVTF.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: the intrinsic is available whenever `neon` is enabled, which
    // this function's `target_feature` attribute requires; N was validated.
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // N is the number of fraction bits; valid range for 64-bit lanes is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Private binding to the LLVM intrinsic backing the fixed-point UCVTF.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: the intrinsic is available whenever `neon` is enabled, which
    // this function's `target_feature` attribute requires; N was validated.
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // N is the number of fraction bits; valid range for 64-bit lanes is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Private binding to the LLVM intrinsic backing the fixed-point FCVTZS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: the intrinsic is available whenever `neon` is enabled, which
    // this function's `target_feature` attribute requires; N was validated.
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // N is the number of fraction bits; valid range for 64-bit lanes is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Private binding to the LLVM intrinsic backing the fixed-point FCVTZS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: the intrinsic is available whenever `neon` is enabled, which
    // this function's `target_feature` attribute requires; N was validated.
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    // N is the number of fraction bits; valid range for 64-bit lanes is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Private binding to the LLVM intrinsic backing the fixed-point FCVTZU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    // SAFETY: the intrinsic is available whenever `neon` is enabled, which
    // this function's `target_feature` attribute requires; N was validated.
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    // N is the number of fraction bits; valid range for 64-bit lanes is 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Private binding to the LLVM intrinsic backing the fixed-point FCVTZU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    // SAFETY: the intrinsic is available whenever `neon` is enabled, which
    // this function's `target_feature` attribute requires; N was validated.
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    // Binds to LLVM's saturating fptosi, which matches FCVTZS semantics
    // (out-of-range values clamp instead of producing poison).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: available whenever `neon` is enabled, which this function's
    // `target_feature` attribute requires.
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Binds to LLVM's saturating fptosi, which matches FCVTZS semantics
    // (out-of-range values clamp instead of producing poison).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: available whenever `neon` is enabled, which this function's
    // `target_feature` attribute requires.
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Binds to LLVM's saturating fptoui, which matches FCVTZU semantics
    // (out-of-range values clamp instead of producing poison).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: available whenever `neon` is enabled, which this function's
    // `target_feature` attribute requires.
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Binds to LLVM's saturating fptoui, which matches FCVTZU semantics
    // (out-of-range values clamp instead of producing poison).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: available whenever `neon` is enabled, which this function's
    // `target_feature` attribute requires.
    unsafe { _vcvtq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    // Private binding to the LLVM intrinsic backing FCVTAS (round to
    // nearest, ties away from zero).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: available whenever `neon` and `fp16` are enabled, which this
    // function's `target_feature` attribute requires.
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Private binding to the LLVM intrinsic backing FCVTAS (round to
    // nearest, ties away from zero).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: available whenever `neon` and `fp16` are enabled, which this
    // function's `target_feature` attribute requires.
    unsafe { _vcvtaq_s16_f16(a) }
}
7564#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7565#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
7566#[inline(always)]
7567#[target_feature(enable = "neon")]
7568#[cfg_attr(test, assert_instr(fcvtas))]
7569#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7570pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
7571 unsafe extern "unadjusted" {
7572 #[cfg_attr(
7573 any(target_arch = "aarch64", target_arch = "arm64ec"),
7574 link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
7575 )]
7576 fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
7577 }
7578 unsafe { _vcvta_s32_f32(a) }
7579}
7580#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7581#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
7582#[inline(always)]
7583#[target_feature(enable = "neon")]
7584#[cfg_attr(test, assert_instr(fcvtas))]
7585#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7586pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
7587 unsafe extern "unadjusted" {
7588 #[cfg_attr(
7589 any(target_arch = "aarch64", target_arch = "arm64ec"),
7590 link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
7591 )]
7592 fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
7593 }
7594 unsafe { _vcvtaq_s32_f32(a) }
7595}
7596#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7597#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
7598#[inline(always)]
7599#[target_feature(enable = "neon")]
7600#[cfg_attr(test, assert_instr(fcvtas))]
7601#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7602pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
7603 unsafe extern "unadjusted" {
7604 #[cfg_attr(
7605 any(target_arch = "aarch64", target_arch = "arm64ec"),
7606 link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
7607 )]
7608 fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
7609 }
7610 unsafe { _vcvta_s64_f64(a) }
7611}
7612#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7613#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
7614#[inline(always)]
7615#[target_feature(enable = "neon")]
7616#[cfg_attr(test, assert_instr(fcvtas))]
7617#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7618pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
7619 unsafe extern "unadjusted" {
7620 #[cfg_attr(
7621 any(target_arch = "aarch64", target_arch = "arm64ec"),
7622 link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
7623 )]
7624 fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
7625 }
7626 unsafe { _vcvtaq_s64_f64(a) }
7627}
7628#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7629#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
7630#[inline(always)]
7631#[cfg_attr(test, assert_instr(fcvtau))]
7632#[target_feature(enable = "neon,fp16")]
7633#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
7634#[cfg(not(target_arch = "arm64ec"))]
7635pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
7636 unsafe extern "unadjusted" {
7637 #[cfg_attr(
7638 any(target_arch = "aarch64", target_arch = "arm64ec"),
7639 link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
7640 )]
7641 fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
7642 }
7643 unsafe { _vcvta_u16_f16(a) }
7644}
7645#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7646#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
7647#[inline(always)]
7648#[cfg_attr(test, assert_instr(fcvtau))]
7649#[target_feature(enable = "neon,fp16")]
7650#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
7651#[cfg(not(target_arch = "arm64ec"))]
7652pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
7653 unsafe extern "unadjusted" {
7654 #[cfg_attr(
7655 any(target_arch = "aarch64", target_arch = "arm64ec"),
7656 link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
7657 )]
7658 fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
7659 }
7660 unsafe { _vcvtaq_u16_f16(a) }
7661}
7662#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7663#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
7664#[inline(always)]
7665#[target_feature(enable = "neon")]
7666#[cfg_attr(test, assert_instr(fcvtau))]
7667#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7668pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
7669 unsafe extern "unadjusted" {
7670 #[cfg_attr(
7671 any(target_arch = "aarch64", target_arch = "arm64ec"),
7672 link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
7673 )]
7674 fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
7675 }
7676 unsafe { _vcvta_u32_f32(a) }
7677}
7678#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7679#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
7680#[inline(always)]
7681#[target_feature(enable = "neon")]
7682#[cfg_attr(test, assert_instr(fcvtau))]
7683#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7684pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
7685 unsafe extern "unadjusted" {
7686 #[cfg_attr(
7687 any(target_arch = "aarch64", target_arch = "arm64ec"),
7688 link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
7689 )]
7690 fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
7691 }
7692 unsafe { _vcvtaq_u32_f32(a) }
7693}
7694#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7695#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
7696#[inline(always)]
7697#[target_feature(enable = "neon")]
7698#[cfg_attr(test, assert_instr(fcvtau))]
7699#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7700pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
7701 unsafe extern "unadjusted" {
7702 #[cfg_attr(
7703 any(target_arch = "aarch64", target_arch = "arm64ec"),
7704 link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
7705 )]
7706 fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
7707 }
7708 unsafe { _vcvta_u64_f64(a) }
7709}
7710#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7711#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
7712#[inline(always)]
7713#[target_feature(enable = "neon")]
7714#[cfg_attr(test, assert_instr(fcvtau))]
7715#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7716pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
7717 unsafe extern "unadjusted" {
7718 #[cfg_attr(
7719 any(target_arch = "aarch64", target_arch = "arm64ec"),
7720 link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
7721 )]
7722 fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
7723 }
7724 unsafe { _vcvtaq_u64_f64(a) }
7725}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    // Reuses the 32-bit FCVTAS conversion and narrows the result.
    // NOTE(review): `as i16` wraps (not saturates) for results outside the
    // i16 range, which f16 inputs above 32767.0 can produce — confirm this
    // matches the ACLE definition of vcvtah_s16_f16.
    vcvtah_s32_f16(a) as i16
}
7736#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7737#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
7738#[inline(always)]
7739#[cfg_attr(test, assert_instr(fcvtas))]
7740#[target_feature(enable = "neon,fp16")]
7741#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7742#[cfg(not(target_arch = "arm64ec"))]
7743pub fn vcvtah_s32_f16(a: f16) -> i32 {
7744 unsafe extern "unadjusted" {
7745 #[cfg_attr(
7746 any(target_arch = "aarch64", target_arch = "arm64ec"),
7747 link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
7748 )]
7749 fn _vcvtah_s32_f16(a: f16) -> i32;
7750 }
7751 unsafe { _vcvtah_s32_f16(a) }
7752}
7753#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7754#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
7755#[inline(always)]
7756#[cfg_attr(test, assert_instr(fcvtas))]
7757#[target_feature(enable = "neon,fp16")]
7758#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7759#[cfg(not(target_arch = "arm64ec"))]
7760pub fn vcvtah_s64_f16(a: f16) -> i64 {
7761 unsafe extern "unadjusted" {
7762 #[cfg_attr(
7763 any(target_arch = "aarch64", target_arch = "arm64ec"),
7764 link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
7765 )]
7766 fn _vcvtah_s64_f16(a: f16) -> i64;
7767 }
7768 unsafe { _vcvtah_s64_f16(a) }
7769}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    // Reuses the 32-bit FCVTAU conversion and narrows the result.
    // NOTE(review): `as u16` truncates for results above u16::MAX, which f16
    // inputs above 65535.0 could produce — confirm this matches ACLE.
    vcvtah_u32_f16(a) as u16
}
7780#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
7782#[inline(always)]
7783#[cfg_attr(test, assert_instr(fcvtau))]
7784#[target_feature(enable = "neon,fp16")]
7785#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7786#[cfg(not(target_arch = "arm64ec"))]
7787pub fn vcvtah_u32_f16(a: f16) -> u32 {
7788 unsafe extern "unadjusted" {
7789 #[cfg_attr(
7790 any(target_arch = "aarch64", target_arch = "arm64ec"),
7791 link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
7792 )]
7793 fn _vcvtah_u32_f16(a: f16) -> u32;
7794 }
7795 unsafe { _vcvtah_u32_f16(a) }
7796}
7797#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
7799#[inline(always)]
7800#[cfg_attr(test, assert_instr(fcvtau))]
7801#[target_feature(enable = "neon,fp16")]
7802#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7803#[cfg(not(target_arch = "arm64ec"))]
7804pub fn vcvtah_u64_f16(a: f16) -> u64 {
7805 unsafe extern "unadjusted" {
7806 #[cfg_attr(
7807 any(target_arch = "aarch64", target_arch = "arm64ec"),
7808 link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
7809 )]
7810 fn _vcvtah_u64_f16(a: f16) -> u64;
7811 }
7812 unsafe { _vcvtah_u64_f16(a) }
7813}
7814#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
7816#[inline(always)]
7817#[target_feature(enable = "neon")]
7818#[cfg_attr(test, assert_instr(fcvtas))]
7819#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7820pub fn vcvtas_s32_f32(a: f32) -> i32 {
7821 unsafe extern "unadjusted" {
7822 #[cfg_attr(
7823 any(target_arch = "aarch64", target_arch = "arm64ec"),
7824 link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
7825 )]
7826 fn _vcvtas_s32_f32(a: f32) -> i32;
7827 }
7828 unsafe { _vcvtas_s32_f32(a) }
7829}
7830#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7831#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
7832#[inline(always)]
7833#[target_feature(enable = "neon")]
7834#[cfg_attr(test, assert_instr(fcvtas))]
7835#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7836pub fn vcvtad_s64_f64(a: f64) -> i64 {
7837 unsafe extern "unadjusted" {
7838 #[cfg_attr(
7839 any(target_arch = "aarch64", target_arch = "arm64ec"),
7840 link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
7841 )]
7842 fn _vcvtad_s64_f64(a: f64) -> i64;
7843 }
7844 unsafe { _vcvtad_s64_f64(a) }
7845}
7846#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
7848#[inline(always)]
7849#[target_feature(enable = "neon")]
7850#[cfg_attr(test, assert_instr(fcvtau))]
7851#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7852pub fn vcvtas_u32_f32(a: f32) -> u32 {
7853 unsafe extern "unadjusted" {
7854 #[cfg_attr(
7855 any(target_arch = "aarch64", target_arch = "arm64ec"),
7856 link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
7857 )]
7858 fn _vcvtas_u32_f32(a: f32) -> u32;
7859 }
7860 unsafe { _vcvtas_u32_f32(a) }
7861}
7862#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
7864#[inline(always)]
7865#[target_feature(enable = "neon")]
7866#[cfg_attr(test, assert_instr(fcvtau))]
7867#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7868pub fn vcvtad_u64_f64(a: f64) -> u64 {
7869 unsafe extern "unadjusted" {
7870 #[cfg_attr(
7871 any(target_arch = "aarch64", target_arch = "arm64ec"),
7872 link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
7873 )]
7874 fn _vcvtad_u64_f64(a: f64) -> u64;
7875 }
7876 unsafe { _vcvtad_u64_f64(a) }
7877}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    // A plain integer-to-float `as` cast is sufficient here; on AArch64 it
    // lowers to the SCVTF instruction checked by `assert_instr` above.
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    // A plain integer-to-float `as` cast is sufficient here; on AArch64 it
    // lowers to the SCVTF instruction checked by `assert_instr` above.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    // Signed integer to half-precision via `as`; lowers to the SCVTF
    // instruction checked by `assert_instr` above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    // Signed integer to half-precision via `as` (rounded when the value is
    // not exactly representable in f16); lowers to SCVTF per `assert_instr`.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    // Signed integer to half-precision via `as` (rounded when the value is
    // not exactly representable in f16); lowers to SCVTF per `assert_instr`.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    // Unsigned integer to half-precision via `as`; lowers to the UCVTF
    // instruction checked by `assert_instr` above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    // Unsigned integer to half-precision via `as` (rounded when the value is
    // not exactly representable in f16); lowers to UCVTF per `assert_instr`.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    // Unsigned integer to half-precision via `as` (rounded when the value is
    // not exactly representable in f16); lowers to UCVTF per `assert_instr`.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    // N is the number of fixed-point fraction bits; valid range is 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Sign-extend to i32 (value-preserving) and reuse the 32-bit intrinsic.
    vcvth_n_f16_s32::<N>(a as i32)
}
7968#[doc = "Fixed-point convert to floating-point"]
7969#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
7970#[inline(always)]
7971#[cfg_attr(test, assert_instr(scvtf, N = 2))]
7972#[rustc_legacy_const_generics(1)]
7973#[target_feature(enable = "neon,fp16")]
7974#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7975#[cfg(not(target_arch = "arm64ec"))]
7976pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
7977 static_assert!(N >= 1 && N <= 16);
7978 unsafe extern "unadjusted" {
7979 #[cfg_attr(
7980 any(target_arch = "aarch64", target_arch = "arm64ec"),
7981 link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
7982 )]
7983 fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
7984 }
7985 unsafe { _vcvth_n_f16_s32(a, N) }
7986}
7987#[doc = "Fixed-point convert to floating-point"]
7988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
7989#[inline(always)]
7990#[cfg_attr(test, assert_instr(scvtf, N = 2))]
7991#[rustc_legacy_const_generics(1)]
7992#[target_feature(enable = "neon,fp16")]
7993#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7994#[cfg(not(target_arch = "arm64ec"))]
7995pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
7996 static_assert!(N >= 1 && N <= 16);
7997 unsafe extern "unadjusted" {
7998 #[cfg_attr(
7999 any(target_arch = "aarch64", target_arch = "arm64ec"),
8000 link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
8001 )]
8002 fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
8003 }
8004 unsafe { _vcvth_n_f16_s64(a, N) }
8005}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    // N is the number of fixed-point fraction bits; valid range is 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Zero-extend to u32 (value-preserving) and reuse the 32-bit intrinsic.
    vcvth_n_f16_u32::<N>(a as u32)
}
8018#[doc = "Fixed-point convert to floating-point"]
8019#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
8020#[inline(always)]
8021#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
8022#[rustc_legacy_const_generics(1)]
8023#[target_feature(enable = "neon,fp16")]
8024#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8025#[cfg(not(target_arch = "arm64ec"))]
8026pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
8027 static_assert!(N >= 1 && N <= 16);
8028 unsafe extern "unadjusted" {
8029 #[cfg_attr(
8030 any(target_arch = "aarch64", target_arch = "arm64ec"),
8031 link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
8032 )]
8033 fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
8034 }
8035 unsafe { _vcvth_n_f16_u32(a, N) }
8036}
8037#[doc = "Fixed-point convert to floating-point"]
8038#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
8039#[inline(always)]
8040#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
8041#[rustc_legacy_const_generics(1)]
8042#[target_feature(enable = "neon,fp16")]
8043#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8044#[cfg(not(target_arch = "arm64ec"))]
8045pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
8046 static_assert!(N >= 1 && N <= 16);
8047 unsafe extern "unadjusted" {
8048 #[cfg_attr(
8049 any(target_arch = "aarch64", target_arch = "arm64ec"),
8050 link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
8051 )]
8052 fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
8053 }
8054 unsafe { _vcvth_n_f16_u64(a, N) }
8055}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    // N is the number of fixed-point fraction bits; valid range is 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Converts via the 32-bit intrinsic and narrows the result.
    // NOTE(review): `as i16` wraps (not saturates) for results outside the
    // i16 range — confirm this matches the ACLE definition.
    vcvth_n_s32_f16::<N>(a) as i16
}
8068#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
8069#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
8070#[inline(always)]
8071#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
8072#[rustc_legacy_const_generics(1)]
8073#[target_feature(enable = "neon,fp16")]
8074#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8075#[cfg(not(target_arch = "arm64ec"))]
8076pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
8077 static_assert!(N >= 1 && N <= 16);
8078 unsafe extern "unadjusted" {
8079 #[cfg_attr(
8080 any(target_arch = "aarch64", target_arch = "arm64ec"),
8081 link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
8082 )]
8083 fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
8084 }
8085 unsafe { _vcvth_n_s32_f16(a, N) }
8086}
8087#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
8088#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
8089#[inline(always)]
8090#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
8091#[rustc_legacy_const_generics(1)]
8092#[target_feature(enable = "neon,fp16")]
8093#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8094#[cfg(not(target_arch = "arm64ec"))]
8095pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
8096 static_assert!(N >= 1 && N <= 16);
8097 unsafe extern "unadjusted" {
8098 #[cfg_attr(
8099 any(target_arch = "aarch64", target_arch = "arm64ec"),
8100 link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
8101 )]
8102 fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
8103 }
8104 unsafe { _vcvth_n_s64_f16(a, N) }
8105}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    // N is the number of fixed-point fraction bits; valid range is 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Converts via the 32-bit intrinsic and narrows the result.
    // NOTE(review): `as u16` truncates for results above u16::MAX — confirm
    // this matches the ACLE definition.
    vcvth_n_u32_f16::<N>(a) as u16
}
8118#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
8119#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
8120#[inline(always)]
8121#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
8122#[rustc_legacy_const_generics(1)]
8123#[target_feature(enable = "neon,fp16")]
8124#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8125#[cfg(not(target_arch = "arm64ec"))]
8126pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
8127 static_assert!(N >= 1 && N <= 16);
8128 unsafe extern "unadjusted" {
8129 #[cfg_attr(
8130 any(target_arch = "aarch64", target_arch = "arm64ec"),
8131 link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
8132 )]
8133 fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
8134 }
8135 unsafe { _vcvth_n_u32_f16(a, N) }
8136}
8137#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
8138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
8139#[inline(always)]
8140#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
8141#[rustc_legacy_const_generics(1)]
8142#[target_feature(enable = "neon,fp16")]
8143#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8144#[cfg(not(target_arch = "arm64ec"))]
8145pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
8146 static_assert!(N >= 1 && N <= 16);
8147 unsafe extern "unadjusted" {
8148 #[cfg_attr(
8149 any(target_arch = "aarch64", target_arch = "arm64ec"),
8150 link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
8151 )]
8152 fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
8153 }
8154 unsafe { _vcvth_n_u64_f16(a, N) }
8155}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    // Rust float-to-int `as` casts saturate and round toward zero (NaN -> 0),
    // matching the FCVTZS instruction checked by `assert_instr` above.
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    // Rust float-to-int `as` casts saturate and round toward zero (NaN -> 0),
    // matching the FCVTZS instruction checked by `assert_instr` above.
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    // Rust float-to-int `as` casts saturate and round toward zero (NaN -> 0),
    // matching the FCVTZS instruction checked by `assert_instr` above.
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    // Rust float-to-int `as` casts saturate and round toward zero (negative
    // and NaN inputs -> 0), matching FCVTZU per `assert_instr` above.
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    // Rust float-to-int `as` casts saturate and round toward zero (negative
    // and NaN inputs -> 0), matching FCVTZU per `assert_instr` above.
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    // Rust float-to-int `as` casts saturate and round toward zero (negative
    // and NaN inputs -> 0), matching FCVTZU per `assert_instr` above.
    a as u64
}
8216#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8217#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
8218#[inline(always)]
8219#[cfg_attr(test, assert_instr(fcvtms))]
8220#[target_feature(enable = "neon,fp16")]
8221#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
8222#[cfg(not(target_arch = "arm64ec"))]
8223pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
8224 unsafe extern "unadjusted" {
8225 #[cfg_attr(
8226 any(target_arch = "aarch64", target_arch = "arm64ec"),
8227 link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
8228 )]
8229 fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
8230 }
8231 unsafe { _vcvtm_s16_f16(a) }
8232}
8233#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8234#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
8235#[inline(always)]
8236#[cfg_attr(test, assert_instr(fcvtms))]
8237#[target_feature(enable = "neon,fp16")]
8238#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
8239#[cfg(not(target_arch = "arm64ec"))]
8240pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
8241 unsafe extern "unadjusted" {
8242 #[cfg_attr(
8243 any(target_arch = "aarch64", target_arch = "arm64ec"),
8244 link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
8245 )]
8246 fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
8247 }
8248 unsafe { _vcvtmq_s16_f16(a) }
8249}
8250#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8251#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
8252#[inline(always)]
8253#[target_feature(enable = "neon")]
8254#[cfg_attr(test, assert_instr(fcvtms))]
8255#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8256pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
8257 unsafe extern "unadjusted" {
8258 #[cfg_attr(
8259 any(target_arch = "aarch64", target_arch = "arm64ec"),
8260 link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
8261 )]
8262 fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
8263 }
8264 unsafe { _vcvtm_s32_f32(a) }
8265}
8266#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8267#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
8268#[inline(always)]
8269#[target_feature(enable = "neon")]
8270#[cfg_attr(test, assert_instr(fcvtms))]
8271#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8272pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
8273 unsafe extern "unadjusted" {
8274 #[cfg_attr(
8275 any(target_arch = "aarch64", target_arch = "arm64ec"),
8276 link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
8277 )]
8278 fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
8279 }
8280 unsafe { _vcvtmq_s32_f32(a) }
8281}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    // Reuse the 32-bit conversion, then truncate the result to 16 bits.
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    // Reuse the 32-bit conversion, then truncate the result to 16 bits.
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    // Reuse the 32-bit conversion, then truncate the result to 16 bits.
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    // Reuse the 32-bit conversion, then truncate the result to 16 bits.
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtnd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target features
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the LLVM intrinsic backing this function.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the target feature
    // required (and guaranteed) by `#[target_feature]` on this function.
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    // Delegates to the 32-bit conversion, then narrows with an `as` cast.
    // NOTE(review): the `as` cast wraps for i32 results outside i16's range
    // instead of saturating — TODO confirm this matches the intended
    // FCVTPS 16-bit destination semantics.
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    // Delegates to the 32-bit conversion, then narrows with an `as` cast.
    // NOTE(review): the `as` cast truncates u32 results above u16::MAX
    // instead of saturating — TODO confirm against the FCVTPU 16-bit
    // destination semantics.
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtph_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtpd_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    // A plain `as` cast is sufficient here; assert_instr above checks that it
    // lowers to a single UCVTF instruction.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    // A plain `as` cast is sufficient here; assert_instr above checks that it
    // lowers to a single UCVTF instruction.
    a as f64
}
9278#[doc = "Fixed-point convert to floating-point"]
9279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9280#[inline(always)]
9281#[target_feature(enable = "neon")]
9282#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9283#[rustc_legacy_const_generics(1)]
9284#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9285pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9286 static_assert!(N >= 1 && N <= 64);
9287 unsafe extern "unadjusted" {
9288 #[cfg_attr(
9289 any(target_arch = "aarch64", target_arch = "arm64ec"),
9290 link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9291 )]
9292 fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9293 }
9294 unsafe { _vcvts_n_f32_s32(a, N) }
9295}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    // N is the number of fraction bits; 1..=64 is valid for a 64-bit source.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    // N is the number of fraction bits; 1..=32 is valid for a 32-bit source.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    // N is the number of fraction bits; 1..=64 is valid for a 64-bit source.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    // N is the number of fraction bits; 1..=32 is valid for a 32-bit result.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvts_n_s32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    // N is the number of fraction bits; 1..=64 is valid for a 64-bit result.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
        )]
        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtd_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    // N is the number of fraction bits; 1..=32 is valid for a 32-bit result.
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
        )]
        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvts_n_u32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    // N is the number of fraction bits; 1..=64 is valid for a 64-bit result.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
        )]
        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtd_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_s32_f32(a: f32) -> i32 {
    // A plain `as` cast (saturating float-to-int in Rust) is sufficient;
    // assert_instr above checks that it lowers to a single FCVTZS.
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_s64_f64(a: f64) -> i64 {
    // A plain `as` cast (saturating float-to-int in Rust) is sufficient;
    // assert_instr above checks that it lowers to a single FCVTZS.
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_u32_f32(a: f32) -> u32 {
    // A plain `as` cast (saturating float-to-int in Rust) is sufficient;
    // assert_instr above checks that it lowers to a single FCVTZU.
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_u64_f64(a: f64) -> u64 {
    // A plain `as` cast (saturating float-to-int in Rust) is sufficient;
    // assert_instr above checks that it lowers to a single FCVTZU.
    a as u64
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
        )]
        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
    }
    // SAFETY: the extern declaration above matches the LLVM intrinsic's signature.
    unsafe { _vcvtx_f32_f64(a) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Narrow `b` with the rounding-to-odd conversion and concatenate it after
    // the existing low half `a` (lanes [0,1] from `a`, [2,3] from the result).
    // SAFETY: the shuffle indices 0..4 are in range for the 4 combined lanes.
    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtxd_f32_f64(a: f64) -> f32 {
    // Scalar form: broadcast the input into a 2-lane vector, run the vector
    // conversion, then take lane 0.
    // SAFETY: lane index 0 is always valid for a 2-lane result.
    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise division of two float vectors of the same shape.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise division of two float vectors of the same shape.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise division of two float vectors of the same shape.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise division of two float vectors of the same shape.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise division of two float vectors of the same shape.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise division of two float vectors of the same shape.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivh_f16(a: f16, b: f16) -> f16 {
    // Scalar form: plain Rust division; assert_instr checks it becomes FDIV.
    a / b
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    // A 1-lane vector has only lane 0, so duplicating it is the identity.
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    // A 1-lane vector has only lane 0, so duplicating it is the identity.
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    // Take lane N of the 2-lane source and re-wrap it as a 1-lane vector.
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N fits in 1 bit (checked above), so it is a valid lane index.
    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    // Take lane N of the 2-lane source and re-wrap it as a 1-lane vector.
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N fits in 1 bit (checked above), so it is a valid lane index.
    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    // 8 lanes -> lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    // 8 lanes -> lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    // 8 lanes -> lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    // 8 lanes -> lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    // 8 lanes -> lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    // 8 lanes -> lane index needs 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    // 16 lanes -> lane index needs 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    // 16 lanes -> lane index needs 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    // 16 lanes -> lane index needs 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // SAFETY: N is a valid lane index, enforced by the static assert above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    // A 1-lane vector has only lane 0.
    static_assert!(N == 0);
    // SAFETY: N == 0 is the only valid lane index, enforced above.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
    // A 1-lane vector has only lane 0.
    static_assert!(N == 0);
    // SAFETY: N == 0 is the only valid lane index, enforced above.
    unsafe { simd_extract!(a, N as u32) }
}
9723#[doc = "Set all vector lanes to the same value"]
9724#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
9725#[inline(always)]
9726#[target_feature(enable = "neon")]
9727#[cfg_attr(test, assert_instr(nop, N = 0))]
9728#[rustc_legacy_const_generics(1)]
9729#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9730pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
9731 static_assert!(N == 0);
9732 unsafe { simd_extract!(a, N as u32) }
9733}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
    // 4 f16 lanes: N must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
    // 8 f16 lanes: N must fit in 3 unsigned bits (0..=7).
    static_assert_uimm_bits!(N, 4);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    // Single-lane source: only index 0 is valid.
    static_assert!(N == 0);
    // SAFETY: the shuffle duplicates the (validated) lane N into both output lanes.
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    // Single-lane source: only index 0 is valid.
    static_assert!(N == 0);
    // SAFETY: the shuffle duplicates the (validated) lane N into both output lanes.
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    // 2 lanes: N must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: the shuffle duplicates the (validated) lane N into both output lanes.
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    // 2 lanes: N must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: the shuffle duplicates the (validated) lane N into both output lanes.
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    // 2 lanes: N must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    // 2 lanes: N must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
    // 2 lanes: N must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
    // 2 lanes: N must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    // 2 lanes: N must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    // 2 lanes: N must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    // 4 lanes: N must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    // 4 lanes: N must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    // 4 lanes: N must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    // 4 lanes: N must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    // 4 lanes: N must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    // 4 lanes: N must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(N, 2);
    // SAFETY: N is a compile-time-validated in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Binds directly to the LLVM EOR3 intrinsic; the SHA3 feature provides the
    // single-instruction three-way XOR (a ^ b ^ c).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the declared signature matches the LLVM intrinsic's.
    unsafe { _veor3q_s8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the declared signature matches the LLVM intrinsic's.
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the declared signature matches the LLVM intrinsic's.
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the declared signature matches the LLVM intrinsic's.
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Unsigned variants bind to the `eor3u` LLVM intrinsics (vs `eor3s` for signed);
    // both lower to the same single EOR3 instruction computing a ^ b ^ c.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the declared signature matches the LLVM intrinsic's.
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the declared signature matches the LLVM intrinsic's.
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the declared signature matches the LLVM intrinsic's.
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the declared signature matches the LLVM intrinsic's.
    unsafe { _veor3q_u64(a, b, c) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is validated to 0..=1; shuffle indices 0..1 address `a` and
    // 2..3 address `b`, so N = 0 returns `a` and N = 1 returns [a[1], b[0]].
    unsafe {
        // `N & 0b1` is redundant after the assert but keeps the match
        // trivially exhaustive for the compiler.
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    // SAFETY: N is validated to 0..=1; see vextq_f64 for the index scheme.
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Fused b * c + a: note the operand order — `a` is the accumulator (addend).
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // 4 f16 lanes in `c`: LANE must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is a validated lane index; the selected element of `c` is
    // broadcast and the vector FMA computes a + b * c[LANE] per lane.
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // 8 f16 lanes in `c`: LANE must fit in 3 unsigned bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is a validated lane index into `c`.
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // 4 f16 lanes in `c`: LANE must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is a validated lane index into `c`.
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // 8 f16 lanes in `c`: LANE must fit in 3 unsigned bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is a validated lane index into `c`.
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // 2 lanes in `c`: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is a validated lane index; c[LANE] is broadcast, then the
    // vector FMA computes a + b * c[LANE] per lane.
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // 4 lanes in `c`: LANE must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is a validated lane index into `c`.
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // 2 lanes in `c`: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is a validated lane index into `c`.
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // 4 lanes in `c`: LANE must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is a validated lane index into `c`.
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // 2 lanes in `c`: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is a validated lane index into `c`.
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // Single-lane `c`: only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // SAFETY: LANE == 0 is enforced above, so the extract is in bounds.
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // 2 lanes in `c`: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is a validated lane index into `c`.
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // Broadcast the scalar `c` to all lanes, then a + b * c (fused multiply-ADD;
    // the previous doc title said "Multiply-Subtract", which was wrong — this is
    // vfma/fmla, not vfms).
    vfma_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // Broadcast the scalar `c` to all 8 lanes, then fused multiply-add.
    vfmaq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar `c`, then fused a + b * c.
    vfma_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // Single-lane `c`: only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // SAFETY: LANE == 0 is enforced, so the extract is in bounds.
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        // Operand order: `a` is the accumulator (addend) — b * c + a.
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    // Scalar half-precision FMA; `a` is the accumulator — b * c + a.
    fmaf16(b, c, a)
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // 4 lanes in `v`: LANE must fit in 2 unsigned bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is a validated lane index into `v`.
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // 8 lanes in `v`: LANE must fit in 3 unsigned bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is a validated lane index into `v`.
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Fused b * c + a per lane; `a` is the accumulator (addend).
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // Single-lane `c`: only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // SAFETY: LANE == 0 is enforced, so the extract is in bounds.
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar `c` to both lanes, then fused multiply-add.
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
10390#[doc = "Floating-point fused multiply-add to accumulator"]
10391#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
10392#[inline(always)]
10393#[target_feature(enable = "neon")]
10394#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
10395#[rustc_legacy_const_generics(3)]
10396#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10397pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
10398 static_assert_uimm_bits!(LANE, 1);
10399 unsafe {
10400 let c: f32 = simd_extract!(c, LANE as u32);
10401 fmaf32(b, c, a)
10402 }
10403}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the extracted scalar is
    // fed to a plain scalar fused multiply-add (b * c + a).
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the lane index is range-checked above; the extracted scalar is
    // fed to a plain scalar fused multiply-add (b * c + a).
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Binding to the LLVM intrinsic that lowers to FMLAL2.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: the declaration above mirrors the LLVM intrinsic's signature,
    // and the required target features are enabled on this function.
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to FMLAL2.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: the declaration above mirrors the LLVM intrinsic's signature,
    // and the required target features are enabled on this function.
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Binding to the LLVM intrinsic that lowers to FMLAL.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: the declaration above mirrors the LLVM intrinsic's signature,
    // and the required target features are enabled on this function.
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to FMLAL.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: the declaration above mirrors the LLVM intrinsic's signature,
    // and the required target features are enabled on this function.
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Binding to the LLVM intrinsic that lowers to FMLSL2.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: the declaration above mirrors the LLVM intrinsic's signature,
    // and the required target features are enabled on this function.
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to FMLSL2.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: the declaration above mirrors the LLVM intrinsic's signature,
    // and the required target features are enabled on this function.
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Binding to the LLVM intrinsic that lowers to FMLSL.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: the declaration above mirrors the LLVM intrinsic's signature,
    // and the required target features are enabled on this function.
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Binding to the LLVM intrinsic that lowers to FMLSL.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: the declaration above mirrors the LLVM intrinsic's signature,
    // and the required target features are enabled on this function.
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Implemented as fma(a, -b, c): negating `b` turns the fused
    // multiply-add into the fused multiply-subtract.
    // SAFETY: `simd_neg` is a pure lane-wise negation on a value of the
    // declared vector type; the callee shares this function's `neon` gate.
    unsafe {
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // target-feature requirements.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // `neon` gate.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // `neon` gate.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // `neon` gate.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // `neon` gate.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // `neon` gate.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` is a one-lane vector, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // SAFETY: the lane index was checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // `neon` gate.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the lane index is range-checked above; the selected element is
    // broadcast and fed to the vector form, which shares this function's
    // `neon` gate.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // Broadcast the scalar into every lane, then defer to the vector form.
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // Broadcast the scalar into every lane, then defer to the vector form.
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar into the single lane, then defer to the vector form.
    vfms_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    // fms is fma with the multiplicand negated: a + (-b) * c.
    vfmah_f16(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // `v` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the lane index is range-checked above; the extracted scalar is
    // fed to the scalar fused multiply-subtract.
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // `float16x8_t` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Extract the selected lane, then apply the scalar fused multiply-subtract.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe {
        // fms(a, b, c) == fma(a, -b, c): negate the multiplicand, then
        // reuse the fused multiply-add (single rounding per lane).
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // `float64x1_t` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Splat the selected lane of `c` across both lanes and reuse the vector fms.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar `c` to both lanes and defer to the vector form.
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // fms == fma with a negated multiplicand; lane selection (and its
    // bounds check) is handled by vfmas_lane_f32.
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // fms == fma with a negated multiplicand; lane selection (and its
    // bounds check) is handled by vfmas_laneq_f32.
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // fms == fma with a negated multiplicand; lane selection (and its
    // bounds check) is handled by vfmad_lane_f64.
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // fms == fma with a negated multiplicand; lane selection (and its
    // bounds check) is handled by vfmad_laneq_f64.
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    // Whole-vector unaligned read; `read_unaligned` places no alignment
    // requirement on `ptr` (expected to lower to a single `ldr`).
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    // (`aes` feature gates the poly64 types, hence the extra enable.)
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    // Whole-vector unaligned read; no alignment requirement on `ptr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x2(ptr: *const f64) -> float64x1x2_t {
    // Read the two consecutive vectors as one unaligned aggregate load.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x3(ptr: *const f64) -> float64x1x3_t {
    // Read the three consecutive vectors as one unaligned aggregate load.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x4(ptr: *const f64) -> float64x1x4_t {
    // Read the four consecutive vectors as one unaligned aggregate load.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x2(ptr: *const f64) -> float64x2x2_t {
    // Read the two consecutive vectors as one unaligned aggregate load.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x3(ptr: *const f64) -> float64x2x3_t {
    // Read the three consecutive vectors as one unaligned aggregate load.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x4(ptr: *const f64) -> float64x2x4_t {
    // Read the four consecutive vectors as one unaligned aggregate load.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    // Direct binding to the LLVM ld2r (load 2-element, replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    // Direct binding to the LLVM ld2r (load 2-element, replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    // Direct binding to the LLVM ld2r (load 2-element, replicate) intrinsic;
    // the u64/p64 variants below are implemented on top of this one.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    // With one lane per vector there is nothing to de-interleave, so a plain
    // unaligned struct read suffices (no dedicated instruction is asserted).
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // `float64x1_t` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM ld2lane intrinsic; `b` supplies the values
    // of the lanes that are not reloaded, and the pointer is passed untyped.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // `int64x1_t` has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Direct binding to the LLVM ld2lane intrinsic; the u64/p64 variants
    // below reuse this signed implementation via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    // Single-lane vectors: only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Same bit pattern as the signed variant; reuse it via transmute.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    // Single-lane vectors: only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Same bit pattern as the signed variant; reuse it via transmute.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian: same bit pattern as the signed variant, no lane fixup.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Big-endian variant: reuse the signed implementation, then reverse the
    // two lanes of each register to restore the expected element order.
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Little-endian: same bit pattern as the signed variant, no lane fixup.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Delegate to the signed variant (identical bit layout), ...
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // ... then swap the two 64-bit lanes of each register ([1, 0] shuffle) to
    // restore big-endian lane numbering after the transmute-based delegation.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    // Raw binding to LLVM's AArch64 `ld2` structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    // `as _` only reinterprets the element pointer to the vector-pointer type
    // the LLVM intrinsic expects.
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    // Raw binding to LLVM's AArch64 `ld2` structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    // `as _` only reinterprets the element pointer to the vector-pointer type
    // the LLVM intrinsic expects.
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // Two 64-bit lanes per register: LANE must be 0 or 1 (1 unsigned bit).
    static_assert_uimm_bits!(LANE, 1);
    // Raw binding to LLVM's `ld2lane` intrinsic; `n` selects the lane to load
    // into, and the existing register contents are passed in as `a`/`b`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // Sixteen 8-bit lanes per register: LANE must fit in 4 unsigned bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Raw binding to LLVM's `ld2lane` intrinsic; `n` selects the lane to load
    // into, and the existing register contents are passed in as `a`/`b`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // Two 64-bit lanes per register: LANE must be 0 or 1 (1 unsigned bit).
    static_assert_uimm_bits!(LANE, 1);
    // Raw binding to LLVM's `ld2lane` intrinsic; `n` selects the lane to load
    // into, and the existing register contents are passed in as `a`/`b`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    // Two 64-bit lanes per register: LANE must be 0 or 1 (1 unsigned bit).
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    // Sixteen 8-bit lanes per register: LANE must fit in 4 unsigned bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    // Two 64-bit lanes per register: LANE must be 0 or 1 (1 unsigned bit).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    // Sixteen 8-bit lanes per register: LANE must fit in 4 unsigned bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // p64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Delegate to the signed variant (identical bit layout), ...
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    // ... then swap the two 64-bit lanes of each register ([1, 0] shuffle) to
    // restore big-endian lane numbering after the transmute-based delegation.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // u64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    // NOTE(review): unlike the sibling vld2q_p64, this definition has no
    // `#[cfg(target_endian = ...)]` split and no lane-swapping big-endian
    // variant — confirm big-endian lane ordering is handled elsewhere.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    // Raw binding to LLVM's AArch64 `ld3r` load-and-replicate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    // Raw binding to LLVM's AArch64 `ld3r` load-and-replicate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    // Raw binding to LLVM's AArch64 `ld3r` load-and-replicate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    // With one 64-bit element per register there is nothing to de-interleave:
    // the result is just the three consecutive f64s read directly from memory
    // (unaligned read, since `a` need not be vector-aligned) — hence the
    // `nop` codegen expectation above.
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // Only one 64-bit lane per register, so LANE must be 0.
    static_assert!(LANE == 0);
    // Raw binding to LLVM's `ld3lane` intrinsic; `n` selects the lane to load
    // into, and the existing register contents are passed in as `a`/`b`/`c`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    // Only one 64-bit lane per register, so LANE must be 0.
    static_assert!(LANE == 0);
    // p64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // Only one 64-bit lane per register, so LANE must be 0.
    static_assert!(LANE == 0);
    // Raw binding to LLVM's `ld3lane` intrinsic; `n` selects the lane to load
    // into, and the existing register contents are passed in as `a`/`b`/`c`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    // Only one 64-bit lane per register, so LANE must be 0.
    static_assert!(LANE == 0);
    // u64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // p64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Delegate to the signed variant (identical bit layout), ...
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    // ... then swap the two 64-bit lanes of each of the three registers
    // ([1, 0] shuffle) to restore big-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // u64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Delegate to the signed variant (identical bit layout), ...
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    // ... then swap the two 64-bit lanes of each of the three registers
    // ([1, 0] shuffle) to restore big-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    // Raw binding to LLVM's AArch64 `ld3` structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    // `as _` only reinterprets the element pointer to the vector-pointer type
    // the LLVM intrinsic expects.
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    // Raw binding to LLVM's AArch64 `ld3` structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    // `as _` only reinterprets the element pointer to the vector-pointer type
    // the LLVM intrinsic expects.
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // Two 64-bit lanes per register: LANE must be 0 or 1 (1 unsigned bit).
    static_assert_uimm_bits!(LANE, 1);
    // Raw binding to LLVM's `ld3lane` intrinsic; `n` selects the lane to load
    // into, and the existing register contents are passed in as `a`/`b`/`c`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    // Two 64-bit lanes per register: LANE must be 0 or 1 (1 unsigned bit).
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
12196#[doc = "Load multiple 3-element structures to two registers"]
12197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12198#[doc = "## Safety"]
12199#[doc = " * Neon intrinsic unsafe"]
12200#[inline(always)]
12201#[target_feature(enable = "neon")]
12202#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12203#[rustc_legacy_const_generics(2)]
12204#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12205pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12206 static_assert_uimm_bits!(LANE, 3);
12207 unsafe extern "unadjusted" {
12208 #[cfg_attr(
12209 any(target_arch = "aarch64", target_arch = "arm64ec"),
12210 link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12211 )]
12212 fn _vld3q_lane_s8(
12213 a: int8x16_t,
12214 b: int8x16_t,
12215 c: int8x16_t,
12216 n: i64,
12217 ptr: *const i8,
12218 ) -> int8x16x3_t;
12219 }
12220 _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12221}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // Two 64-bit lanes per register: LANE must be 0 or 1 (1 unsigned bit).
    static_assert_uimm_bits!(LANE, 1);
    // Raw binding to LLVM's `ld3lane` intrinsic; `n` selects the lane to load
    // into, and the existing register contents are passed in as `a`/`b`/`c`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    // Sixteen 8-bit lanes per register: LANE must fit in 4 unsigned bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    // Two 64-bit lanes per register: LANE must be 0 or 1 (1 unsigned bit).
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    // Sixteen 8-bit lanes per register: LANE must fit in 4 unsigned bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // p64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Delegate to the signed variant (identical bit layout), ...
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    // ... then swap the two 64-bit lanes of each of the three registers
    // ([1, 0] shuffle) to restore big-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // u64 and i64 lanes share the same bit pattern; delegate to the signed
    // variant and reinterpret the result.
    // NOTE(review): unlike the sibling vld3q_p64, this definition has no
    // `#[cfg(target_endian = ...)]` split and no lane-swapping big-endian
    // variant — confirm big-endian lane ordering is handled elsewhere.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    // Raw binding to LLVM's AArch64 `ld4r` load-and-replicate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    // Raw binding to LLVM's AArch64 `ld4r` load-and-replicate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    // Raw binding to LLVM's AArch64 `ld4r` load-and-replicate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    // With one 64-bit element per register there is nothing to de-interleave:
    // the result is just the four consecutive f64s read directly from memory
    // (unaligned read, since `a` need not be vector-aligned) — hence the
    // `nop` codegen expectation above.
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // Only one 64-bit lane per register, so LANE must be 0.
    static_assert!(LANE == 0);
    // Raw binding to LLVM's `ld4lane` intrinsic; `n` selects the lane to load
    // into, and the existing register contents are passed in as `a`..`d`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid. This is also
    // the delegate for the u64/p64 lane variants (via transmute).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    // Polynomial 64-bit load is bit-identical to the signed one; delegate
    // via transmute. Single-lane vector, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    // Unsigned 64-bit load is bit-identical to the signed one; delegate via
    // transmute. Single-lane vector, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian variant: bit-identical to the signed load, no lane
    // reordering required.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian variant: after the signed load, swap the two 64-bit lanes
    // of each of the four result vectors to restore the expected order.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Little-endian variant: bit-identical to the signed load, no lane
    // reordering required.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian variant: after the signed load, swap the two 64-bit lanes
    // of each of the four result vectors to restore the expected order.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    // Thin wrapper over LLVM's ld4 intrinsic (de-interleaving structure
    // load into four q-registers).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    // Thin wrapper over LLVM's ld4 intrinsic; also the delegate for the
    // u64/p64 variants (via transmute).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12592#[doc = "Load multiple 4-element structures to four registers"]
12593#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12594#[doc = "## Safety"]
12595#[doc = " * Neon intrinsic unsafe"]
12596#[inline(always)]
12597#[target_feature(enable = "neon")]
12598#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12599#[rustc_legacy_const_generics(2)]
12600#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12601pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12602 static_assert_uimm_bits!(LANE, 3);
12603 unsafe extern "unadjusted" {
12604 #[cfg_attr(
12605 any(target_arch = "aarch64", target_arch = "arm64ec"),
12606 link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12607 )]
12608 fn _vld4q_lane_s8(
12609 a: int8x16_t,
12610 b: int8x16_t,
12611 c: int8x16_t,
12612 d: int8x16_t,
12613 n: i64,
12614 ptr: *const i8,
12615 ) -> int8x16x4_t;
12616 }
12617 _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12618}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1). This is
    // also the delegate for the u64/p64 lane variants (via transmute).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // Bit-identical to the signed 64-bit lane load; delegate via transmute.
    // 2-lane vector, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // Bit-identical to the signed 8-bit lane load; delegate via transmute.
    // 16-lane vector, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // Bit-identical to the signed 64-bit lane load; delegate via transmute.
    // 2-lane vector, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // Bit-identical to the signed 8-bit lane load; delegate via transmute.
    // 16-lane vector, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian variant: bit-identical to the signed load, no lane
    // reordering required.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian variant: after the signed load, swap the two 64-bit lanes
    // of each of the four result vectors to restore the expected order.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Unsigned load is bit-identical to the signed one; delegate via
    // transmute.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
    // Single-lane vector, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Implemented as an acquire-ordered atomic load of the scalar followed
    // by a lane insert into `src`; codegen is checked to emit ldap1.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Acquire-ordered atomic load of the scalar, inserted into the
    // requested lane of `src`; codegen is checked to emit ldap1.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
    // Bit-identical to the signed 64-bit form; reinterpret the pointer and
    // vector and delegate. 2-lane vector, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
    // Bit-identical to the signed form; delegate via transmute.
    // Single-lane vector, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
    // Bit-identical to the signed form; delegate via transmute.
    // 2-lane vector, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
    // Bit-identical to the signed form; delegate via transmute.
    // Single-lane vector, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
    // Bit-identical to the signed form; delegate via transmute.
    // 2-lane vector, so LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_f16<const INDEX: i32>(a: float16x4_t, b: uint8x8_t) -> float16x8_t {
    // Bit-identical to the s16 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_f16<const INDEX: i32>(a: float16x8_t, b: uint8x8_t) -> float16x8_t {
    // Bit-identical to the s16 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    // Bit-identical to the s8 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Bit-identical to the s8 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    // Bit-identical to the s16 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    // Bit-identical to the s16 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    // Bit-identical to the s8 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Bit-identical to the s8 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    // Bit-identical to the s16 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    // Bit-identical to the s16 lookup; delegate via transmute.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    // Thin wrapper over LLVM's vluti2 lane intrinsic; also the delegate for
    // the u8/p8 variants (via transmute). LANE selects which segment of `b`
    // supplies the 2-bit indices; valid range here is 0..=1.
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Thin wrapper over LLVM's vluti2 lane intrinsic; also the delegate for
    // the u8/p8 variants (via transmute). Valid LANE range is 0..=1.
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    // Thin wrapper over LLVM's vluti2 lane intrinsic; also the delegate for
    // the u16/p16/f16 variants (via transmute). Valid LANE range is 0..=3.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    // Thin wrapper over LLVM's vluti2 lane intrinsic; also the delegate for
    // the u16/p16/f16 variants (via transmute). Valid LANE range is 0..=3.
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_f16<const INDEX: i32>(a: float16x4_t, b: uint8x16_t) -> float16x8_t {
    // Bit-identical to the s16 laneq lookup; delegate via transmute. The
    // 128-bit index vector widens the valid INDEX range to 0..=7.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
13069#[doc = "Lookup table read with 2-bit indices"]
13070#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_f16)"]
13071#[doc = "## Safety"]
13072#[doc = " * Neon intrinsic unsafe"]
13073#[inline(always)]
13074#[target_feature(enable = "neon,lut")]
13075#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13076#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13077#[rustc_legacy_const_generics(2)]
13078pub unsafe fn vluti2q_laneq_f16<const INDEX: i32>(a: float16x8_t, b: uint8x16_t) -> float16x8_t {
13079 static_assert!(INDEX >= 0 && INDEX <= 7);
13080 transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13081}
13082#[doc = "Lookup table read with 2-bit indices"]
13083#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u8)"]
13084#[doc = "## Safety"]
13085#[doc = " * Neon intrinsic unsafe"]
13086#[inline(always)]
13087#[target_feature(enable = "neon,lut")]
13088#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13089#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13090#[rustc_legacy_const_generics(2)]
13091pub unsafe fn vluti2_laneq_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x16_t {
13092 static_assert!(INDEX >= 0 && INDEX <= 3);
13093 transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
13094}
13095#[doc = "Lookup table read with 2-bit indices"]
13096#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u8)"]
13097#[doc = "## Safety"]
13098#[doc = " * Neon intrinsic unsafe"]
13099#[inline(always)]
13100#[target_feature(enable = "neon,lut")]
13101#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13102#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13103#[rustc_legacy_const_generics(2)]
13104pub unsafe fn vluti2q_laneq_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
13105 static_assert!(INDEX >= 0 && INDEX <= 3);
13106 transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
13107}
13108#[doc = "Lookup table read with 2-bit indices"]
13109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u16)"]
13110#[doc = "## Safety"]
13111#[doc = " * Neon intrinsic unsafe"]
13112#[inline(always)]
13113#[target_feature(enable = "neon,lut")]
13114#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13115#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13116#[rustc_legacy_const_generics(2)]
13117pub unsafe fn vluti2_laneq_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x16_t) -> uint16x8_t {
13118 static_assert!(INDEX >= 0 && INDEX <= 7);
13119 transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13120}
13121#[doc = "Lookup table read with 2-bit indices"]
13122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u16)"]
13123#[doc = "## Safety"]
13124#[doc = " * Neon intrinsic unsafe"]
13125#[inline(always)]
13126#[target_feature(enable = "neon,lut")]
13127#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13128#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13129#[rustc_legacy_const_generics(2)]
13130pub unsafe fn vluti2q_laneq_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
13131 static_assert!(INDEX >= 0 && INDEX <= 7);
13132 transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13133}
13134#[doc = "Lookup table read with 2-bit indices"]
13135#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p8)"]
13136#[doc = "## Safety"]
13137#[doc = " * Neon intrinsic unsafe"]
13138#[inline(always)]
13139#[target_feature(enable = "neon,lut")]
13140#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13141#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13142#[rustc_legacy_const_generics(2)]
13143pub unsafe fn vluti2_laneq_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x16_t) -> poly8x16_t {
13144 static_assert!(INDEX >= 0 && INDEX <= 3);
13145 transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
13146}
13147#[doc = "Lookup table read with 2-bit indices"]
13148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p8)"]
13149#[doc = "## Safety"]
13150#[doc = " * Neon intrinsic unsafe"]
13151#[inline(always)]
13152#[target_feature(enable = "neon,lut")]
13153#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13154#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13155#[rustc_legacy_const_generics(2)]
13156pub unsafe fn vluti2q_laneq_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
13157 static_assert!(INDEX >= 0 && INDEX <= 3);
13158 transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
13159}
13160#[doc = "Lookup table read with 2-bit indices"]
13161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p16)"]
13162#[doc = "## Safety"]
13163#[doc = " * Neon intrinsic unsafe"]
13164#[inline(always)]
13165#[target_feature(enable = "neon,lut")]
13166#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13167#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13168#[rustc_legacy_const_generics(2)]
13169pub unsafe fn vluti2_laneq_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x16_t) -> poly16x8_t {
13170 static_assert!(INDEX >= 0 && INDEX <= 7);
13171 transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13172}
13173#[doc = "Lookup table read with 2-bit indices"]
13174#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p16)"]
13175#[doc = "## Safety"]
13176#[doc = " * Neon intrinsic unsafe"]
13177#[inline(always)]
13178#[target_feature(enable = "neon,lut")]
13179#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13180#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13181#[rustc_legacy_const_generics(2)]
13182pub unsafe fn vluti2q_laneq_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x16_t) -> poly16x8_t {
13183 static_assert!(INDEX >= 0 && INDEX <= 7);
13184 transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13185}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s8<const INDEX: i32>(a: int8x8_t, b: uint8x16_t) -> int8x16_t {
    // Compile-time check that the index immediate is in the accepted 0..=3 range.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI passes
    // the SIMD vectors through without ABI lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v8i8"
        )]
        fn _vluti2_laneq_s8(a: int8x8_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    // Call is permitted without an inner `unsafe` block: we are in an `unsafe fn`.
    _vluti2_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s8<const INDEX: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Compile-time check that the index immediate is in the accepted 0..=3 range.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Declaration of the backing LLVM intrinsic (q-form: 128-bit table vector).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v16i8"
        )]
        fn _vluti2q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    // Call is permitted without an inner `unsafe` block: we are in an `unsafe fn`.
    _vluti2q_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s16<const INDEX: i32>(a: int16x4_t, b: uint8x16_t) -> int16x8_t {
    // Compile-time check that the index immediate is in the accepted 0..=7 range.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI passes
    // the SIMD vectors through without ABI lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v4i16"
        )]
        fn _vluti2_laneq_s16(a: int16x4_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    // Call is permitted without an inner `unsafe` block: we are in an `unsafe fn`.
    _vluti2_laneq_s16(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s16<const INDEX: i32>(a: int16x8_t, b: uint8x16_t) -> int16x8_t {
    // Compile-time check that the index immediate is in the accepted 0..=7 range.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Declaration of the backing LLVM intrinsic (q-form: 128-bit table vector).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v8i16"
        )]
        fn _vluti2q_laneq_s16(a: int16x8_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    // Call is permitted without an inner `unsafe` block: we are in an `unsafe fn`.
    _vluti2q_laneq_s16(a, b, INDEX)
}
13266#[doc = "Lookup table read with 4-bit indices"]
13267#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
13268#[doc = "## Safety"]
13269#[doc = " * Neon intrinsic unsafe"]
13270#[inline(always)]
13271#[target_feature(enable = "neon,lut,fp16")]
13272#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13273#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13274#[rustc_legacy_const_generics(2)]
13275pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
13276 static_assert!(LANE >= 0 && LANE <= 1);
13277 transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
13278}
13279#[doc = "Lookup table read with 4-bit indices"]
13280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
13281#[doc = "## Safety"]
13282#[doc = " * Neon intrinsic unsafe"]
13283#[inline(always)]
13284#[target_feature(enable = "neon,lut")]
13285#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13286#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13287#[rustc_legacy_const_generics(2)]
13288pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
13289 static_assert!(LANE >= 0 && LANE <= 1);
13290 transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
13291}
13292#[doc = "Lookup table read with 4-bit indices"]
13293#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
13294#[doc = "## Safety"]
13295#[doc = " * Neon intrinsic unsafe"]
13296#[inline(always)]
13297#[target_feature(enable = "neon,lut")]
13298#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13299#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13300#[rustc_legacy_const_generics(2)]
13301pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
13302 static_assert!(LANE >= 0 && LANE <= 1);
13303 transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
13304}
13305#[doc = "Lookup table read with 4-bit indices"]
13306#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13307#[doc = "## Safety"]
13308#[doc = " * Neon intrinsic unsafe"]
13309#[inline(always)]
13310#[target_feature(enable = "neon,lut")]
13311#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13312#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13313#[rustc_legacy_const_generics(2)]
13314pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13315 static_assert!(LANE >= 0 && LANE <= 1);
13316 unsafe extern "unadjusted" {
13317 #[cfg_attr(
13318 any(target_arch = "aarch64", target_arch = "arm64ec"),
13319 link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13320 )]
13321 fn _vluti4q_lane_s16_x2(a: int16x8_t, a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13322 }
13323 _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13324}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Only lane 0 is valid for this form; enforced at compile time.
    static_assert!(LANE == 0);
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI passes
    // the SIMD vectors through without ABI lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    // Call is permitted without an inner `unsafe` block: we are in an `unsafe fn`.
    _vluti4q_lane_s8(a, b, LANE)
}
13345#[doc = "Lookup table read with 4-bit indices"]
13346#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
13347#[doc = "## Safety"]
13348#[doc = " * Neon intrinsic unsafe"]
13349#[inline(always)]
13350#[target_feature(enable = "neon,lut")]
13351#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13352#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13353#[rustc_legacy_const_generics(2)]
13354pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
13355 static_assert!(LANE == 0);
13356 transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
13357}
13358#[doc = "Lookup table read with 4-bit indices"]
13359#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
13360#[doc = "## Safety"]
13361#[doc = " * Neon intrinsic unsafe"]
13362#[inline(always)]
13363#[target_feature(enable = "neon,lut")]
13364#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13365#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13366#[rustc_legacy_const_generics(2)]
13367pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
13368 static_assert!(LANE == 0);
13369 transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
13370}
13371#[doc = "Lookup table read with 4-bit indices"]
13372#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
13373#[doc = "## Safety"]
13374#[doc = " * Neon intrinsic unsafe"]
13375#[inline(always)]
13376#[target_feature(enable = "neon,lut,fp16")]
13377#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13378#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13379#[rustc_legacy_const_generics(2)]
13380pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
13381 a: float16x8x2_t,
13382 b: uint8x16_t,
13383) -> float16x8_t {
13384 static_assert!(LANE >= 0 && LANE <= 3);
13385 transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13386}
13387#[doc = "Lookup table read with 4-bit indices"]
13388#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
13389#[doc = "## Safety"]
13390#[doc = " * Neon intrinsic unsafe"]
13391#[inline(always)]
13392#[target_feature(enable = "neon,lut")]
13393#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13394#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13395#[rustc_legacy_const_generics(2)]
13396pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
13397 static_assert!(LANE >= 0 && LANE <= 3);
13398 transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13399}
13400#[doc = "Lookup table read with 4-bit indices"]
13401#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
13402#[doc = "## Safety"]
13403#[doc = " * Neon intrinsic unsafe"]
13404#[inline(always)]
13405#[target_feature(enable = "neon,lut")]
13406#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13407#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13408#[rustc_legacy_const_generics(2)]
13409pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
13410 static_assert!(LANE >= 0 && LANE <= 3);
13411 transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13412}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    // Compile-time check that the lane immediate is in the accepted 0..=3 range.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Declaration of the backing LLVM intrinsic, which takes the two table
    // halves as separate vector arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    // Split the x2 tuple into the intrinsic's two table operands.
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Compile-time check that the lane immediate is in the accepted 0..=1 range.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI passes
    // the SIMD vectors through without ABI lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    // Call is permitted without an inner `unsafe` block: we are in an `unsafe fn`.
    _vluti4q_laneq_s8(a, b, LANE)
}
13453#[doc = "Lookup table read with 4-bit indices"]
13454#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
13455#[doc = "## Safety"]
13456#[doc = " * Neon intrinsic unsafe"]
13457#[inline(always)]
13458#[target_feature(enable = "neon,lut")]
13459#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13460#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13461#[rustc_legacy_const_generics(2)]
13462pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
13463 static_assert!(LANE >= 0 && LANE <= 1);
13464 transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
13465}
13466#[doc = "Lookup table read with 4-bit indices"]
13467#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
13468#[doc = "## Safety"]
13469#[doc = " * Neon intrinsic unsafe"]
13470#[inline(always)]
13471#[target_feature(enable = "neon,lut")]
13472#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13473#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13474#[rustc_legacy_const_generics(2)]
13475pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
13476 static_assert!(LANE >= 0 && LANE <= 1);
13477 transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
13478}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI passes
    // the SIMD vectors through without ABI lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: no pointer arguments; the call operates only on the plain
    // vector values passed in, with the `neon` target feature enabled above.
    unsafe { _vmax_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Declaration of the backing LLVM intrinsic (q-form: 128-bit vectors).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v2f64"
        )]
        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: no pointer arguments; the call operates only on the plain
    // vector values passed in, with the `neon` target feature enabled above.
    unsafe { _vmaxq_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
    // Declaration of the backing LLVM intrinsic (scalar f16 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.f16"
        )]
        fn _vmaxh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: no pointer arguments; operates only on the scalar values
    // passed in, with the `neon,fp16` target features enabled above.
    unsafe { _vmaxh_f16(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Declaration of the backing LLVM intrinsic (FMAXNM lowering).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.v1f64"
        )]
        fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: no pointer arguments; the call operates only on the plain
    // vector values passed in, with the `neon` target feature enabled above.
    unsafe { _vmaxnm_f64(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Declaration of the backing LLVM intrinsic (q-form: 128-bit vectors).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.v2f64"
        )]
        fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: no pointer arguments; the call operates only on the plain
    // vector values passed in, with the `neon` target feature enabled above.
    unsafe { _vmaxnmq_f64(a, b) }
}
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    // Declaration of the backing LLVM intrinsic (scalar f16 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.f16"
        )]
        fn _vmaxnmh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: no pointer arguments; operates only on the scalar values
    // passed in, with the `neon,fp16` target features enabled above.
    unsafe { _vmaxnmh_f16(a, b) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    // Declaration of the backing LLVM reduction intrinsic (vector -> scalar).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v4f16"
        )]
        fn _vmaxnmv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon,fp16` target features enabled above.
    unsafe { _vmaxnmv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    // Declaration of the backing LLVM reduction intrinsic (vector -> scalar).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v8f16"
        )]
        fn _vmaxnmvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon,fp16` target features enabled above.
    unsafe { _vmaxnmvq_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    // Declaration of the backing LLVM reduction intrinsic. Note the
    // `assert_instr(fmaxnmp)` above: a two-lane reduction is emitted as the
    // pairwise instruction rather than FMAXNMV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vmaxnmv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon` target feature enabled above.
    unsafe { _vmaxnmv_f32(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    // Declaration of the backing LLVM reduction intrinsic. Note the
    // `assert_instr(fmaxnmp)` above: a two-lane reduction is emitted as the
    // pairwise instruction rather than FMAXNMV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vmaxnmvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon` target feature enabled above.
    unsafe { _vmaxnmvq_f64(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    // Declaration of the backing LLVM reduction intrinsic (vector -> scalar).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32"
        )]
        fn _vmaxnmvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon` target feature enabled above.
    unsafe { _vmaxnmvq_f32(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    // Declaration of the backing LLVM reduction intrinsic (vector -> scalar).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon,fp16` target features enabled above.
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    // Declaration of the backing LLVM reduction intrinsic (vector -> scalar).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon,fp16` target features enabled above.
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    // Declaration of the backing LLVM reduction intrinsic. Note the
    // `assert_instr(fmaxp)` above: a two-lane reduction is emitted as the
    // pairwise instruction rather than FMAXV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon` target feature enabled above.
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    // Declaration of the backing LLVM reduction intrinsic (vector -> scalar).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon` target feature enabled above.
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    // Declaration of the backing LLVM reduction intrinsic. Note the
    // `assert_instr(fmaxp)` above: a two-lane reduction is emitted as the
    // pairwise instruction rather than FMAXV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: no pointer arguments; reduces the vector value passed in,
    // with the `neon` target feature enabled above.
    unsafe { _vmaxvq_f64(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    // Two lanes only, so this lowers to the pairwise SMAXP instruction.
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    // Two lanes only, so this lowers to the pairwise UMAXP instruction.
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    // SAFETY: pure lane-wise max reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_max(a) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Element-wise minimum via the LLVM NEON intrinsic; "unadjusted" ABI
    // passes SIMD values to LLVM without call-convention adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure computation on `a` and `b`; NEON is enabled via #[target_feature].
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Element-wise minimum over both f64 lanes via the LLVM NEON intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure computation on `a` and `b`; NEON is enabled via #[target_feature].
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision minimum; requires the fp16 extension and is
    // excluded on arm64ec (see the #[cfg] above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: pure computation on `a` and `b`; fp16 is enabled via #[target_feature].
    unsafe { _vminh_f16(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // "Minimum Number" (FMINNM) variant: NaN handling differs from FMIN —
    // see Arm's documentation linked above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.v1f64"
        )]
        fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure computation on `a` and `b`; NEON is enabled via #[target_feature].
    unsafe { _vminnm_f64(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Element-wise FMINNM over both f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.v2f64"
        )]
        fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure computation on `a` and `b`; NEON is enabled via #[target_feature].
    unsafe { _vminnmq_f64(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision FMINNM; requires fp16, excluded on arm64ec.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.f16"
        )]
        fn _vminnmh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: pure computation on `a` and `b`; fp16 is enabled via #[target_feature].
    unsafe { _vminnmh_f16(a, b) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    // FMINNM-style reduction across all four f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f16.v4f16"
        )]
        fn _vminnmv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: pure computation on `a`; fp16 is enabled via #[target_feature].
    unsafe { _vminnmv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    // FMINNM-style reduction across all eight f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f16.v8f16"
        )]
        fn _vminnmvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: pure computation on `a`; fp16 is enabled via #[target_feature].
    unsafe { _vminnmvq_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    // Two-lane reduction; codegen uses the pairwise FMINNMP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vminnmv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: pure computation on `a`; NEON is enabled via #[target_feature].
    unsafe { _vminnmv_f32(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    // Two-lane f64 reduction; codegen uses the pairwise FMINNMP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vminnmvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: pure computation on `a`; NEON is enabled via #[target_feature].
    unsafe { _vminnmvq_f64(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    // FMINNM-style reduction across all four f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32"
        )]
        fn _vminnmvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: pure computation on `a`; NEON is enabled via #[target_feature].
    unsafe { _vminnmvq_f32(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    // FMINV reduction across all four f16 lanes; requires fp16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: pure computation on `a`; fp16 is enabled via #[target_feature].
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    // FMINV reduction across all eight f16 lanes; requires fp16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    // SAFETY: pure computation on `a`; fp16 is enabled via #[target_feature].
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    // Two-lane reduction; codegen uses the pairwise FMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: pure computation on `a`; NEON is enabled via #[target_feature].
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    // FMINV reduction across all four f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: pure computation on `a`; NEON is enabled via #[target_feature].
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    // Two-lane f64 reduction; codegen uses the pairwise FMINP instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: pure computation on `a`; NEON is enabled via #[target_feature].
    unsafe { _vminvq_f64(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    // Two lanes only, so this lowers to the pairwise SMINP instruction.
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    // Two lanes only, so this lowers to the pairwise UMINP instruction.
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    // SAFETY: pure lane-wise min reduction; NEON is enabled via #[target_feature].
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Computes a + (b * c) as two separate operations (mul then add), not a
    // fused multiply-add — hence the `fmul` in assert_instr above.
    // SAFETY: pure arithmetic; NEON is enabled via #[target_feature].
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Computes a + (b * c) as two separate operations (mul then add), not a
    // fused multiply-add — hence the `fmul` in assert_instr above.
    // SAFETY: pure arithmetic; NEON is enabled via #[target_feature].
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3, the lanes of `c`).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 8 positions, then delegate.
    // SAFETY: LANE is validated above; the shuffle and call are pure.
    unsafe { vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // Compile-time check: LANE must fit in 3 bits (0..=7, the lanes of `c`).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` to all 8 positions, then delegate.
    // SAFETY: LANE is validated above; the shuffle and call are pure.
    unsafe { vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // Compile-time check: LANE must fit in 1 bit (0..=1, the lanes of `c`).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` to all 4 positions, then delegate.
    // SAFETY: LANE is validated above; the shuffle and call are pure.
    unsafe { vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3, the lanes of `c`).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 4 positions, then delegate.
    // SAFETY: LANE is validated above; the shuffle and call are pure.
    unsafe { vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3, the lanes of `c`).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 8 positions, then delegate.
    // SAFETY: LANE is validated above; the shuffle and call are pure.
    unsafe { vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // Compile-time check: LANE must fit in 3 bits (0..=7, the lanes of `c`).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` to all 8 positions, then delegate.
    // SAFETY: LANE is validated above; the shuffle and call are pure.
    unsafe { vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // Compile-time check: LANE must fit in 1 bit (0..=1, the lanes of `c`).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` to all 4 positions, then delegate.
    // SAFETY: LANE is validated above; the shuffle and call are pure.
    unsafe { vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // Compile-time check: LANE must fit in 2 bits (0..=3, the lanes of `c`).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` to all 4 positions, then delegate.
    // SAFETY: LANE is validated above; the shuffle and call are pure.
    unsafe { vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Broadcast the scalar `c` across all lanes, then reuse the vector form.
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Broadcast the scalar `c` across all lanes, then reuse the vector form.
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Broadcast the scalar `c` across all lanes, then reuse the vector form.
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Broadcast the scalar `c` across all lanes, then reuse the vector form.
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    // SAFETY: shuffles and the delegated call are pure lane operations.
    unsafe {
        // Select the upper halves (lanes 8..16) of `b` and `c`, then apply the
        // non-high widening multiply-accumulate.
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // SAFETY: shuffles and the delegated call are pure lane operations.
    unsafe {
        // Select the upper halves (lanes 4..8) of `b` and `c`, then apply the
        // non-high widening multiply-accumulate.
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // SAFETY: shuffles and the delegated call are pure lane operations.
    unsafe {
        // Select the upper halves (lanes 2..4) of `b` and `c`, then apply the
        // non-high widening multiply-accumulate.
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    // SAFETY: shuffles and the delegated call are pure lane operations.
    unsafe {
        // Select the upper halves (lanes 8..16) of `b` and `c`, then apply the
        // non-high widening multiply-accumulate.
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    // SAFETY: shuffles and the delegated call are pure lane operations.
    unsafe {
        // Select the upper halves (lanes 4..8) of `b` and `c`, then apply the
        // non-high widening multiply-accumulate.
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    // SAFETY: shuffles and the delegated call are pure lane operations.
    unsafe {
        // Select the upper halves (lanes 2..4) of `b` and `c`, then apply the
        // non-high widening multiply-accumulate.
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_u32(a, b, c)
    }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // a - (b * c), expressed as a separate multiply then subtract — hence the
    // `fmul` (not fused `fmls`) in the assert_instr above.
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Lane-wise a - (b * c), as separate multiply then subtract (see `fmul` assert above).
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // LANE must index the 4-element vector `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane `LANE` of `c` to all 8 lanes, then widening multiply-subtract
    // against the high half of `b`.
    unsafe { vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // LANE must index the 8-element vector `c`.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane `LANE` of `c` to all 8 lanes, then widening multiply-subtract
    // against the high half of `b`.
    unsafe { vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // LANE must index the 2-element vector `c`.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane `LANE` of `c` to all 4 lanes, then widening multiply-subtract
    // against the high half of `b`.
    unsafe { vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // LANE must index the 4-element vector `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane `LANE` of `c` to all 4 lanes, then widening multiply-subtract
    // against the high half of `b`.
    unsafe { vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // LANE must index the 4-element vector `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane `LANE` of `c` to all 8 lanes, then widening multiply-subtract
    // against the high half of `b`.
    unsafe { vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // LANE must index the 8-element vector `c`.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane `LANE` of `c` to all 8 lanes, then widening multiply-subtract
    // against the high half of `b`.
    unsafe { vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // LANE must index the 2-element vector `c`.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane `LANE` of `c` to all 4 lanes, then widening multiply-subtract
    // against the high half of `b`.
    unsafe { vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // LANE must index the 4-element vector `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane `LANE` of `c` to all 4 lanes, then widening multiply-subtract
    // against the high half of `b`.
    unsafe { vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Splat scalar `c` across all lanes, then reuse the high-half multiply-subtract.
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Splat scalar `c` across all lanes, then reuse the high-half multiply-subtract.
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Splat scalar `c` across all lanes, then reuse the high-half multiply-subtract.
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Splat scalar `c` across all lanes, then reuse the high-half multiply-subtract.
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
14629#[doc = "Signed multiply-subtract long"]
14630#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
14631#[inline(always)]
14632#[target_feature(enable = "neon")]
14633#[cfg_attr(test, assert_instr(smlsl2))]
14634#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14635pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
14636 unsafe {
14637 let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14638 let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14639 vmlsl_s8(a, b, c)
14640 }
14641}
14642#[doc = "Signed multiply-subtract long"]
14643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
14644#[inline(always)]
14645#[target_feature(enable = "neon")]
14646#[cfg_attr(test, assert_instr(smlsl2))]
14647#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14648pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
14649 unsafe {
14650 let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14651 let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14652 vmlsl_s16(a, b, c)
14653 }
14654}
14655#[doc = "Signed multiply-subtract long"]
14656#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
14657#[inline(always)]
14658#[target_feature(enable = "neon")]
14659#[cfg_attr(test, assert_instr(smlsl2))]
14660#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14661pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
14662 unsafe {
14663 let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
14664 let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
14665 vmlsl_s32(a, b, c)
14666 }
14667}
14668#[doc = "Unsigned multiply-subtract long"]
14669#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
14670#[inline(always)]
14671#[target_feature(enable = "neon")]
14672#[cfg_attr(test, assert_instr(umlsl2))]
14673#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14674pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
14675 unsafe {
14676 let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14677 let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
14678 vmlsl_u8(a, b, c)
14679 }
14680}
14681#[doc = "Unsigned multiply-subtract long"]
14682#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
14683#[inline(always)]
14684#[target_feature(enable = "neon")]
14685#[cfg_attr(test, assert_instr(umlsl2))]
14686#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14687pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
14688 unsafe {
14689 let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14690 let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
14691 vmlsl_u16(a, b, c)
14692 }
14693}
14694#[doc = "Unsigned multiply-subtract long"]
14695#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
14696#[inline(always)]
14697#[target_feature(enable = "neon")]
14698#[cfg_attr(test, assert_instr(umlsl2))]
14699#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14700pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
14701 unsafe {
14702 let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
14703 let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
14704 vmlsl_u32(a, b, c)
14705 }
14706}
14707#[doc = "Vector move"]
14708#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
14709#[inline(always)]
14710#[target_feature(enable = "neon")]
14711#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14712#[cfg_attr(test, assert_instr(sxtl2))]
14713pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
14714 unsafe {
14715 let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14716 vmovl_s8(a)
14717 }
14718}
14719#[doc = "Vector move"]
14720#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
14721#[inline(always)]
14722#[target_feature(enable = "neon")]
14723#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14724#[cfg_attr(test, assert_instr(sxtl2))]
14725pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
14726 unsafe {
14727 let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14728 vmovl_s16(a)
14729 }
14730}
14731#[doc = "Vector move"]
14732#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
14733#[inline(always)]
14734#[target_feature(enable = "neon")]
14735#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14736#[cfg_attr(test, assert_instr(sxtl2))]
14737pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
14738 unsafe {
14739 let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
14740 vmovl_s32(a)
14741 }
14742}
14743#[doc = "Vector move"]
14744#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
14745#[inline(always)]
14746#[target_feature(enable = "neon")]
14747#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14748#[cfg_attr(test, assert_instr(uxtl2))]
14749pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
14750 unsafe {
14751 let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14752 vmovl_u8(a)
14753 }
14754}
14755#[doc = "Vector move"]
14756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
14757#[inline(always)]
14758#[target_feature(enable = "neon")]
14759#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14760#[cfg_attr(test, assert_instr(uxtl2))]
14761pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
14762 unsafe {
14763 let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14764 vmovl_u16(a)
14765 }
14766}
14767#[doc = "Vector move"]
14768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
14769#[inline(always)]
14770#[target_feature(enable = "neon")]
14771#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14772#[cfg_attr(test, assert_instr(uxtl2))]
14773pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
14774 unsafe {
14775 let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
14776 vmovl_u32(a)
14777 }
14778}
14779#[doc = "Extract narrow"]
14780#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
14781#[inline(always)]
14782#[target_feature(enable = "neon")]
14783#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14784#[cfg_attr(test, assert_instr(xtn2))]
14785pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
14786 unsafe {
14787 let c: int8x8_t = simd_cast(b);
14788 simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14789 }
14790}
14791#[doc = "Extract narrow"]
14792#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
14793#[inline(always)]
14794#[target_feature(enable = "neon")]
14795#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14796#[cfg_attr(test, assert_instr(xtn2))]
14797pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
14798 unsafe {
14799 let c: int16x4_t = simd_cast(b);
14800 simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14801 }
14802}
14803#[doc = "Extract narrow"]
14804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
14805#[inline(always)]
14806#[target_feature(enable = "neon")]
14807#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14808#[cfg_attr(test, assert_instr(xtn2))]
14809pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
14810 unsafe {
14811 let c: int32x2_t = simd_cast(b);
14812 simd_shuffle!(a, c, [0, 1, 2, 3])
14813 }
14814}
14815#[doc = "Extract narrow"]
14816#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
14817#[inline(always)]
14818#[target_feature(enable = "neon")]
14819#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14820#[cfg_attr(test, assert_instr(xtn2))]
14821pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
14822 unsafe {
14823 let c: uint8x8_t = simd_cast(b);
14824 simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14825 }
14826}
14827#[doc = "Extract narrow"]
14828#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
14829#[inline(always)]
14830#[target_feature(enable = "neon")]
14831#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14832#[cfg_attr(test, assert_instr(xtn2))]
14833pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
14834 unsafe {
14835 let c: uint16x4_t = simd_cast(b);
14836 simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14837 }
14838}
14839#[doc = "Extract narrow"]
14840#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
14841#[inline(always)]
14842#[target_feature(enable = "neon")]
14843#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14844#[cfg_attr(test, assert_instr(xtn2))]
14845pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
14846 unsafe {
14847 let c: uint32x2_t = simd_cast(b);
14848 simd_shuffle!(a, c, [0, 1, 2, 3])
14849 }
14850}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Element-wise double-precision multiply (single-lane vector).
    unsafe { simd_mul(a, b) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Element-wise double-precision multiply across both lanes.
    unsafe { simd_mul(a, b) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // A 1-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Extract the scalar from `b`, rebuild a single-lane vector via transmute,
    // and multiply element-wise.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // LANE must index the 8-element vector `b`.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane `LANE` of `b` to 4 lanes and multiply element-wise.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // LANE must index the 8-element vector `b`.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane `LANE` of `b` to 8 lanes and multiply element-wise.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // LANE must index the 2-element vector `b`.
    static_assert_uimm_bits!(LANE, 1);
    // Extract lane `LANE` of `b`, rebuild a single-lane vector via transmute,
    // and multiply element-wise.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    // Splat scalar `b` into a vector, then multiply element-wise.
    unsafe { simd_mul(a, vdup_n_f64(b)) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    // Splat scalar `b` across both lanes, then multiply element-wise.
    unsafe { simd_mul(a, vdupq_n_f64(b)) }
}
14933#[doc = "Floating-point multiply"]
14934#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
14935#[inline(always)]
14936#[target_feature(enable = "neon")]
14937#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14938#[rustc_legacy_const_generics(2)]
14939#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14940pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
14941 static_assert!(LANE == 0);
14942 unsafe {
14943 let b: f64 = simd_extract!(b, LANE as u32);
14944 a * b
14945 }
14946}
14947#[doc = "Add"]
14948#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
14949#[inline(always)]
14950#[target_feature(enable = "neon,fp16")]
14951#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14952#[cfg(not(target_arch = "arm64ec"))]
14953#[cfg_attr(test, assert_instr(fmul))]
14954pub fn vmulh_f16(a: f16, b: f16) -> f16 {
14955 a * b
14956}
14957#[doc = "Floating-point multiply"]
14958#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
14959#[inline(always)]
14960#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14961#[rustc_legacy_const_generics(2)]
14962#[target_feature(enable = "neon,fp16")]
14963#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14964#[cfg(not(target_arch = "arm64ec"))]
14965pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
14966 static_assert_uimm_bits!(LANE, 2);
14967 unsafe {
14968 let b: f16 = simd_extract!(b, LANE as u32);
14969 a * b
14970 }
14971}
14972#[doc = "Floating-point multiply"]
14973#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
14974#[inline(always)]
14975#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14976#[rustc_legacy_const_generics(2)]
14977#[target_feature(enable = "neon,fp16")]
14978#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14979#[cfg(not(target_arch = "arm64ec"))]
14980pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
14981 static_assert_uimm_bits!(LANE, 3);
14982 unsafe {
14983 let b: f16 = simd_extract!(b, LANE as u32);
14984 a * b
14985 }
14986}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // LANE must index the 4-element vector `b`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane `LANE` of `b` to 8 lanes, then widening multiply with the
    // high half of `a`.
    unsafe { vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // LANE must index the 8-element vector `b`.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane `LANE` of `b` to 8 lanes, then widening multiply with the
    // high half of `a`.
    unsafe { vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // LANE must index the 2-element vector `b`.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane `LANE` of `b` to 4 lanes, then widening multiply with the
    // high half of `a`.
    unsafe { vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // LANE must index the 4-element vector `b`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane `LANE` of `b` to 4 lanes, then widening multiply with the
    // high half of `a`.
    unsafe { vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    // LANE must index the 4-element vector `b`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane `LANE` of `b` to 8 lanes, then widening multiply with the
    // high half of `a`.
    unsafe { vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // LANE must index the 8-element vector `b`.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane `LANE` of `b` to 8 lanes, then widening multiply with the
    // high half of `a`.
    unsafe { vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    // LANE must index the 2-element vector `b`.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane `LANE` of `b` to 4 lanes, then widening multiply with the
    // high half of `a`.
    unsafe { vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // LANE must index the 4-element vector `b`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane `LANE` of `b` to 4 lanes, then widening multiply with the
    // high half of `a`.
    unsafe { vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // Splat scalar `b` across all lanes, then reuse the high-half widening multiply.
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Splat scalar `b` across all lanes, then reuse the high-half widening multiply.
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    // Splat scalar `b` across all lanes, then reuse the high-half widening multiply.
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    // Splat scalar `b` across all lanes, then reuse the high-half widening multiply.
    vmull_high_u32(a, vdupq_n_u32(b))
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    // Polynomial multiply of the upper (index 1) 64-bit elements of each input,
    // producing a 128-bit result via the base vmull_p64.
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
15120#[doc = "Polynomial multiply long"]
15121#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
15122#[inline(always)]
15123#[target_feature(enable = "neon")]
15124#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15125#[cfg_attr(test, assert_instr(pmull2))]
15126pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
15127 unsafe {
15128 let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
15129 let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
15130 vmull_p8(a, b)
15131 }
15132}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the upper 8 lanes of each input, then widen-multiply (SMULL2).
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Upper 4 of 8 lanes, widened 16 -> 32 bit.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Upper 2 of 4 lanes, widened 32 -> 64 bit.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the upper 8 lanes of each input, then widen-multiply (UMULL2).
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Upper 4 of 8 lanes, widened 16 -> 32 bit.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Upper 2 of 4 lanes, widened 32 -> 64 bit.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    // The LLVM intrinsic yields its 128-bit carry-less product as an
    // int8x16_t; reinterpret the same 16 bytes as the p128 return type.
    unsafe { transmute(_vmull_p64(a, b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // b has a single lane, so only LANE == 0 is valid (checked at compile time).
    static_assert!(LANE == 0);
    // Broadcast the chosen lane across both result lanes, then multiply.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0 or 1) for the 2-lane source vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // Scalar form: pull the selected lane out of b and multiply in scalar code.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // 4-lane source: LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // Scalar f64 multiply by the selected lane of a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Direct binding to the LLVM FMULX intrinsic for 4 x f16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // 8 x f16 (full 128-bit register) variant of FMULX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // 2 x f32 variant of FMULX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // 4 x f32 variant of FMULX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // 1 x f64 variant of FMULX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 2 x f64 variant of FMULX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmulxq_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // LANE in 0..=3 for the 4-lane source; broadcast it, then FMULX.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulx_f16(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // LANE in 0..=7 for the 8-lane source; result stays 4 lanes wide.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulx_f16(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
    // LANE in 0..=3; broadcast across all 8 result lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxq_f16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // LANE in 0..=7; broadcast across all 8 result lanes.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulxq_f16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // LANE in 0..=1; broadcast the selected lane of b, then FMULX.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // LANE in 0..=3 from the 4-lane source; result stays 2 lanes wide.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // LANE in 0..=1; broadcast across all 4 result lanes.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // LANE in 0..=3; broadcast across all 4 result lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE in 0..=1; broadcast across both f64 result lanes.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Single-lane source: only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Extract the scalar f64 and reinterpret it back as a 1-lane vector
    // (same bits) so the vector FMULX form can be used.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // LANE in 0..=1 from the 2-lane source; same extract-and-rewrap strategy.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    // "Multiply extended" (FMULX) by a scalar: broadcast b, then vmulx_f16.
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    // 8-lane variant of the scalar-broadcast FMULX above.
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    // Scalar f64 FMULX via the dedicated LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    // Scalar f32 FMULX via the dedicated LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // Single-lane source: only LANE == 0 is valid; extract and do scalar FMULX.
    static_assert!(LANE == 0);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // LANE in 0..=1 from the 2-lane source.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE in 0..=1 from the 2-lane f32 source.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // LANE in 0..=3 from the 4-lane f32 source.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    // Scalar f16 FMULX via the dedicated LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // LANE in 0..=3; extract from the 4-lane source, then scalar FMULX.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // LANE in 0..=7 from the 8-lane source.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // Single-lane source: only LANE == 0; broadcast it to both result lanes.
    static_assert!(LANE == 0);
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise negation via the portable simd_neg intrinsic (FNEG).
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    // 2-lane f64 negation (FNEG).
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    // 1-lane i64 two's-complement negation (NEG).
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    // 2-lane i64 two's-complement negation (NEG).
    unsafe { simd_neg(a) }
}
15698#[doc = "Negate"]
15699#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
15700#[inline(always)]
15701#[target_feature(enable = "neon")]
15702#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15703#[cfg_attr(test, assert_instr(neg))]
15704pub fn vnegd_s64(a: i64) -> i64 {
15705 a.wrapping_neg()
15706}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    // Scalar f16 negation maps straight to the language-level unary minus.
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    // Pairwise add of a single 2-lane vector reduces to lane0 + lane1.
    unsafe {
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    // Same lane0 + lane1 reduction for the 2-lane f32 vector.
    unsafe {
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    // Ordered sum of the two i64 lanes, starting from 0 (ADDP).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    // Unsigned counterpart of vpaddd_s64; same ordered two-lane reduction.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Gather the even- and odd-indexed lanes of the a:b concatenation and add
    // them; each result lane is the sum of one adjacent input pair (FADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Even/odd lane split over a:b, then lane-wise add (FADDP), 4 x f32.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Even/odd lane split over a:b, then lane-wise add (FADDP), 2 x f64.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Gather even- and odd-indexed lanes of the a:b concatenation and add
    // them; each result lane is the sum of one adjacent input pair (ADDP).
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Even/odd lane split over a:b, then lane-wise add (ADDP), 8 x i16.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Even/odd lane split over a:b, then lane-wise add (ADDP), 4 x i32.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Even/odd lane split over a:b, then lane-wise add (ADDP), 2 x i64.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
15853#[doc = "Add Pairwise"]
15854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
15855#[inline(always)]
15856#[target_feature(enable = "neon")]
15857#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15858#[cfg_attr(test, assert_instr(addp))]
15859pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
15860 unsafe {
15861 let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
15862 let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
15863 simd_add(even, odd)
15864 }
15865}
15866#[doc = "Add Pairwise"]
15867#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
15868#[inline(always)]
15869#[target_feature(enable = "neon")]
15870#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15871#[cfg_attr(test, assert_instr(addp))]
15872pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
15873 unsafe {
15874 let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
15875 let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
15876 simd_add(even, odd)
15877 }
15878}
15879#[doc = "Add Pairwise"]
15880#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
15881#[inline(always)]
15882#[target_feature(enable = "neon")]
15883#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15884#[cfg_attr(test, assert_instr(addp))]
15885pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
15886 unsafe {
15887 let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
15888 let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
15889 simd_add(even, odd)
15890 }
15891}
15892#[doc = "Add Pairwise"]
15893#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
15894#[inline(always)]
15895#[target_feature(enable = "neon")]
15896#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15897#[cfg_attr(test, assert_instr(addp))]
15898pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
15899 unsafe {
15900 let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
15901 let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
15902 simd_add(even, odd)
15903 }
15904}
15905#[doc = "Floating-point add pairwise"]
15906#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
15907#[inline(always)]
15908#[target_feature(enable = "neon,fp16")]
15909#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15910#[cfg(not(target_arch = "arm64ec"))]
15911#[cfg_attr(test, assert_instr(fmaxp))]
15912pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15913 unsafe extern "unadjusted" {
15914 #[cfg_attr(
15915 any(target_arch = "aarch64", target_arch = "arm64ec"),
15916 link_name = "llvm.aarch64.neon.fmaxp.v4f16"
15917 )]
15918 fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
15919 }
15920 unsafe { _vpmax_f16(a, b) }
15921}
15922#[doc = "Floating-point add pairwise"]
15923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
15924#[inline(always)]
15925#[target_feature(enable = "neon,fp16")]
15926#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15927#[cfg(not(target_arch = "arm64ec"))]
15928#[cfg_attr(test, assert_instr(fmaxp))]
15929pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15930 unsafe extern "unadjusted" {
15931 #[cfg_attr(
15932 any(target_arch = "aarch64", target_arch = "arm64ec"),
15933 link_name = "llvm.aarch64.neon.fmaxp.v8f16"
15934 )]
15935 fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15936 }
15937 unsafe { _vpmaxq_f16(a, b) }
15938}
15939#[doc = "Floating-point add pairwise"]
15940#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
15941#[inline(always)]
15942#[target_feature(enable = "neon,fp16")]
15943#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15944#[cfg(not(target_arch = "arm64ec"))]
15945#[cfg_attr(test, assert_instr(fmaxnmp))]
15946pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15947 unsafe extern "unadjusted" {
15948 #[cfg_attr(
15949 any(target_arch = "aarch64", target_arch = "arm64ec"),
15950 link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
15951 )]
15952 fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
15953 }
15954 unsafe { _vpmaxnm_f16(a, b) }
15955}
15956#[doc = "Floating-point add pairwise"]
15957#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
15958#[inline(always)]
15959#[target_feature(enable = "neon,fp16")]
15960#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
15961#[cfg(not(target_arch = "arm64ec"))]
15962#[cfg_attr(test, assert_instr(fmaxnmp))]
15963pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15964 unsafe extern "unadjusted" {
15965 #[cfg_attr(
15966 any(target_arch = "aarch64", target_arch = "arm64ec"),
15967 link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
15968 )]
15969 fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15970 }
15971 unsafe { _vpmaxnmq_f16(a, b) }
15972}
15973#[doc = "Floating-point Maximum Number Pairwise (vector)."]
15974#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
15975#[inline(always)]
15976#[target_feature(enable = "neon")]
15977#[cfg_attr(test, assert_instr(fmaxnmp))]
15978#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15979pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
15980 unsafe extern "unadjusted" {
15981 #[cfg_attr(
15982 any(target_arch = "aarch64", target_arch = "arm64ec"),
15983 link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
15984 )]
15985 fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
15986 }
15987 unsafe { _vpmaxnm_f32(a, b) }
15988}
15989#[doc = "Floating-point Maximum Number Pairwise (vector)."]
15990#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
15991#[inline(always)]
15992#[target_feature(enable = "neon")]
15993#[cfg_attr(test, assert_instr(fmaxnmp))]
15994#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15995pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15996 unsafe extern "unadjusted" {
15997 #[cfg_attr(
15998 any(target_arch = "aarch64", target_arch = "arm64ec"),
15999 link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
16000 )]
16001 fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16002 }
16003 unsafe { _vpmaxnmq_f32(a, b) }
16004}
16005#[doc = "Floating-point Maximum Number Pairwise (vector)."]
16006#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
16007#[inline(always)]
16008#[target_feature(enable = "neon")]
16009#[cfg_attr(test, assert_instr(fmaxnmp))]
16010#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16011pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16012 unsafe extern "unadjusted" {
16013 #[cfg_attr(
16014 any(target_arch = "aarch64", target_arch = "arm64ec"),
16015 link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
16016 )]
16017 fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16018 }
16019 unsafe { _vpmaxnmq_f64(a, b) }
16020}
16021#[doc = "Floating-point maximum number pairwise"]
16022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
16023#[inline(always)]
16024#[target_feature(enable = "neon")]
16025#[cfg_attr(test, assert_instr(fmaxnmp))]
16026#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16027pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
16028 unsafe extern "unadjusted" {
16029 #[cfg_attr(
16030 any(target_arch = "aarch64", target_arch = "arm64ec"),
16031 link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
16032 )]
16033 fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
16034 }
16035 unsafe { _vpmaxnmqd_f64(a) }
16036}
16037#[doc = "Floating-point maximum number pairwise"]
16038#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
16039#[inline(always)]
16040#[target_feature(enable = "neon")]
16041#[cfg_attr(test, assert_instr(fmaxnmp))]
16042#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16043pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
16044 unsafe extern "unadjusted" {
16045 #[cfg_attr(
16046 any(target_arch = "aarch64", target_arch = "arm64ec"),
16047 link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
16048 )]
16049 fn _vpmaxnms_f32(a: float32x2_t) -> f32;
16050 }
16051 unsafe { _vpmaxnms_f32(a) }
16052}
16053#[doc = "Folding maximum of adjacent pairs"]
16054#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
16055#[inline(always)]
16056#[target_feature(enable = "neon")]
16057#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16058#[cfg_attr(test, assert_instr(fmaxp))]
16059pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16060 unsafe extern "unadjusted" {
16061 #[cfg_attr(
16062 any(target_arch = "aarch64", target_arch = "arm64ec"),
16063 link_name = "llvm.aarch64.neon.fmaxp.v4f32"
16064 )]
16065 fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16066 }
16067 unsafe { _vpmaxq_f32(a, b) }
16068}
16069#[doc = "Folding maximum of adjacent pairs"]
16070#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
16071#[inline(always)]
16072#[target_feature(enable = "neon")]
16073#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16074#[cfg_attr(test, assert_instr(fmaxp))]
16075pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16076 unsafe extern "unadjusted" {
16077 #[cfg_attr(
16078 any(target_arch = "aarch64", target_arch = "arm64ec"),
16079 link_name = "llvm.aarch64.neon.fmaxp.v2f64"
16080 )]
16081 fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16082 }
16083 unsafe { _vpmaxq_f64(a, b) }
16084}
16085#[doc = "Folding maximum of adjacent pairs"]
16086#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
16087#[inline(always)]
16088#[target_feature(enable = "neon")]
16089#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16090#[cfg_attr(test, assert_instr(smaxp))]
16091pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
16092 unsafe extern "unadjusted" {
16093 #[cfg_attr(
16094 any(target_arch = "aarch64", target_arch = "arm64ec"),
16095 link_name = "llvm.aarch64.neon.smaxp.v16i8"
16096 )]
16097 fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
16098 }
16099 unsafe { _vpmaxq_s8(a, b) }
16100}
16101#[doc = "Folding maximum of adjacent pairs"]
16102#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
16103#[inline(always)]
16104#[target_feature(enable = "neon")]
16105#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16106#[cfg_attr(test, assert_instr(smaxp))]
16107pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
16108 unsafe extern "unadjusted" {
16109 #[cfg_attr(
16110 any(target_arch = "aarch64", target_arch = "arm64ec"),
16111 link_name = "llvm.aarch64.neon.smaxp.v8i16"
16112 )]
16113 fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
16114 }
16115 unsafe { _vpmaxq_s16(a, b) }
16116}
16117#[doc = "Folding maximum of adjacent pairs"]
16118#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
16119#[inline(always)]
16120#[target_feature(enable = "neon")]
16121#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16122#[cfg_attr(test, assert_instr(smaxp))]
16123pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
16124 unsafe extern "unadjusted" {
16125 #[cfg_attr(
16126 any(target_arch = "aarch64", target_arch = "arm64ec"),
16127 link_name = "llvm.aarch64.neon.smaxp.v4i32"
16128 )]
16129 fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
16130 }
16131 unsafe { _vpmaxq_s32(a, b) }
16132}
16133#[doc = "Folding maximum of adjacent pairs"]
16134#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
16135#[inline(always)]
16136#[target_feature(enable = "neon")]
16137#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16138#[cfg_attr(test, assert_instr(umaxp))]
16139pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16140 unsafe extern "unadjusted" {
16141 #[cfg_attr(
16142 any(target_arch = "aarch64", target_arch = "arm64ec"),
16143 link_name = "llvm.aarch64.neon.umaxp.v16i8"
16144 )]
16145 fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
16146 }
16147 unsafe { _vpmaxq_u8(a, b) }
16148}
16149#[doc = "Folding maximum of adjacent pairs"]
16150#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
16151#[inline(always)]
16152#[target_feature(enable = "neon")]
16153#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16154#[cfg_attr(test, assert_instr(umaxp))]
16155pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16156 unsafe extern "unadjusted" {
16157 #[cfg_attr(
16158 any(target_arch = "aarch64", target_arch = "arm64ec"),
16159 link_name = "llvm.aarch64.neon.umaxp.v8i16"
16160 )]
16161 fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
16162 }
16163 unsafe { _vpmaxq_u16(a, b) }
16164}
16165#[doc = "Folding maximum of adjacent pairs"]
16166#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
16167#[inline(always)]
16168#[target_feature(enable = "neon")]
16169#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16170#[cfg_attr(test, assert_instr(umaxp))]
16171pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16172 unsafe extern "unadjusted" {
16173 #[cfg_attr(
16174 any(target_arch = "aarch64", target_arch = "arm64ec"),
16175 link_name = "llvm.aarch64.neon.umaxp.v4i32"
16176 )]
16177 fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
16178 }
16179 unsafe { _vpmaxq_u32(a, b) }
16180}
16181#[doc = "Floating-point maximum pairwise"]
16182#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
16183#[inline(always)]
16184#[target_feature(enable = "neon")]
16185#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16186#[cfg_attr(test, assert_instr(fmaxp))]
16187pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
16188 unsafe extern "unadjusted" {
16189 #[cfg_attr(
16190 any(target_arch = "aarch64", target_arch = "arm64ec"),
16191 link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
16192 )]
16193 fn _vpmaxqd_f64(a: float64x2_t) -> f64;
16194 }
16195 unsafe { _vpmaxqd_f64(a) }
16196}
16197#[doc = "Floating-point maximum pairwise"]
16198#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
16199#[inline(always)]
16200#[target_feature(enable = "neon")]
16201#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16202#[cfg_attr(test, assert_instr(fmaxp))]
16203pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
16204 unsafe extern "unadjusted" {
16205 #[cfg_attr(
16206 any(target_arch = "aarch64", target_arch = "arm64ec"),
16207 link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
16208 )]
16209 fn _vpmaxs_f32(a: float32x2_t) -> f32;
16210 }
16211 unsafe { _vpmaxs_f32(a) }
16212}
16213#[doc = "Floating-point add pairwise"]
16214#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
16215#[inline(always)]
16216#[target_feature(enable = "neon,fp16")]
16217#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16218#[cfg(not(target_arch = "arm64ec"))]
16219#[cfg_attr(test, assert_instr(fminp))]
16220pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16221 unsafe extern "unadjusted" {
16222 #[cfg_attr(
16223 any(target_arch = "aarch64", target_arch = "arm64ec"),
16224 link_name = "llvm.aarch64.neon.fminp.v4f16"
16225 )]
16226 fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16227 }
16228 unsafe { _vpmin_f16(a, b) }
16229}
16230#[doc = "Floating-point add pairwise"]
16231#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
16232#[inline(always)]
16233#[target_feature(enable = "neon,fp16")]
16234#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16235#[cfg(not(target_arch = "arm64ec"))]
16236#[cfg_attr(test, assert_instr(fminp))]
16237pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16238 unsafe extern "unadjusted" {
16239 #[cfg_attr(
16240 any(target_arch = "aarch64", target_arch = "arm64ec"),
16241 link_name = "llvm.aarch64.neon.fminp.v8f16"
16242 )]
16243 fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16244 }
16245 unsafe { _vpminq_f16(a, b) }
16246}
16247#[doc = "Floating-point add pairwise"]
16248#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
16249#[inline(always)]
16250#[target_feature(enable = "neon,fp16")]
16251#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16252#[cfg(not(target_arch = "arm64ec"))]
16253#[cfg_attr(test, assert_instr(fminnmp))]
16254pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16255 unsafe extern "unadjusted" {
16256 #[cfg_attr(
16257 any(target_arch = "aarch64", target_arch = "arm64ec"),
16258 link_name = "llvm.aarch64.neon.fminnmp.v4f16"
16259 )]
16260 fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16261 }
16262 unsafe { _vpminnm_f16(a, b) }
16263}
16264#[doc = "Floating-point add pairwise"]
16265#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
16266#[inline(always)]
16267#[target_feature(enable = "neon,fp16")]
16268#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16269#[cfg(not(target_arch = "arm64ec"))]
16270#[cfg_attr(test, assert_instr(fminnmp))]
16271pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16272 unsafe extern "unadjusted" {
16273 #[cfg_attr(
16274 any(target_arch = "aarch64", target_arch = "arm64ec"),
16275 link_name = "llvm.aarch64.neon.fminnmp.v8f16"
16276 )]
16277 fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16278 }
16279 unsafe { _vpminnmq_f16(a, b) }
16280}
16281#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16282#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
16283#[inline(always)]
16284#[target_feature(enable = "neon")]
16285#[cfg_attr(test, assert_instr(fminnmp))]
16286#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16287pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
16288 unsafe extern "unadjusted" {
16289 #[cfg_attr(
16290 any(target_arch = "aarch64", target_arch = "arm64ec"),
16291 link_name = "llvm.aarch64.neon.fminnmp.v2f32"
16292 )]
16293 fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
16294 }
16295 unsafe { _vpminnm_f32(a, b) }
16296}
16297#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
16299#[inline(always)]
16300#[target_feature(enable = "neon")]
16301#[cfg_attr(test, assert_instr(fminnmp))]
16302#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16303pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16304 unsafe extern "unadjusted" {
16305 #[cfg_attr(
16306 any(target_arch = "aarch64", target_arch = "arm64ec"),
16307 link_name = "llvm.aarch64.neon.fminnmp.v4f32"
16308 )]
16309 fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16310 }
16311 unsafe { _vpminnmq_f32(a, b) }
16312}
16313#[doc = "Floating-point Minimum Number Pairwise (vector)."]
16314#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
16315#[inline(always)]
16316#[target_feature(enable = "neon")]
16317#[cfg_attr(test, assert_instr(fminnmp))]
16318#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16319pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16320 unsafe extern "unadjusted" {
16321 #[cfg_attr(
16322 any(target_arch = "aarch64", target_arch = "arm64ec"),
16323 link_name = "llvm.aarch64.neon.fminnmp.v2f64"
16324 )]
16325 fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16326 }
16327 unsafe { _vpminnmq_f64(a, b) }
16328}
16329#[doc = "Floating-point minimum number pairwise"]
16330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
16331#[inline(always)]
16332#[target_feature(enable = "neon")]
16333#[cfg_attr(test, assert_instr(fminnmp))]
16334#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16335pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
16336 unsafe extern "unadjusted" {
16337 #[cfg_attr(
16338 any(target_arch = "aarch64", target_arch = "arm64ec"),
16339 link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
16340 )]
16341 fn _vpminnmqd_f64(a: float64x2_t) -> f64;
16342 }
16343 unsafe { _vpminnmqd_f64(a) }
16344}
16345#[doc = "Floating-point minimum number pairwise"]
16346#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
16347#[inline(always)]
16348#[target_feature(enable = "neon")]
16349#[cfg_attr(test, assert_instr(fminnmp))]
16350#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16351pub fn vpminnms_f32(a: float32x2_t) -> f32 {
16352 unsafe extern "unadjusted" {
16353 #[cfg_attr(
16354 any(target_arch = "aarch64", target_arch = "arm64ec"),
16355 link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
16356 )]
16357 fn _vpminnms_f32(a: float32x2_t) -> f32;
16358 }
16359 unsafe { _vpminnms_f32(a) }
16360}
16361#[doc = "Folding minimum of adjacent pairs"]
16362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
16363#[inline(always)]
16364#[target_feature(enable = "neon")]
16365#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16366#[cfg_attr(test, assert_instr(fminp))]
16367pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
16368 unsafe extern "unadjusted" {
16369 #[cfg_attr(
16370 any(target_arch = "aarch64", target_arch = "arm64ec"),
16371 link_name = "llvm.aarch64.neon.fminp.v4f32"
16372 )]
16373 fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
16374 }
16375 unsafe { _vpminq_f32(a, b) }
16376}
16377#[doc = "Folding minimum of adjacent pairs"]
16378#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
16379#[inline(always)]
16380#[target_feature(enable = "neon")]
16381#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16382#[cfg_attr(test, assert_instr(fminp))]
16383pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
16384 unsafe extern "unadjusted" {
16385 #[cfg_attr(
16386 any(target_arch = "aarch64", target_arch = "arm64ec"),
16387 link_name = "llvm.aarch64.neon.fminp.v2f64"
16388 )]
16389 fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
16390 }
16391 unsafe { _vpminq_f64(a, b) }
16392}
16393#[doc = "Folding minimum of adjacent pairs"]
16394#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
16395#[inline(always)]
16396#[target_feature(enable = "neon")]
16397#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16398#[cfg_attr(test, assert_instr(sminp))]
16399pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
16400 unsafe extern "unadjusted" {
16401 #[cfg_attr(
16402 any(target_arch = "aarch64", target_arch = "arm64ec"),
16403 link_name = "llvm.aarch64.neon.sminp.v16i8"
16404 )]
16405 fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
16406 }
16407 unsafe { _vpminq_s8(a, b) }
16408}
16409#[doc = "Folding minimum of adjacent pairs"]
16410#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
16411#[inline(always)]
16412#[target_feature(enable = "neon")]
16413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16414#[cfg_attr(test, assert_instr(sminp))]
16415pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
16416 unsafe extern "unadjusted" {
16417 #[cfg_attr(
16418 any(target_arch = "aarch64", target_arch = "arm64ec"),
16419 link_name = "llvm.aarch64.neon.sminp.v8i16"
16420 )]
16421 fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
16422 }
16423 unsafe { _vpminq_s16(a, b) }
16424}
16425#[doc = "Folding minimum of adjacent pairs"]
16426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
16427#[inline(always)]
16428#[target_feature(enable = "neon")]
16429#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16430#[cfg_attr(test, assert_instr(sminp))]
16431pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
16432 unsafe extern "unadjusted" {
16433 #[cfg_attr(
16434 any(target_arch = "aarch64", target_arch = "arm64ec"),
16435 link_name = "llvm.aarch64.neon.sminp.v4i32"
16436 )]
16437 fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
16438 }
16439 unsafe { _vpminq_s32(a, b) }
16440}
16441#[doc = "Folding minimum of adjacent pairs"]
16442#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
16443#[inline(always)]
16444#[target_feature(enable = "neon")]
16445#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16446#[cfg_attr(test, assert_instr(uminp))]
16447pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
16448 unsafe extern "unadjusted" {
16449 #[cfg_attr(
16450 any(target_arch = "aarch64", target_arch = "arm64ec"),
16451 link_name = "llvm.aarch64.neon.uminp.v16i8"
16452 )]
16453 fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
16454 }
16455 unsafe { _vpminq_u8(a, b) }
16456}
16457#[doc = "Folding minimum of adjacent pairs"]
16458#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
16459#[inline(always)]
16460#[target_feature(enable = "neon")]
16461#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16462#[cfg_attr(test, assert_instr(uminp))]
16463pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
16464 unsafe extern "unadjusted" {
16465 #[cfg_attr(
16466 any(target_arch = "aarch64", target_arch = "arm64ec"),
16467 link_name = "llvm.aarch64.neon.uminp.v8i16"
16468 )]
16469 fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
16470 }
16471 unsafe { _vpminq_u16(a, b) }
16472}
16473#[doc = "Folding minimum of adjacent pairs"]
16474#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
16475#[inline(always)]
16476#[target_feature(enable = "neon")]
16477#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16478#[cfg_attr(test, assert_instr(uminp))]
16479pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16480 unsafe extern "unadjusted" {
16481 #[cfg_attr(
16482 any(target_arch = "aarch64", target_arch = "arm64ec"),
16483 link_name = "llvm.aarch64.neon.uminp.v4i32"
16484 )]
16485 fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
16486 }
16487 unsafe { _vpminq_u32(a, b) }
16488}
16489#[doc = "Floating-point minimum pairwise"]
16490#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
16491#[inline(always)]
16492#[target_feature(enable = "neon")]
16493#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16494#[cfg_attr(test, assert_instr(fminp))]
16495pub fn vpminqd_f64(a: float64x2_t) -> f64 {
16496 unsafe extern "unadjusted" {
16497 #[cfg_attr(
16498 any(target_arch = "aarch64", target_arch = "arm64ec"),
16499 link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
16500 )]
16501 fn _vpminqd_f64(a: float64x2_t) -> f64;
16502 }
16503 unsafe { _vpminqd_f64(a) }
16504}
16505#[doc = "Floating-point minimum pairwise"]
16506#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
16507#[inline(always)]
16508#[target_feature(enable = "neon")]
16509#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16510#[cfg_attr(test, assert_instr(fminp))]
16511pub fn vpmins_f32(a: float32x2_t) -> f32 {
16512 unsafe extern "unadjusted" {
16513 #[cfg_attr(
16514 any(target_arch = "aarch64", target_arch = "arm64ec"),
16515 link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
16516 )]
16517 fn _vpmins_f32(a: float32x2_t) -> f32;
16518 }
16519 unsafe { _vpmins_f32(a) }
16520}
16521#[doc = "Signed saturating Absolute value"]
16522#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
16523#[inline(always)]
16524#[target_feature(enable = "neon")]
16525#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16526#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16527pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
16528 unsafe extern "unadjusted" {
16529 #[cfg_attr(
16530 any(target_arch = "aarch64", target_arch = "arm64ec"),
16531 link_name = "llvm.aarch64.neon.sqabs.v1i64"
16532 )]
16533 fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
16534 }
16535 unsafe { _vqabs_s64(a) }
16536}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    // Lane-wise saturating absolute value of a two-lane i64 vector;
    // expected to lower to SQABS.
    unsafe extern "unadjusted" {
        // Direct binding to the LLVM saturating-abs intrinsic for v2i64.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: gated on the `neon` target feature enabled above.
    unsafe { _vqabsq_s64(a) }
}
16553#[doc = "Signed saturating absolute value"]
16554#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
16555#[inline(always)]
16556#[target_feature(enable = "neon")]
16557#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16558#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16559pub fn vqabsb_s8(a: i8) -> i8 {
16560 unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
16561}
16562#[doc = "Signed saturating absolute value"]
16563#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
16564#[inline(always)]
16565#[target_feature(enable = "neon")]
16566#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16567#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
16568pub fn vqabsh_s16(a: i16) -> i16 {
16569 unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
16570}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    // Scalar saturating absolute value; expected to lower to SQABS.
    unsafe extern "unadjusted" {
        // Direct binding to the scalar (i32) LLVM saturating-abs intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    // SAFETY: gated on the `neon` target feature enabled above.
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    // Scalar saturating absolute value; expected to lower to SQABS.
    unsafe extern "unadjusted" {
        // Direct binding to the scalar (i64) LLVM saturating-abs intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    // SAFETY: gated on the `neon` target feature enabled above.
    unsafe { _vqabsd_s64(a) }
}
16603#[doc = "Saturating add"]
16604#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
16605#[inline(always)]
16606#[target_feature(enable = "neon")]
16607#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16608#[cfg_attr(test, assert_instr(sqadd))]
16609pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
16610 let a: int8x8_t = vdup_n_s8(a);
16611 let b: int8x8_t = vdup_n_s8(b);
16612 unsafe { simd_extract!(vqadd_s8(a, b), 0) }
16613}
16614#[doc = "Saturating add"]
16615#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
16616#[inline(always)]
16617#[target_feature(enable = "neon")]
16618#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16619#[cfg_attr(test, assert_instr(sqadd))]
16620pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
16621 let a: int16x4_t = vdup_n_s16(a);
16622 let b: int16x4_t = vdup_n_s16(b);
16623 unsafe { simd_extract!(vqadd_s16(a, b), 0) }
16624}
16625#[doc = "Saturating add"]
16626#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
16627#[inline(always)]
16628#[target_feature(enable = "neon")]
16629#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16630#[cfg_attr(test, assert_instr(uqadd))]
16631pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
16632 let a: uint8x8_t = vdup_n_u8(a);
16633 let b: uint8x8_t = vdup_n_u8(b);
16634 unsafe { simd_extract!(vqadd_u8(a, b), 0) }
16635}
16636#[doc = "Saturating add"]
16637#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
16638#[inline(always)]
16639#[target_feature(enable = "neon")]
16640#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16641#[cfg_attr(test, assert_instr(uqadd))]
16642pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
16643 let a: uint16x4_t = vdup_n_u16(a);
16644 let b: uint16x4_t = vdup_n_u16(b);
16645 unsafe { simd_extract!(vqadd_u16(a, b), 0) }
16646}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    // Scalar signed saturating add; expected to lower to SQADD.
    unsafe extern "unadjusted" {
        // Direct binding to the scalar (i32) LLVM saturating-add intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: gated on the `neon` target feature enabled above.
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    // Scalar signed saturating add; expected to lower to SQADD.
    unsafe extern "unadjusted" {
        // Direct binding to the scalar (i64) LLVM saturating-add intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: gated on the `neon` target feature enabled above.
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    // Scalar unsigned saturating add; expected to lower to UQADD.
    unsafe extern "unadjusted" {
        // Direct binding to the scalar (i32) LLVM unsigned saturating-add
        // intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: gated on the `neon` target feature enabled above.
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    // Scalar unsigned saturating add; expected to lower to UQADD.
    unsafe extern "unadjusted" {
        // Direct binding to the scalar (i64) LLVM unsigned saturating-add
        // intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: gated on the `neon` target feature enabled above.
    unsafe { _vqaddd_u64(a, b) }
}
16711#[doc = "Signed saturating doubling multiply-add long"]
16712#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
16713#[inline(always)]
16714#[target_feature(enable = "neon")]
16715#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
16716#[rustc_legacy_const_generics(3)]
16717#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16718pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
16719 static_assert_uimm_bits!(N, 2);
16720 vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
16721}
16722#[doc = "Signed saturating doubling multiply-add long"]
16723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
16724#[inline(always)]
16725#[target_feature(enable = "neon")]
16726#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
16727#[rustc_legacy_const_generics(3)]
16728#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16729pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
16730 static_assert_uimm_bits!(N, 3);
16731 vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
16732}
16733#[doc = "Signed saturating doubling multiply-add long"]
16734#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
16735#[inline(always)]
16736#[target_feature(enable = "neon")]
16737#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
16738#[rustc_legacy_const_generics(3)]
16739#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16740pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
16741 static_assert_uimm_bits!(N, 1);
16742 vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
16743}
16744#[doc = "Signed saturating doubling multiply-add long"]
16745#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
16746#[inline(always)]
16747#[target_feature(enable = "neon")]
16748#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
16749#[rustc_legacy_const_generics(3)]
16750#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16751pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
16752 static_assert_uimm_bits!(N, 2);
16753 vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
16754}
16755#[doc = "Signed saturating doubling multiply-add long"]
16756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
16757#[inline(always)]
16758#[target_feature(enable = "neon")]
16759#[cfg_attr(test, assert_instr(sqdmlal2))]
16760#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16761pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
16762 vqaddq_s32(a, vqdmull_high_n_s16(b, c))
16763}
16764#[doc = "Signed saturating doubling multiply-add long"]
16765#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
16766#[inline(always)]
16767#[target_feature(enable = "neon")]
16768#[cfg_attr(test, assert_instr(sqdmlal2))]
16769#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16770pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
16771 vqaddq_s32(a, vqdmull_high_s16(b, c))
16772}
16773#[doc = "Signed saturating doubling multiply-add long"]
16774#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
16775#[inline(always)]
16776#[target_feature(enable = "neon")]
16777#[cfg_attr(test, assert_instr(sqdmlal2))]
16778#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16779pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
16780 vqaddq_s64(a, vqdmull_high_n_s32(b, c))
16781}
16782#[doc = "Signed saturating doubling multiply-add long"]
16783#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
16784#[inline(always)]
16785#[target_feature(enable = "neon")]
16786#[cfg_attr(test, assert_instr(sqdmlal2))]
16787#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16788pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
16789 vqaddq_s64(a, vqdmull_high_s32(b, c))
16790}
16791#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
16792#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
16793#[inline(always)]
16794#[target_feature(enable = "neon")]
16795#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
16796#[rustc_legacy_const_generics(3)]
16797#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16798pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
16799 static_assert_uimm_bits!(N, 3);
16800 vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
16801}
16802#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
16803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
16804#[inline(always)]
16805#[target_feature(enable = "neon")]
16806#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
16807#[rustc_legacy_const_generics(3)]
16808#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16809pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
16810 static_assert_uimm_bits!(N, 2);
16811 vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
16812}
16813#[doc = "Signed saturating doubling multiply-add long"]
16814#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
16815#[inline(always)]
16816#[target_feature(enable = "neon")]
16817#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
16818#[rustc_legacy_const_generics(3)]
16819#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16820pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
16821 static_assert_uimm_bits!(LANE, 2);
16822 unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
16823}
16824#[doc = "Signed saturating doubling multiply-add long"]
16825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
16826#[inline(always)]
16827#[target_feature(enable = "neon")]
16828#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
16829#[rustc_legacy_const_generics(3)]
16830#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16831pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
16832 static_assert_uimm_bits!(LANE, 3);
16833 unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
16834}
16835#[doc = "Signed saturating doubling multiply-add long"]
16836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
16837#[inline(always)]
16838#[target_feature(enable = "neon")]
16839#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
16840#[rustc_legacy_const_generics(3)]
16841#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16842pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
16843 static_assert_uimm_bits!(LANE, 1);
16844 unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
16845}
16846#[doc = "Signed saturating doubling multiply-add long"]
16847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
16848#[inline(always)]
16849#[target_feature(enable = "neon")]
16850#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
16851#[rustc_legacy_const_generics(3)]
16852#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16853pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
16854 static_assert_uimm_bits!(LANE, 2);
16855 unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
16856}
16857#[doc = "Signed saturating doubling multiply-add long"]
16858#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
16859#[inline(always)]
16860#[target_feature(enable = "neon")]
16861#[cfg_attr(test, assert_instr(sqdmlal))]
16862#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16863pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
16864 let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
16865 unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
16866}
16867#[doc = "Signed saturating doubling multiply-add long"]
16868#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
16869#[inline(always)]
16870#[target_feature(enable = "neon")]
16871#[cfg_attr(test, assert_instr(sqdmlal))]
16872#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16873pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
16874 let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
16875 x
16876}
16877#[doc = "Signed saturating doubling multiply-subtract long"]
16878#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
16879#[inline(always)]
16880#[target_feature(enable = "neon")]
16881#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
16882#[rustc_legacy_const_generics(3)]
16883#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16884pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
16885 static_assert_uimm_bits!(N, 2);
16886 vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
16887}
16888#[doc = "Signed saturating doubling multiply-subtract long"]
16889#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
16890#[inline(always)]
16891#[target_feature(enable = "neon")]
16892#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
16893#[rustc_legacy_const_generics(3)]
16894#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16895pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
16896 static_assert_uimm_bits!(N, 3);
16897 vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
16898}
16899#[doc = "Signed saturating doubling multiply-subtract long"]
16900#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
16901#[inline(always)]
16902#[target_feature(enable = "neon")]
16903#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
16904#[rustc_legacy_const_generics(3)]
16905#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16906pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
16907 static_assert_uimm_bits!(N, 1);
16908 vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
16909}
16910#[doc = "Signed saturating doubling multiply-subtract long"]
16911#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
16912#[inline(always)]
16913#[target_feature(enable = "neon")]
16914#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
16915#[rustc_legacy_const_generics(3)]
16916#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16917pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
16918 static_assert_uimm_bits!(N, 2);
16919 vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
16920}
16921#[doc = "Signed saturating doubling multiply-subtract long"]
16922#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
16923#[inline(always)]
16924#[target_feature(enable = "neon")]
16925#[cfg_attr(test, assert_instr(sqdmlsl2))]
16926#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16927pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
16928 vqsubq_s32(a, vqdmull_high_n_s16(b, c))
16929}
16930#[doc = "Signed saturating doubling multiply-subtract long"]
16931#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
16932#[inline(always)]
16933#[target_feature(enable = "neon")]
16934#[cfg_attr(test, assert_instr(sqdmlsl2))]
16935#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16936pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
16937 vqsubq_s32(a, vqdmull_high_s16(b, c))
16938}
16939#[doc = "Signed saturating doubling multiply-subtract long"]
16940#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
16941#[inline(always)]
16942#[target_feature(enable = "neon")]
16943#[cfg_attr(test, assert_instr(sqdmlsl2))]
16944#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16945pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
16946 vqsubq_s64(a, vqdmull_high_n_s32(b, c))
16947}
16948#[doc = "Signed saturating doubling multiply-subtract long"]
16949#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
16950#[inline(always)]
16951#[target_feature(enable = "neon")]
16952#[cfg_attr(test, assert_instr(sqdmlsl2))]
16953#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16954pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
16955 vqsubq_s64(a, vqdmull_high_s32(b, c))
16956}
16957#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
16958#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
16959#[inline(always)]
16960#[target_feature(enable = "neon")]
16961#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
16962#[rustc_legacy_const_generics(3)]
16963#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16964pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
16965 static_assert_uimm_bits!(N, 3);
16966 vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
16967}
16968#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
16969#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
16970#[inline(always)]
16971#[target_feature(enable = "neon")]
16972#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
16973#[rustc_legacy_const_generics(3)]
16974#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16975pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
16976 static_assert_uimm_bits!(N, 2);
16977 vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
16978}
16979#[doc = "Signed saturating doubling multiply-subtract long"]
16980#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
16981#[inline(always)]
16982#[target_feature(enable = "neon")]
16983#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
16984#[rustc_legacy_const_generics(3)]
16985#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16986pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
16987 static_assert_uimm_bits!(LANE, 2);
16988 unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
16989}
16990#[doc = "Signed saturating doubling multiply-subtract long"]
16991#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
16992#[inline(always)]
16993#[target_feature(enable = "neon")]
16994#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
16995#[rustc_legacy_const_generics(3)]
16996#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16997pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
16998 static_assert_uimm_bits!(LANE, 3);
16999 unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
17000}
17001#[doc = "Signed saturating doubling multiply-subtract long"]
17002#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
17003#[inline(always)]
17004#[target_feature(enable = "neon")]
17005#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
17006#[rustc_legacy_const_generics(3)]
17007#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17008pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
17009 static_assert_uimm_bits!(LANE, 1);
17010 unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
17011}
17012#[doc = "Signed saturating doubling multiply-subtract long"]
17013#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
17014#[inline(always)]
17015#[target_feature(enable = "neon")]
17016#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
17017#[rustc_legacy_const_generics(3)]
17018#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17019pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
17020 static_assert_uimm_bits!(LANE, 2);
17021 unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
17022}
17023#[doc = "Signed saturating doubling multiply-subtract long"]
17024#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
17025#[inline(always)]
17026#[target_feature(enable = "neon")]
17027#[cfg_attr(test, assert_instr(sqdmlsl))]
17028#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17029pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
17030 let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
17031 unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
17032}
17033#[doc = "Signed saturating doubling multiply-subtract long"]
17034#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
17035#[inline(always)]
17036#[target_feature(enable = "neon")]
17037#[cfg_attr(test, assert_instr(sqdmlsl))]
17038#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17039pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
17040 let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
17041 x
17042}
17043#[doc = "Vector saturating doubling multiply high by scalar"]
17044#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
17045#[inline(always)]
17046#[target_feature(enable = "neon")]
17047#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
17048#[rustc_legacy_const_generics(2)]
17049#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17050pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
17051 static_assert_uimm_bits!(LANE, 2);
17052 unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
17053}
17054#[doc = "Vector saturating doubling multiply high by scalar"]
17055#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
17056#[inline(always)]
17057#[target_feature(enable = "neon")]
17058#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
17059#[rustc_legacy_const_generics(2)]
17060#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17061pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
17062 static_assert_uimm_bits!(LANE, 2);
17063 unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
17064}
17065#[doc = "Vector saturating doubling multiply high by scalar"]
17066#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
17067#[inline(always)]
17068#[target_feature(enable = "neon")]
17069#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
17070#[rustc_legacy_const_generics(2)]
17071#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17072pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
17073 static_assert_uimm_bits!(LANE, 1);
17074 unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
17075}
17076#[doc = "Vector saturating doubling multiply high by scalar"]
17077#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
17078#[inline(always)]
17079#[target_feature(enable = "neon")]
17080#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
17081#[rustc_legacy_const_generics(2)]
17082#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17083pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
17084 static_assert_uimm_bits!(LANE, 1);
17085 unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
17086}
17087#[doc = "Signed saturating doubling multiply returning high half"]
17088#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
17089#[inline(always)]
17090#[target_feature(enable = "neon")]
17091#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
17092#[rustc_legacy_const_generics(2)]
17093#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17094pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
17095 static_assert_uimm_bits!(N, 2);
17096 unsafe {
17097 let b: i16 = simd_extract!(b, N as u32);
17098 vqdmulhh_s16(a, b)
17099 }
17100}
17101#[doc = "Signed saturating doubling multiply returning high half"]
17102#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
17103#[inline(always)]
17104#[target_feature(enable = "neon")]
17105#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
17106#[rustc_legacy_const_generics(2)]
17107#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17108pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
17109 static_assert_uimm_bits!(N, 3);
17110 unsafe {
17111 let b: i16 = simd_extract!(b, N as u32);
17112 vqdmulhh_s16(a, b)
17113 }
17114}
17115#[doc = "Signed saturating doubling multiply returning high half"]
17116#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
17117#[inline(always)]
17118#[target_feature(enable = "neon")]
17119#[cfg_attr(test, assert_instr(sqdmulh))]
17120#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17121pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
17122 let a: int16x4_t = vdup_n_s16(a);
17123 let b: int16x4_t = vdup_n_s16(b);
17124 unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
17125}
17126#[doc = "Signed saturating doubling multiply returning high half"]
17127#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
17128#[inline(always)]
17129#[target_feature(enable = "neon")]
17130#[cfg_attr(test, assert_instr(sqdmulh))]
17131#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17132pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
17133 let a: int32x2_t = vdup_n_s32(a);
17134 let b: int32x2_t = vdup_n_s32(b);
17135 unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
17136}
17137#[doc = "Signed saturating doubling multiply returning high half"]
17138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
17139#[inline(always)]
17140#[target_feature(enable = "neon")]
17141#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
17142#[rustc_legacy_const_generics(2)]
17143#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17144pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
17145 static_assert_uimm_bits!(N, 1);
17146 unsafe {
17147 let b: i32 = simd_extract!(b, N as u32);
17148 vqdmulhs_s32(a, b)
17149 }
17150}
17151#[doc = "Signed saturating doubling multiply returning high half"]
17152#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
17153#[inline(always)]
17154#[target_feature(enable = "neon")]
17155#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
17156#[rustc_legacy_const_generics(2)]
17157#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17158pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
17159 static_assert_uimm_bits!(N, 2);
17160 unsafe {
17161 let b: i32 = simd_extract!(b, N as u32);
17162 vqdmulhs_s32(a, b)
17163 }
17164}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // N must fit in 2 bits: `b` has 4 lanes (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Indices [4, 5, 6, 7] select the upper half of the 8-lane input.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // Broadcast lane N of `b` across all 4 lanes.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // N must fit in 2 bits: `b` has 4 lanes (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Indices [2, 3] select the upper half of the 4-lane input.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // Broadcast lane N of `b` across both lanes.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // N must fit in 1 bit: `b` has 2 lanes (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Indices [2, 3] select the upper half of the 4-lane input.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // Broadcast lane N of `b` across both lanes.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // N must fit in 3 bits: `b` has 8 lanes (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Indices [4, 5, 6, 7] select the upper half of the 8-lane input.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // Broadcast lane N of `b` across 4 lanes.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        // Take the upper half of `a` and splat the scalar `b` into a vector,
        // then reuse the plain widening multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        // Take the upper half of `a` and splat the scalar `b` into a vector,
        // then reuse the plain widening multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the upper halves of both 8-lane inputs, then widen-multiply.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the upper halves of both 4-lane inputs, then widen-multiply.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    // N must fit in 3 bits: `b` has 8 lanes (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Broadcast lane N of `b` across 4 lanes before the widening multiply.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    // N must fit in 2 bits: `b` has 4 lanes (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Broadcast lane N of `b` across both lanes before the widening multiply.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    // N must fit in 2 bits: `b` has 4 lanes (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the requested lane, then defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    // N must fit in 2 bits: `b` has 4 lanes (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the requested lane, then defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    // N must fit in 3 bits: `b` has 8 lanes (0..=7).
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Extract the requested lane, then defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // Splat both scalars into vectors, run the vector intrinsic, and take
    // lane 0 of the widened result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    // N must fit in 1 bit: `b` has 2 lanes (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Extract the requested lane, then defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // Direct binding to the LLVM scalar intrinsic; "unadjusted" keeps the
    // argument passing exactly as declared, with no Rust ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    unsafe { _vqdmulls_s32(a, b) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Narrow `b` with saturation, then concatenate `a` (lanes 0..=7) with the
    // narrowed result (lanes 8..=15) into one 16-lane vector.
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Concatenate `a` (low half) with the saturating-narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Concatenate `a` (low half) with the saturating-narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
17421#[doc = "Signed saturating extract narrow"]
17422#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
17423#[inline(always)]
17424#[target_feature(enable = "neon")]
17425#[cfg_attr(test, assert_instr(uqxtn2))]
17426#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17427pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
17428 unsafe {
17429 simd_shuffle!(
17430 a,
17431 vqmovn_u16(b),
17432 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
17433 )
17434 }
17435}
17436#[doc = "Signed saturating extract narrow"]
17437#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
17438#[inline(always)]
17439#[target_feature(enable = "neon")]
17440#[cfg_attr(test, assert_instr(uqxtn2))]
17441#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17442pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
17443 unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
17444}
17445#[doc = "Signed saturating extract narrow"]
17446#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
17447#[inline(always)]
17448#[target_feature(enable = "neon")]
17449#[cfg_attr(test, assert_instr(uqxtn2))]
17450#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17451pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
17452 unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
17453}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    // Direct binding to the scalar LLVM intrinsic; "unadjusted" keeps the
    // argument passing exactly as declared, with no Rust ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    // Direct binding to the scalar LLVM intrinsic; "unadjusted" keeps the
    // argument passing exactly as declared, with no Rust ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    // Splat the scalar, narrow the whole vector, and take lane 0.
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    // Splat the scalar, narrow the whole vector, and take lane 0.
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    // Splat the scalar, narrow the whole vector, and take lane 0.
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    // Splat the scalar, narrow the whole vector, and take lane 0.
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Narrow signed `b` to unsigned with saturation, then concatenate `a`
    // (lanes 0..=7) with the narrowed result (lanes 8..=15).
    unsafe {
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Concatenate `a` (low half) with the signed-to-unsigned narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Concatenate `a` (low half) with the signed-to-unsigned narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    // Splat the scalar, narrow the whole vector, and take lane 0.
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    // Splat the scalar, narrow the whole vector, and take lane 0.
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    // Splat the scalar, narrow the whole vector, and take lane 0.
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    // Direct binding to the LLVM intrinsic for the 1-lane i64 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    // Direct binding to the LLVM intrinsic for the 2-lane i64 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    // Splat the scalar, negate the vector, and take lane 0.
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    // Splat the scalar, negate the vector, and take lane 0.
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    // Splat the scalar, negate the vector, and take lane 0.
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    // Splat the scalar, negate the vector, and take lane 0.
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE must fit in 2 bits: `c` has 4 lanes (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` across all 4 lanes.
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // LANE must fit in 1 bit: `c` has 2 lanes (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the chosen lane of `c` across both lanes.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // LANE must fit in 3 bits: `c` has 8 lanes (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the chosen lane of the 8-lane `c` into a 4-lane vector.
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // LANE must fit in 2 bits: `c` has 4 lanes (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of the 4-lane `c` into a 2-lane vector.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // LANE must fit in 2 bits: `c` has 4 lanes (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of the 4-lane `c` into an 8-lane vector.
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // LANE must fit in 1 bit: `c` has 2 lanes (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the chosen lane of the 2-lane `c` into a 4-lane vector.
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // LANE must fit in 3 bits: `c` has 8 lanes (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the chosen lane of `c` across all 8 lanes.
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // LANE must fit in 2 bits: `c` has 4 lanes (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the chosen lane of `c` across all 4 lanes.
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Direct binding to the LLVM intrinsic for the 4-lane i16 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Direct binding to the LLVM intrinsic for the 8-lane i16 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Direct binding to the LLVM intrinsic for the 2-lane i32 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Direct binding to the LLVM intrinsic for the 4-lane i32 vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // LANE must fit in 2 bits: `c` has 4 lanes (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // LANE must fit in 3 bits: `c` has 8 lanes (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // LANE must fit in 1 bit: `c` has 2 lanes (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and defer to the scalar form.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // LANE must fit in 2 bits: one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the assertion above proves LANE is in 0..=3, so the
    // `simd_extract!` lane index is in bounds for an `int32x4_t`.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Broadcast each scalar across a 4-lane vector, run the vector
    // operation, and return lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    // SAFETY: lane index 0 is always in bounds for an `int16x4_t`.
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Broadcast each scalar across a 2-lane vector, run the vector
    // operation, and return lane 0 of the result.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    // SAFETY: lane index 0 is always in bounds for an `int32x2_t`.
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE must fit in 2 bits: one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is proven in bounds above; the shuffle broadcasts
    // lane LANE of `c` across all 4 lanes before the vector operation.
    unsafe {
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // LANE must fit in 1 bit: one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is proven in bounds above; the shuffle broadcasts
    // lane LANE of `c` across both lanes before the vector operation.
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // LANE must fit in 3 bits: one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is proven in bounds above; the shuffle narrows the
    // 8-lane `c` to 4 lanes, all holding lane LANE of `c`.
    unsafe {
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // LANE must fit in 2 bits: one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is proven in bounds above; the shuffle narrows the
    // 4-lane `c` to 2 lanes, both holding lane LANE of `c`.
    unsafe {
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // LANE must fit in 2 bits: one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is proven in bounds above; the shuffle widens the
    // 4-lane `c` to 8 lanes, all holding lane LANE of `c`.
    unsafe {
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // LANE must fit in 1 bit: one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is proven in bounds above; the shuffle widens the
    // 2-lane `c` to 4 lanes, all holding lane LANE of `c`.
    unsafe {
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // LANE must fit in 3 bits: one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is proven in bounds above; the shuffle broadcasts
    // lane LANE of `c` across all 8 lanes before the vector operation.
    unsafe {
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // LANE must fit in 2 bits: one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is proven in bounds above; the shuffle broadcasts
    // lane LANE of `c` across all 4 lanes before the vector operation.
    unsafe {
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Raw binding to the corresponding AArch64 LLVM intrinsic; the
    // "unadjusted" ABI passes vector types through without adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    // SAFETY: pure value computation with no pointer arguments; the
    // required "rdm" feature is enabled via #[target_feature] above.
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Raw binding to the corresponding AArch64 LLVM intrinsic; the
    // "unadjusted" ABI passes vector types through without adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: pure value computation with no pointer arguments; the
    // required "rdm" feature is enabled via #[target_feature] above.
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Raw binding to the corresponding AArch64 LLVM intrinsic; the
    // "unadjusted" ABI passes vector types through without adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    // SAFETY: pure value computation with no pointer arguments; the
    // required "rdm" feature is enabled via #[target_feature] above.
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Raw binding to the corresponding AArch64 LLVM intrinsic; the
    // "unadjusted" ABI passes vector types through without adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: pure value computation with no pointer arguments; the
    // required "rdm" feature is enabled via #[target_feature] above.
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // LANE must fit in 2 bits: one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the assertion above proves LANE is in 0..=3, so the
    // `simd_extract!` lane index is in bounds for an `int16x4_t`.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // LANE must fit in 3 bits: one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the assertion above proves LANE is in 0..=7, so the
    // `simd_extract!` lane index is in bounds for an `int16x8_t`.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // LANE must fit in 1 bit: one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the assertion above proves LANE is 0 or 1, so the
    // `simd_extract!` lane index is in bounds for an `int32x2_t`.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // LANE must fit in 2 bits: one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the assertion above proves LANE is in 0..=3, so the
    // `simd_extract!` lane index is in bounds for an `int32x4_t`.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Broadcast each scalar across a 4-lane vector, run the vector
    // operation, and return lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    // SAFETY: lane index 0 is always in bounds for an `int16x4_t`.
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Broadcast each scalar across a 2-lane vector, run the vector
    // operation, and return lane 0 of the result.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    // SAFETY: lane index 0 is always in bounds for an `int32x2_t`.
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // LANE must fit in 2 bits: one of the 4 lanes of `b`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the assertion above proves LANE is in 0..=3, so the
    // `simd_extract!` lane index is in bounds for an `int16x4_t`.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // LANE must fit in 3 bits: one of the 8 lanes of `b`.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: the assertion above proves LANE is in 0..=7, so the
    // `simd_extract!` lane index is in bounds for an `int16x8_t`.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // LANE must fit in 1 bit: one of the 2 lanes of `b`.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: the assertion above proves LANE is 0 or 1, so the
    // `simd_extract!` lane index is in bounds for an `int32x2_t`.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // LANE must fit in 2 bits: one of the 4 lanes of `b`.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: the assertion above proves LANE is in 0..=3, so the
    // `simd_extract!` lane index is in bounds for an `int32x4_t`.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Broadcast both scalars to 4-lane vectors, run the vector
    // operation, and return lane 0.
    // SAFETY: lane index 0 is always in bounds for an `int16x4_t`.
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Broadcast both scalars to 2-lane vectors, run the vector
    // operation, and return lane 0.
    // SAFETY: lane index 0 is always in bounds for an `int32x2_t`.
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Broadcast both scalars to 8-lane vectors, run the vector
    // shift, and return lane 0 of the result.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane index 0 is always in bounds for an `int8x8_t`.
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Broadcast both scalars to 4-lane vectors, run the vector
    // shift, and return lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane index 0 is always in bounds for an `int16x4_t`.
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Broadcast both scalars to 8-lane vectors (unsigned value,
    // signed shift count), run the vector shift, and return lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane index 0 is always in bounds for a `uint8x8_t`.
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // Broadcast both scalars to 4-lane vectors (unsigned value,
    // signed shift count), run the vector shift, and return lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane index 0 is always in bounds for a `uint16x4_t`.
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    // Raw binding to the scalar (i64) form of the AArch64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: pure value computation with no pointer arguments; the
    // required "neon" feature is enabled via #[target_feature] above.
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    // Raw binding to the scalar (i32) form of the AArch64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: pure value computation with no pointer arguments; the
    // required "neon" feature is enabled via #[target_feature] above.
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    // Raw binding to the scalar (i32) unsigned form of the AArch64
    // LLVM intrinsic; the shift count `b` is signed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: pure value computation with no pointer arguments; the
    // required "neon" feature is enabled via #[target_feature] above.
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    // Raw binding to the scalar (i64) unsigned form of the AArch64
    // LLVM intrinsic; the shift count `b` is signed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: pure value computation with no pointer arguments; the
    // required "neon" feature is enabled via #[target_feature] above.
    unsafe { _vqrshld_u64(a, b) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // The shift amount must be in 1..=8 for 16-bit source elements.
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: the shuffle concatenates `a` (lanes 0..8) with the
    // narrowed `b` (lanes 8..16); indices 0..16 are all in bounds.
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // The shift amount must be in 1..=16 for 32-bit source elements.
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: the shuffle concatenates `a` (lanes 0..4) with the
    // narrowed `b` (lanes 4..8); indices 0..8 are all in bounds.
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // The shift amount must be in 1..=32 for 64-bit source elements.
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: the shuffle concatenates `a` (lanes 0..2) with the
    // narrowed `b` (lanes 2..4); indices 0..4 are all in bounds.
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // The shift amount must be in 1..=8 for 16-bit source elements.
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: the shuffle concatenates `a` (lanes 0..8) with the
    // narrowed `b` (lanes 8..16); indices 0..16 are all in bounds.
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // The shift amount must be in 1..=16 for 32-bit source elements.
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: the shuffle concatenates `a` (lanes 0..4) with the
    // narrowed `b` (lanes 4..8); indices 0..8 are all in bounds.
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // The shift amount must be in 1..=32 for 64-bit source elements.
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: the shuffle concatenates `a` (lanes 0..2) with the
    // narrowed `b` (lanes 2..4); indices 0..4 are all in bounds.
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // The shift amount must be in 1..=32 for a 64-bit source element.
    static_assert!(N >= 1 && N <= 32);
    // Broadcast the scalar, narrow the whole vector, then take lane 0.
    let a: uint64x2_t = vdupq_n_u64(a);
    // SAFETY: lane index 0 is always in bounds for the narrowed result.
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // The shift amount must be in 1..=8 for a 16-bit source element.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast the scalar, narrow the whole vector, then take lane 0.
    let a: uint16x8_t = vdupq_n_u16(a);
    // SAFETY: lane index 0 is always in bounds for the narrowed result.
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // The shift amount must be in 1..=16 for a 32-bit source element.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast the scalar, narrow the whole vector, then take lane 0.
    let a: uint32x4_t = vdupq_n_u32(a);
    // SAFETY: lane index 0 is always in bounds for the narrowed result.
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // The shift amount must be in 1..=8 for a 16-bit source element.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast the scalar, narrow the whole vector, then take lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    // SAFETY: lane index 0 is always in bounds for the narrowed result.
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // The shift amount must be in 1..=16 for a 32-bit source element.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast the scalar, narrow the whole vector, then take lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    // SAFETY: lane index 0 is always in bounds for the narrowed result.
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // The shift amount must be in 1..=32 for a 64-bit source element.
    static_assert!(N >= 1 && N <= 32);
    // Broadcast the scalar, narrow the whole vector, then take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    // SAFETY: lane index 0 is always in bounds for the narrowed result.
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // The shift amount must be in 1..=8 for 16-bit source elements.
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: the shuffle concatenates `a` (lanes 0..8) with the
    // narrowed `b` (lanes 8..16); indices 0..16 are all in bounds.
    unsafe {
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // The shift amount must be in 1..=16 for 32-bit source elements.
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: the shuffle concatenates `a` (lanes 0..4) with the
    // narrowed `b` (lanes 4..8); indices 0..8 are all in bounds.
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // The shift amount must be in 1..=32 for 64-bit source elements.
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: the shuffle concatenates `a` (lanes 0..2) with the
    // narrowed `b` (lanes 2..4); indices 0..4 are all in bounds.
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Immediate shift must be 1..=32 (the narrowed result width).
    static_assert!(N >= 1 && N <= 32);
    // Scalar form implemented by splatting into a vector, narrowing, and
    // extracting lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Immediate shift must be 1..=8 (the narrowed result width).
    static_assert!(N >= 1 && N <= 8);
    // Splat, narrow, extract lane 0 (same pattern as vqrshrund_n_s64).
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Immediate shift must be 1..=16 (the narrowed result width).
    static_assert!(N >= 1 && N <= 16);
    // Splat, narrow, extract lane 0 (same pattern as vqrshrund_n_s64).
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    // N must fit in 3 bits (0..=7): a left shift within the 8-bit element.
    static_assert_uimm_bits!(N, 3);
    // Scalar form: splat into a vector, apply the vector shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    // N must fit in 6 bits (0..=63) for the 64-bit element.
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    // N must fit in 4 bits (0..=15) for the 16-bit element.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    // N must fit in 5 bits (0..=31) for the 32-bit element.
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    // N must fit in 3 bits (0..=7): a left shift within the 8-bit element.
    static_assert_uimm_bits!(N, 3);
    // Scalar form: splat into a vector, apply the vector shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    // N must fit in 6 bits (0..=63) for the 64-bit element.
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    // N must fit in 4 bits (0..=15) for the 16-bit element.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    // N must fit in 5 bits (0..=31) for the 32-bit element.
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    // Register-shift scalar form: splat both operands into vectors, do the
    // vector saturating shift, and read back lane 0.
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    // Splat both operands, shift as vectors, extract lane 0.
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    // Splat both operands, shift as vectors, extract lane 0.
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    // The value being shifted is unsigned; the shift amount `b` stays signed
    // (a negative amount shifts right). Splat, shift as vectors, extract lane 0.
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    // Splat both operands, shift as vectors, extract lane 0.
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    // Splat both operands, shift as vectors, extract lane 0.
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    // 64-bit scalar form binds the scalar LLVM intrinsic directly instead of
    // going through a splat/extract round-trip like the narrower variants.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: the intrinsic requires only the `neon` target feature, which the
    // `#[target_feature]` attribute on this function guarantees.
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    // Direct scalar LLVM binding; `b` stays signed (negative = right shift).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vqshld_u64(a, b) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    // N must fit in 3 bits (0..=7) for the 8-bit element.
    static_assert_uimm_bits!(N, 3);
    // Scalar form: splat, apply the vector shift, extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    // N must fit in 6 bits (0..=63) for the 64-bit element.
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    // N must fit in 4 bits (0..=15) for the 16-bit element.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    // N must fit in 5 bits (0..=31) for the 32-bit element.
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Immediate shift is limited to the narrowed (8-bit) element width.
    static_assert!(N >= 1 && N <= 8);
    // Narrow `b`, then concatenate `a` (low half) with the narrowed result
    // (high half) via a shuffle across both vectors.
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Immediate shift is limited to the narrowed (16-bit) element width.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Immediate shift is limited to the narrowed (32-bit) element width.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Immediate shift is limited to the narrowed (8-bit) element width.
    static_assert!(N >= 1 && N <= 8);
    // Narrow `b`, then concatenate `a` (low half) with the narrowed result.
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Immediate shift is limited to the narrowed (16-bit) element width.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Immediate shift is limited to the narrowed (32-bit) element width.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Immediate shift must be 1..=32 (the narrowed result width); the
    // static_assert guarantees the const generic forwarded to LLVM is valid.
    static_assert!(N >= 1 && N <= 32);
    // Scalar LLVM binding; the shift amount is passed as the second operand.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above,
    // and N has been range-checked at compile time.
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Immediate shift must be 1..=32 (the narrowed result width).
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above,
    // and N has been range-checked at compile time.
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Immediate shift must be 1..=8 (the narrowed result width).
    static_assert!(N >= 1 && N <= 8);
    // Scalar form: splat, narrow as a vector, extract lane 0.
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Immediate shift must be 1..=16 (the narrowed result width).
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // Immediate shift must be 1..=8 (the narrowed result width).
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // Immediate shift must be 1..=16 (the narrowed result width).
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Immediate shift is limited to the narrowed (8-bit) element width.
    static_assert!(N >= 1 && N <= 8);
    // Narrow `b`, then concatenate `a` (low half) with the narrowed result.
    unsafe {
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Immediate shift is limited to the narrowed (16-bit) element width.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Immediate shift is limited to the narrowed (32-bit) element width.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Immediate shift must be 1..=32 (the narrowed result width).
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat, narrow as a vector, extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Immediate shift must be 1..=8 (the narrowed result width).
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Immediate shift must be 1..=16 (the narrowed result width).
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat both operands, subtract as vectors, extract lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    // Splat, subtract as vectors, extract lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    // Splat, subtract as vectors, extract lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    // Splat, subtract as vectors, extract lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // 32/64-bit scalar forms bind the scalar LLVM intrinsic directly rather
    // than using the splat/extract round-trip of the narrower variants.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vqsubd_u64(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // Private helper shared by the public s8/u8/p8 wrappers below; they
    // transmute their same-layout inputs to call this one LLVM binding.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // 128-bit-index counterpart of `vqtbl1`; same helper-plus-wrappers scheme.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vqtbl1q(a, b) }
}
19127#[doc = "Table look-up"]
19128#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
19129#[inline(always)]
19130#[target_feature(enable = "neon")]
19131#[cfg_attr(test, assert_instr(tbl))]
19132#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19133pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
19134 vqtbl1(a, b)
19135}
19136#[doc = "Table look-up"]
19137#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
19138#[inline(always)]
19139#[target_feature(enable = "neon")]
19140#[cfg_attr(test, assert_instr(tbl))]
19141#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19142pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
19143 vqtbl1q(a, b)
19144}
19145#[doc = "Table look-up"]
19146#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
19147#[inline(always)]
19148#[target_feature(enable = "neon")]
19149#[cfg_attr(test, assert_instr(tbl))]
19150#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19151pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
19152 unsafe { transmute(vqtbl1(transmute(a), b)) }
19153}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x8_t {
    // Private wrapper: two-register table (`a`, `b`) indexed by `c`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private wrapper: two-register table (`a`, `b`) indexed by `c`, 16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the two-register table tuple and forward to the FFI wrapper.
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the two-register table tuple and forward to the FFI wrapper.
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private wrapper: three-register table (`a`..`c`) indexed by `d`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private wrapper: three-register table (`a`..`c`) indexed by `d`, 16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the three-register table tuple and forward to the FFI wrapper.
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the three-register table tuple and forward to the FFI wrapper.
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private wrapper: four-register table (`a`..`d`) indexed by `e`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private wrapper: four-register table (`a`..`d`) indexed by `e`, 16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the four-register table tuple and forward to the FFI wrapper.
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the four-register table tuple and forward to the FFI wrapper.
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private wrapper: `a` is the fallback vector, `b` the table, `c` the indices.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private wrapper: `a` is the fallback vector, `b` the table, `c` the indices.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Forward to the private `vqtbx1` FFI wrapper.
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Forward to the private `vqtbx1q` FFI wrapper.
    vqtbx1q(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private wrapper: fallback `a`, two-register table (`b`, `c`), indices `d`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private wrapper: fallback `a`, two-register table (`b`, `c`), indices `d`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the two-register table tuple and forward to the FFI wrapper.
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the two-register table tuple and forward to the FFI wrapper.
    vqtbx2q(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private wrapper: fallback `a`, three-register table (`b`..`d`), indices `e`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private wrapper: fallback `a`, three-register table (`b`..`d`), indices `e`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the three-register table tuple and forward to the FFI wrapper.
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the three-register table tuple and forward to the FFI wrapper.
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    // Private wrapper: fallback `a`, four-register table (`b`..`e`), indices `f`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    // Private wrapper: fallback `a`, four-register table (`b`..`e`), indices `f`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; NEON availability
    // is guaranteed by `#[target_feature(enable = "neon")]` on this function.
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the four-register table tuple and forward to the FFI wrapper.
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the four-register table tuple and forward to the FFI wrapper.
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    // SAFETY: transmuting between same-size NEON vector types only
    // reinterprets the lane type; the bit pattern is unchanged.
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic only reads its vector arguments; the `neon,sha3`
    // target features required for RAX1 are enforced by the attribute above.
    unsafe { _vrax1q_u64(a, b) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    // Lane-wise bit reversal via the generic SIMD intrinsic; lowers to RBIT.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    // Lane-wise bit reversal via the generic SIMD intrinsic; lowers to RBIT.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // u8 and i8 lanes share size and layout, so forwarding to the signed
    // variant through transmutes is a pure reinterpretation.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: lanes are reversed before and after the call so the
    // result matches little-endian lane numbering (generator convention).
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // u8 and i8 lanes share size and layout, so forwarding to the signed
    // variant through transmutes is a pure reinterpretation.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: lanes are reversed before and after the call so the
    // result matches little-endian lane numbering (generator convention).
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // p8 and i8 lanes share size and layout, so forwarding to the signed
    // variant through transmutes is a pure reinterpretation.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Big-endian variant: lanes are reversed before and after the call so the
    // result matches little-endian lane numbering (generator convention).
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // p8 and i8 lanes share size and layout, so forwarding to the signed
    // variant through transmutes is a pure reinterpretation.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Big-endian variant: lanes are reversed before and after the call so the
    // result matches little-endian lane numbering (generator convention).
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    // Direct binding to the LLVM intrinsic that lowers to FRECPE (1 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    // Direct binding to the LLVM intrinsic that lowers to FRECPE (2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    // Scalar form: binds the f64 LLVM intrinsic that lowers to FRECPE.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    // Scalar form: binds the f32 LLVM intrinsic that lowers to FRECPE.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpeh_f16(a: f16) -> f16 {
    // Scalar half-precision form; excluded on arm64ec by the `#[cfg]` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    // SAFETY: `neon,fp16` is guaranteed by the `#[target_feature]` attribute.
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding to the LLVM intrinsic that lowers to FRECPS (1 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Direct binding to the LLVM intrinsic that lowers to FRECPS (2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    // Scalar form: binds the f64 LLVM intrinsic that lowers to FRECPS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    // Scalar form: binds the f32 LLVM intrinsic that lowers to FRECPS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision form; excluded on arm64ec by the `#[cfg]` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: `neon,fp16` is guaranteed by the `#[target_feature]` attribute.
    unsafe { _vrecpsh_f16(a, b) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    // Scalar form: binds the f64 LLVM intrinsic that lowers to FRECPX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    // Scalar form: binds the f32 LLVM intrinsic that lowers to FRECPX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    // SAFETY: `neon` is guaranteed by the `#[target_feature]` attribute above.
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpxh_f16(a: f16) -> f16 {
    // Scalar half-precision form; excluded on arm64ec by the `#[cfg]` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    // SAFETY: `neon,fp16` is guaranteed by the `#[target_feature]` attribute.
    unsafe { _vrecpxh_f16(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // SAFETY: both vector types are 64 bits wide; this is a bit-level
    // reinterpretation and compiles to no instruction (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Big-endian variant: reverse the f16 lanes first so the 64-bit result
    // sees the same byte ordering as on little-endian targets.
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // SAFETY: both vector types are 128 bits wide; this is a bit-level
    // reinterpretation and compiles to no instruction (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Big-endian variant: reverse input and output lane order around the
    // transmute to preserve little-endian lane numbering.
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // SAFETY: both vector types are 64 bits wide; this is a bit-level
    // reinterpretation and compiles to no instruction (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Big-endian variant: the single-lane input needs no shuffle; only the
    // multi-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // SAFETY: both vector types are 128 bits wide; this is a bit-level
    // reinterpretation and compiles to no instruction (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Big-endian variant: reverse input and output lane order around the
    // transmute to preserve little-endian lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // SAFETY: p128 and float64x2_t are both 128 bits wide; pure bit
    // reinterpretation, no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Big-endian variant: the scalar p128 input needs no shuffle; only the
    // two-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Big-endian variant: reverse the two input lanes; the single-lane
    // result needs no output shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Big-endian variant: reverse the two input lanes; the single-lane
    // result needs no output shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // SAFETY: both vector types are 128 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Big-endian variant: reverse input and output lane order around the
    // transmute to preserve little-endian lane numbering.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // SAFETY: both vector types are 128 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Big-endian variant: reverse input and output lane order around the
    // transmute to preserve little-endian lane numbering.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Big-endian variant: the single-lane input needs no shuffle; only the
    // two-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Big-endian variant: the single-lane input needs no shuffle; only the
    // eight-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Big-endian variant: the single-lane input needs no shuffle; only the
    // four-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Big-endian variant: the single-lane input needs no shuffle; only the
    // two-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    // Single lane in and out, so no endian-specific variant is needed.
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Big-endian variant: the single-lane input needs no shuffle; only the
    // eight-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Big-endian variant: the single-lane input needs no shuffle; only the
    // four-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // SAFETY: both vector types are 64 bits wide; pure bit reinterpretation,
    // no instruction emitted (`assert_instr(nop)`).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // Big-endian variant: the single-lane input needs no shuffle; only the
    // two-lane result is reversed to preserve little-endian lane numbering.
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
20664#[doc = "Vector reinterpret cast operation"]
20665#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
20666#[inline(always)]
20667#[target_feature(enable = "neon")]
20668#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20669#[cfg_attr(test, assert_instr(nop))]
20670pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
20671 unsafe { transmute(a) }
20672}
20673#[doc = "Vector reinterpret cast operation"]
20674#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
20675#[inline(always)]
20676#[cfg(target_endian = "little")]
20677#[target_feature(enable = "neon")]
20678#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20679#[cfg_attr(test, assert_instr(nop))]
20680pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
20681 unsafe { transmute(a) }
20682}
20683#[doc = "Vector reinterpret cast operation"]
20684#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
20685#[inline(always)]
20686#[cfg(target_endian = "big")]
20687#[target_feature(enable = "neon")]
20688#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20689#[cfg_attr(test, assert_instr(nop))]
20690pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
20691 unsafe {
20692 let ret_val: poly8x8_t = transmute(a);
20693 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20694 }
20695}
20696#[doc = "Vector reinterpret cast operation"]
20697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
20698#[inline(always)]
20699#[cfg(target_endian = "little")]
20700#[target_feature(enable = "neon")]
20701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20702#[cfg_attr(test, assert_instr(nop))]
20703pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
20704 unsafe { transmute(a) }
20705}
20706#[doc = "Vector reinterpret cast operation"]
20707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
20708#[inline(always)]
20709#[cfg(target_endian = "big")]
20710#[target_feature(enable = "neon")]
20711#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20712#[cfg_attr(test, assert_instr(nop))]
20713pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
20714 unsafe {
20715 let ret_val: poly16x4_t = transmute(a);
20716 simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
20717 }
20718}
20719#[doc = "Vector reinterpret cast operation"]
20720#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
20721#[inline(always)]
20722#[target_feature(enable = "neon")]
20723#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20724#[cfg_attr(test, assert_instr(nop))]
20725pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
20726 unsafe { transmute(a) }
20727}
20728#[doc = "Vector reinterpret cast operation"]
20729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
20730#[inline(always)]
20731#[cfg(target_endian = "little")]
20732#[target_feature(enable = "neon")]
20733#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20734#[cfg_attr(test, assert_instr(nop))]
20735pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
20736 unsafe { transmute(a) }
20737}
20738#[doc = "Vector reinterpret cast operation"]
20739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
20740#[inline(always)]
20741#[cfg(target_endian = "big")]
20742#[target_feature(enable = "neon")]
20743#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20744#[cfg_attr(test, assert_instr(nop))]
20745pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
20746 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20747 unsafe { transmute(a) }
20748}
20749#[doc = "Vector reinterpret cast operation"]
20750#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
20751#[inline(always)]
20752#[cfg(target_endian = "little")]
20753#[target_feature(enable = "neon")]
20754#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20755#[cfg_attr(test, assert_instr(nop))]
20756pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
20757 unsafe { transmute(a) }
20758}
20759#[doc = "Vector reinterpret cast operation"]
20760#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
20761#[inline(always)]
20762#[cfg(target_endian = "big")]
20763#[target_feature(enable = "neon")]
20764#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20765#[cfg_attr(test, assert_instr(nop))]
20766pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
20767 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20768 unsafe {
20769 let ret_val: float32x4_t = transmute(a);
20770 simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
20771 }
20772}
20773#[doc = "Vector reinterpret cast operation"]
20774#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
20775#[inline(always)]
20776#[cfg(target_endian = "little")]
20777#[target_feature(enable = "neon")]
20778#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20779#[cfg_attr(test, assert_instr(nop))]
20780pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
20781 unsafe { transmute(a) }
20782}
20783#[doc = "Vector reinterpret cast operation"]
20784#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
20785#[inline(always)]
20786#[cfg(target_endian = "big")]
20787#[target_feature(enable = "neon")]
20788#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20789#[cfg_attr(test, assert_instr(nop))]
20790pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
20791 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20792 unsafe {
20793 let ret_val: int8x16_t = transmute(a);
20794 simd_shuffle!(
20795 ret_val,
20796 ret_val,
20797 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20798 )
20799 }
20800}
20801#[doc = "Vector reinterpret cast operation"]
20802#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
20803#[inline(always)]
20804#[cfg(target_endian = "little")]
20805#[target_feature(enable = "neon")]
20806#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20807#[cfg_attr(test, assert_instr(nop))]
20808pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
20809 unsafe { transmute(a) }
20810}
20811#[doc = "Vector reinterpret cast operation"]
20812#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
20813#[inline(always)]
20814#[cfg(target_endian = "big")]
20815#[target_feature(enable = "neon")]
20816#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20817#[cfg_attr(test, assert_instr(nop))]
20818pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
20819 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20820 unsafe {
20821 let ret_val: int16x8_t = transmute(a);
20822 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20823 }
20824}
20825#[doc = "Vector reinterpret cast operation"]
20826#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
20827#[inline(always)]
20828#[cfg(target_endian = "little")]
20829#[target_feature(enable = "neon")]
20830#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20831#[cfg_attr(test, assert_instr(nop))]
20832pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
20833 unsafe { transmute(a) }
20834}
20835#[doc = "Vector reinterpret cast operation"]
20836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
20837#[inline(always)]
20838#[cfg(target_endian = "big")]
20839#[target_feature(enable = "neon")]
20840#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20841#[cfg_attr(test, assert_instr(nop))]
20842pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
20843 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20844 unsafe {
20845 let ret_val: int32x4_t = transmute(a);
20846 simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
20847 }
20848}
20849#[doc = "Vector reinterpret cast operation"]
20850#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
20851#[inline(always)]
20852#[cfg(target_endian = "little")]
20853#[target_feature(enable = "neon")]
20854#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20855#[cfg_attr(test, assert_instr(nop))]
20856pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
20857 unsafe { transmute(a) }
20858}
20859#[doc = "Vector reinterpret cast operation"]
20860#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
20861#[inline(always)]
20862#[cfg(target_endian = "big")]
20863#[target_feature(enable = "neon")]
20864#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20865#[cfg_attr(test, assert_instr(nop))]
20866pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
20867 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20868 unsafe {
20869 let ret_val: int64x2_t = transmute(a);
20870 simd_shuffle!(ret_val, ret_val, [1, 0])
20871 }
20872}
20873#[doc = "Vector reinterpret cast operation"]
20874#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
20875#[inline(always)]
20876#[cfg(target_endian = "little")]
20877#[target_feature(enable = "neon")]
20878#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20879#[cfg_attr(test, assert_instr(nop))]
20880pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
20881 unsafe { transmute(a) }
20882}
20883#[doc = "Vector reinterpret cast operation"]
20884#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
20885#[inline(always)]
20886#[cfg(target_endian = "big")]
20887#[target_feature(enable = "neon")]
20888#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20889#[cfg_attr(test, assert_instr(nop))]
20890pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
20891 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20892 unsafe {
20893 let ret_val: uint8x16_t = transmute(a);
20894 simd_shuffle!(
20895 ret_val,
20896 ret_val,
20897 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20898 )
20899 }
20900}
20901#[doc = "Vector reinterpret cast operation"]
20902#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
20903#[inline(always)]
20904#[cfg(target_endian = "little")]
20905#[target_feature(enable = "neon")]
20906#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20907#[cfg_attr(test, assert_instr(nop))]
20908pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
20909 unsafe { transmute(a) }
20910}
20911#[doc = "Vector reinterpret cast operation"]
20912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
20913#[inline(always)]
20914#[cfg(target_endian = "big")]
20915#[target_feature(enable = "neon")]
20916#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20917#[cfg_attr(test, assert_instr(nop))]
20918pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
20919 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20920 unsafe {
20921 let ret_val: uint16x8_t = transmute(a);
20922 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20923 }
20924}
20925#[doc = "Vector reinterpret cast operation"]
20926#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
20927#[inline(always)]
20928#[cfg(target_endian = "little")]
20929#[target_feature(enable = "neon")]
20930#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20931#[cfg_attr(test, assert_instr(nop))]
20932pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
20933 unsafe { transmute(a) }
20934}
20935#[doc = "Vector reinterpret cast operation"]
20936#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
20937#[inline(always)]
20938#[cfg(target_endian = "big")]
20939#[target_feature(enable = "neon")]
20940#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20941#[cfg_attr(test, assert_instr(nop))]
20942pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
20943 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20944 unsafe {
20945 let ret_val: uint32x4_t = transmute(a);
20946 simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
20947 }
20948}
20949#[doc = "Vector reinterpret cast operation"]
20950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
20951#[inline(always)]
20952#[cfg(target_endian = "little")]
20953#[target_feature(enable = "neon")]
20954#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20955#[cfg_attr(test, assert_instr(nop))]
20956pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
20957 unsafe { transmute(a) }
20958}
20959#[doc = "Vector reinterpret cast operation"]
20960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
20961#[inline(always)]
20962#[cfg(target_endian = "big")]
20963#[target_feature(enable = "neon")]
20964#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20965#[cfg_attr(test, assert_instr(nop))]
20966pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
20967 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20968 unsafe {
20969 let ret_val: uint64x2_t = transmute(a);
20970 simd_shuffle!(ret_val, ret_val, [1, 0])
20971 }
20972}
20973#[doc = "Vector reinterpret cast operation"]
20974#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
20975#[inline(always)]
20976#[cfg(target_endian = "little")]
20977#[target_feature(enable = "neon")]
20978#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20979#[cfg_attr(test, assert_instr(nop))]
20980pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
20981 unsafe { transmute(a) }
20982}
20983#[doc = "Vector reinterpret cast operation"]
20984#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
20985#[inline(always)]
20986#[cfg(target_endian = "big")]
20987#[target_feature(enable = "neon")]
20988#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20989#[cfg_attr(test, assert_instr(nop))]
20990pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
20991 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
20992 unsafe {
20993 let ret_val: poly8x16_t = transmute(a);
20994 simd_shuffle!(
20995 ret_val,
20996 ret_val,
20997 [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20998 )
20999 }
21000}
21001#[doc = "Vector reinterpret cast operation"]
21002#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
21003#[inline(always)]
21004#[cfg(target_endian = "little")]
21005#[target_feature(enable = "neon")]
21006#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21007#[cfg_attr(test, assert_instr(nop))]
21008pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
21009 unsafe { transmute(a) }
21010}
21011#[doc = "Vector reinterpret cast operation"]
21012#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
21013#[inline(always)]
21014#[cfg(target_endian = "big")]
21015#[target_feature(enable = "neon")]
21016#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21017#[cfg_attr(test, assert_instr(nop))]
21018pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
21019 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21020 unsafe {
21021 let ret_val: poly16x8_t = transmute(a);
21022 simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
21023 }
21024}
21025#[doc = "Vector reinterpret cast operation"]
21026#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
21027#[inline(always)]
21028#[cfg(target_endian = "little")]
21029#[target_feature(enable = "neon")]
21030#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21031#[cfg_attr(test, assert_instr(nop))]
21032pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
21033 unsafe { transmute(a) }
21034}
21035#[doc = "Vector reinterpret cast operation"]
21036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
21037#[inline(always)]
21038#[cfg(target_endian = "big")]
21039#[target_feature(enable = "neon")]
21040#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21041#[cfg_attr(test, assert_instr(nop))]
21042pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
21043 let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21044 unsafe {
21045 let ret_val: poly64x2_t = transmute(a);
21046 simd_shuffle!(ret_val, ret_val, [1, 0])
21047 }
21048}
21049#[doc = "Vector reinterpret cast operation"]
21050#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
21051#[inline(always)]
21052#[cfg(target_endian = "little")]
21053#[target_feature(enable = "neon")]
21054#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21055#[cfg_attr(test, assert_instr(nop))]
21056pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
21057 unsafe { transmute(a) }
21058}
21059#[doc = "Vector reinterpret cast operation"]
21060#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
21061#[inline(always)]
21062#[cfg(target_endian = "big")]
21063#[target_feature(enable = "neon")]
21064#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21065#[cfg_attr(test, assert_instr(nop))]
21066pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
21067 let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
21068 unsafe { transmute(a) }
21069}
21070#[doc = "Vector reinterpret cast operation"]
21071#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
21072#[inline(always)]
21073#[cfg(target_endian = "little")]
21074#[target_feature(enable = "neon")]
21075#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21076#[cfg_attr(test, assert_instr(nop))]
21077pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
21078 unsafe { transmute(a) }
21079}
21080#[doc = "Vector reinterpret cast operation"]
21081#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
21082#[inline(always)]
21083#[cfg(target_endian = "big")]
21084#[target_feature(enable = "neon")]
21085#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21086#[cfg_attr(test, assert_instr(nop))]
21087pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
21088 let a: int8x16_t =
21089 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
21090 unsafe {
21091 let ret_val: float64x2_t = transmute(a);
21092 simd_shuffle!(ret_val, ret_val, [1, 0])
21093 }
21094}
21095#[doc = "Vector reinterpret cast operation"]
21096#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
21097#[inline(always)]
21098#[cfg(target_endian = "little")]
21099#[target_feature(enable = "neon")]
21100#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21101#[cfg_attr(test, assert_instr(nop))]
21102pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
21103 unsafe { transmute(a) }
21104}
21105#[doc = "Vector reinterpret cast operation"]
21106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
21107#[inline(always)]
21108#[cfg(target_endian = "big")]
21109#[target_feature(enable = "neon")]
21110#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21111#[cfg_attr(test, assert_instr(nop))]
21112pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
21113 let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
21114 unsafe { transmute(a) }
21115}
21116#[doc = "Vector reinterpret cast operation"]
21117#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
21118#[inline(always)]
21119#[cfg(target_endian = "little")]
21120#[target_feature(enable = "neon")]
21121#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21122#[cfg_attr(test, assert_instr(nop))]
21123pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
21124 unsafe { transmute(a) }
21125}
21126#[doc = "Vector reinterpret cast operation"]
21127#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
21128#[inline(always)]
21129#[cfg(target_endian = "big")]
21130#[target_feature(enable = "neon")]
21131#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21132#[cfg_attr(test, assert_instr(nop))]
21133pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
21134 let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
21135 unsafe {
21136 let ret_val: float64x2_t = transmute(a);
21137 simd_shuffle!(ret_val, ret_val, [1, 0])
21138 }
21139}
21140#[doc = "Vector reinterpret cast operation"]
21141#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
21142#[inline(always)]
21143#[cfg(target_endian = "little")]
21144#[target_feature(enable = "neon")]
21145#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21146#[cfg_attr(test, assert_instr(nop))]
21147pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
21148 unsafe { transmute(a) }
21149}
21150#[doc = "Vector reinterpret cast operation"]
21151#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
21152#[inline(always)]
21153#[cfg(target_endian = "big")]
21154#[target_feature(enable = "neon")]
21155#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21156#[cfg_attr(test, assert_instr(nop))]
21157pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
21158 let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21159 unsafe { transmute(a) }
21160}
21161#[doc = "Vector reinterpret cast operation"]
21162#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
21163#[inline(always)]
21164#[cfg(target_endian = "little")]
21165#[target_feature(enable = "neon")]
21166#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21167#[cfg_attr(test, assert_instr(nop))]
21168pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
21169 unsafe { transmute(a) }
21170}
21171#[doc = "Vector reinterpret cast operation"]
21172#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
21173#[inline(always)]
21174#[cfg(target_endian = "big")]
21175#[target_feature(enable = "neon")]
21176#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21177#[cfg_attr(test, assert_instr(nop))]
21178pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
21179 let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
21180 unsafe {
21181 let ret_val: float64x2_t = transmute(a);
21182 simd_shuffle!(ret_val, ret_val, [1, 0])
21183 }
21184}
21185#[doc = "Vector reinterpret cast operation"]
21186#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
21187#[inline(always)]
21188#[target_feature(enable = "neon")]
21189#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21190#[cfg_attr(test, assert_instr(nop))]
21191pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
21192 unsafe { transmute(a) }
21193}
21194#[doc = "Vector reinterpret cast operation"]
21195#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
21196#[inline(always)]
21197#[target_feature(enable = "neon")]
21198#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21199#[cfg_attr(test, assert_instr(nop))]
21200pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
21201 unsafe { transmute(a) }
21202}
21203#[doc = "Vector reinterpret cast operation"]
21204#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
21205#[inline(always)]
21206#[cfg(target_endian = "little")]
21207#[target_feature(enable = "neon")]
21208#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21209#[cfg_attr(test, assert_instr(nop))]
21210pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
21211 unsafe { transmute(a) }
21212}
21213#[doc = "Vector reinterpret cast operation"]
21214#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
21215#[inline(always)]
21216#[cfg(target_endian = "big")]
21217#[target_feature(enable = "neon")]
21218#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21219#[cfg_attr(test, assert_instr(nop))]
21220pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
21221 let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21222 unsafe {
21223 let ret_val: float64x2_t = transmute(a);
21224 simd_shuffle!(ret_val, ret_val, [1, 0])
21225 }
21226}
21227#[doc = "Vector reinterpret cast operation"]
21228#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
21229#[inline(always)]
21230#[cfg(target_endian = "little")]
21231#[target_feature(enable = "neon")]
21232#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21233#[cfg_attr(test, assert_instr(nop))]
21234pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
21235 unsafe { transmute(a) }
21236}
21237#[doc = "Vector reinterpret cast operation"]
21238#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
21239#[inline(always)]
21240#[cfg(target_endian = "big")]
21241#[target_feature(enable = "neon")]
21242#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21243#[cfg_attr(test, assert_instr(nop))]
21244pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
21245 let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21246 unsafe {
21247 let ret_val: poly64x2_t = transmute(a);
21248 simd_shuffle!(ret_val, ret_val, [1, 0])
21249 }
21250}
21251#[doc = "Vector reinterpret cast operation"]
21252#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
21253#[inline(always)]
21254#[cfg(target_endian = "little")]
21255#[target_feature(enable = "neon")]
21256#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21257#[cfg_attr(test, assert_instr(nop))]
21258pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
21259 unsafe { transmute(a) }
21260}
21261#[doc = "Vector reinterpret cast operation"]
21262#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
21263#[inline(always)]
21264#[cfg(target_endian = "big")]
21265#[target_feature(enable = "neon")]
21266#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21267#[cfg_attr(test, assert_instr(nop))]
21268pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
21269 let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
21270 unsafe { transmute(a) }
21271}
21272#[doc = "Vector reinterpret cast operation"]
21273#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
21274#[inline(always)]
21275#[cfg(target_endian = "little")]
21276#[target_feature(enable = "neon")]
21277#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21278#[cfg_attr(test, assert_instr(nop))]
21279pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
21280 unsafe { transmute(a) }
21281}
21282#[doc = "Vector reinterpret cast operation"]
21283#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
21284#[inline(always)]
21285#[cfg(target_endian = "big")]
21286#[target_feature(enable = "neon")]
21287#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21288#[cfg_attr(test, assert_instr(nop))]
21289pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
21290 let a: uint8x16_t =
21291 unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
21292 unsafe {
21293 let ret_val: float64x2_t = transmute(a);
21294 simd_shuffle!(ret_val, ret_val, [1, 0])
21295 }
21296}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Pure bit-cast: both types are 64 bits wide, so on little-endian
    // targets no lane adjustment is needed (hence assert_instr(nop)).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Big-endian: reverse the four u16 lanes before the bit-cast so the
    // resulting f64 has the same bit layout as on little-endian targets.
    // The single-lane output needs no reversal of its own.
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Big-endian: reverse the eight u16 input lanes, bit-cast, then
    // reverse the two f64 result lanes to restore the target's lane order.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Pure 64-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Big-endian: swap the two u32 lanes before the bit-cast so the f64
    // bit layout matches little-endian; 1-lane output needs no reversal.
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Big-endian: reverse the four u32 input lanes, bit-cast, then
    // reverse the two f64 result lanes.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    // 64-bit element to 64-bit element: the layout is endian-agnostic,
    // so a single definition (no target_endian cfg) covers both endians.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    // 64-bit element to 64-bit element: endian-agnostic bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Big-endian: swap the two 64-bit input lanes, bit-cast, then swap
    // the two result lanes back (elements are same-width, so the two
    // shuffles cancel bit-wise but keep the generated pattern uniform).
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Big-endian: swap input lanes, bit-cast, swap result lanes back.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Pure 64-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Big-endian: reverse the eight byte lanes before the bit-cast so
    // the f64 bit layout matches little-endian; 1-lane output needs no
    // reversal of its own.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Big-endian: reverse all sixteen byte lanes, bit-cast, then reverse
    // the two f64 result lanes.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Pure 64-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Big-endian: reverse the four p16 lanes before the bit-cast;
    // 1-lane output needs no reversal.
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Big-endian: reverse the eight p16 input lanes, bit-cast, then
    // reverse the two f64 result lanes.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // Pure 64-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // Big-endian: the 1-lane input needs no reversal; only the two f32
    // result lanes are swapped to match the target's lane order.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    // 64-bit element to 64-bit element: endian-agnostic bit-cast, so a
    // single definition (no target_endian cfg) covers both endians.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    // 64-bit element to 64-bit element: endian-agnostic bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    // 64-bit element to 64-bit element: endian-agnostic bit-cast.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Big-endian: swap the two p64 input lanes, bit-cast, then reverse
    // all four f32 result lanes.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Big-endian: swap input lanes, bit-cast, swap result lanes back
    // (same-width elements; the uniform generated pattern is kept).
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Big-endian: swap input lanes, bit-cast, swap result lanes back.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Pure 128-bit bit-cast; no lane adjustment on little-endian.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Big-endian: swap input lanes, bit-cast, swap result lanes back.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    // Direct binding to the 2-lane LLVM intrinsic; selects FRINT32X
    // (requires the "frintts" target feature, asserted above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    // Direct binding to the 4-lane LLVM intrinsic (FRINT32X).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    // Direct binding to the 2-lane f64 LLVM intrinsic (FRINT32X).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    // 1-lane variant routes through the *scalar* LLVM intrinsic: extract
    // lane 0, round it, and bit-cast the f64 result back to a 1-lane
    // vector (f64 and float64x1_t are layout-compatible).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    // Direct binding to the 2-lane LLVM intrinsic; selects FRINT32Z
    // (requires the "frintts" target feature, asserted above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    // Direct binding to the 4-lane LLVM intrinsic (FRINT32Z).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    // Direct binding to the 2-lane f64 LLVM intrinsic (FRINT32Z).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    // 1-lane variant routes through the *scalar* LLVM intrinsic: extract
    // lane 0, round it, and bit-cast the f64 result back to a 1-lane
    // vector (f64 and float64x1_t are layout-compatible).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    // Direct binding to the 2-lane LLVM intrinsic; selects FRINT64X
    // (requires the "frintts" target feature, asserted above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    // Direct binding to the 4-lane LLVM intrinsic (FRINT64X).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    // Direct binding to the 2-lane f64 LLVM intrinsic (FRINT64X).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    // 1-lane variant routes through the *scalar* LLVM intrinsic: extract
    // lane 0, round it, and bit-cast the f64 result back to a 1-lane
    // vector (f64 and float64x1_t are layout-compatible).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    // Direct binding to the 2-lane LLVM intrinsic; selects FRINT64Z
    // (requires the "frintts" target feature, asserted above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    // Direct binding to the 4-lane LLVM intrinsic (FRINT64Z).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    // Direct binding to the 2-lane f64 LLVM intrinsic (FRINT64Z).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    // 1-lane variant routes through the *scalar* LLVM intrinsic: extract
    // lane 0, round it, and bit-cast the f64 result back to a 1-lane
    // vector (f64 and float64x1_t are layout-compatible).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    // Lowered via the portable simd_trunc intrinsic (truncation = round
    // toward zero); codegen is expected to select FRINTZ, as asserted.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    // Per-lane truncation via simd_trunc; selects FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    // Per-lane truncation via simd_trunc; selects FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    // Per-lane truncation via simd_trunc; selects FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    // Per-lane truncation via simd_trunc; selects FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    // Per-lane truncation via simd_trunc; selects FRINTZ.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise round-half-away-from-zero on a plain SIMD value (FRINTA).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise round-half-away-from-zero on a plain SIMD value (FRINTA).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise round-half-away-from-zero on a plain SIMD value (FRINTA).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise round-half-away-from-zero on a plain SIMD value (FRINTA).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise round-half-away-from-zero on a plain SIMD value (FRINTA).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise round-half-away-from-zero on a plain SIMD value (FRINTA).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    // Scalar round-half-away-from-zero, matching the FRINTA instruction asserted above.
    roundf16(a)
}
22068#[doc = "Floating-point round to integral, to nearest with ties to away"]
22069#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
22070#[inline(always)]
22071#[target_feature(enable = "neon,fp16")]
22072#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22073#[cfg(not(target_arch = "arm64ec"))]
22074#[cfg_attr(test, assert_instr(frintz))]
22075pub fn vrndh_f16(a: f16) -> f16 {
22076 truncf16(a)
22077}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTI.
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTI.
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTI.
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTI.
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTI.
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTI.
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTI.
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise floor on a plain SIMD value (FRINTM).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise floor on a plain SIMD value (FRINTM).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise floor on a plain SIMD value (FRINTM).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise floor on a plain SIMD value (FRINTM).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise floor on a plain SIMD value (FRINTM).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise floor on a plain SIMD value (FRINTM).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    // Scalar floor, matching the FRINTM instruction asserted above.
    floorf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTN.
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTN.
    unsafe { _vrndnq_f64(a) }
}
22291#[doc = "Floating-point round to integral, toward minus infinity"]
22292#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
22293#[inline(always)]
22294#[target_feature(enable = "neon,fp16")]
22295#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22296#[cfg(not(target_arch = "arm64ec"))]
22297#[cfg_attr(test, assert_instr(frintn))]
22298pub fn vrndnh_f16(a: f16) -> f16 {
22299 unsafe extern "unadjusted" {
22300 #[cfg_attr(
22301 any(target_arch = "aarch64", target_arch = "arm64ec"),
22302 link_name = "llvm.roundeven.f16"
22303 )]
22304 fn _vrndnh_f16(a: f16) -> f16;
22305 }
22306 unsafe { _vrndnh_f16(a) }
22307}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value; the feature gate guarantees FRINTN.
    unsafe { _vrndns_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise ceiling on a plain SIMD value (FRINTP).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise ceiling on a plain SIMD value (FRINTP).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise ceiling on a plain SIMD value (FRINTP).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise ceiling on a plain SIMD value (FRINTP).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise ceiling on a plain SIMD value (FRINTP).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise ceiling on a plain SIMD value (FRINTP).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    // Scalar ceiling, matching the FRINTP instruction asserted above.
    ceilf16(a)
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise rounding on a plain SIMD value (FRINTX).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise rounding on a plain SIMD value (FRINTX).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise rounding on a plain SIMD value (FRINTX).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise rounding on a plain SIMD value (FRINTX).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise rounding on a plain SIMD value (FRINTX).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise rounding on a plain SIMD value (FRINTX).
    unsafe { simd_round_ties_even(a) }
}
22446#[doc = "Floating-point round to integral, using current rounding mode"]
22447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
22448#[inline(always)]
22449#[target_feature(enable = "neon,fp16")]
22450#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22451#[cfg(not(target_arch = "arm64ec"))]
22452#[cfg_attr(test, assert_instr(frintx))]
22453pub fn vrndxh_f16(a: f16) -> f16 {
22454 round_ties_even_f16(a)
22455}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: the intrinsic only reads its scalar arguments and returns a value (SRSHL).
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: the intrinsic only reads its scalar arguments and returns a value (URSHL).
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    // N must be in 1..=64; checked at compile time.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is implemented as a rounding left shift by -N.
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    // N must be in 1..=64; checked at compile time.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is implemented as a rounding left shift by -N.
    vrshld_u64(a, -N as i64)
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half) via a shuffle.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half) via a shuffle.
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half) via a shuffle.
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half) via a shuffle.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half) via a shuffle.
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half) via a shuffle.
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value (FRSQRTE).
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value (FRSQRTE).
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value (FRSQRTE).
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value (FRSQRTE).
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    // SAFETY: the intrinsic only reads `a` and returns a value (FRSQRTE).
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the intrinsic only reads its arguments and returns a value (FRSQRTS).
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic only reads its arguments and returns a value (FRSQRTS).
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: the intrinsic only reads its arguments and returns a value (FRSQRTS).
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon` target
    // feature, which `#[target_feature]` guarantees inside this function.
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
// NOTE(review): not exposed on arm64ec — presumably f16 support is
// unavailable there; confirm against the generator's target table.
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,fp16`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vrsqrtsh_f16(a, b) }
}
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // The immediate shift amount must be in 1..=64 for a 64-bit element.
    static_assert!(N >= 1 && N <= 64);
    // Rounding-shift `b` right by N, then accumulate onto `a`.
    let b: i64 = vrshrd_n_s64::<N>(b);
    // Wrapping add: the hardware accumulate is modular, and this also
    // avoids a debug-build overflow panic.
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // The immediate shift amount must be in 1..=64 for a 64-bit element.
    static_assert!(N >= 1 && N <= 64);
    // Rounding-shift `b` right by N, then accumulate onto `a`.
    let b: u64 = vrshrd_n_u64::<N>(b);
    // Wrapping add: the hardware accumulate is modular, and this also
    // avoids a debug-build overflow panic.
    a.wrapping_add(b)
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Narrow the rounded difference of `b` and `c` to 8-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: int8x8_t = vrsubhn_s16(b, c);
    // SAFETY: all shuffle indices are in range for the two 8-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Narrow the rounded difference of `b` and `c` to 16-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: int16x4_t = vrsubhn_s32(b, c);
    // SAFETY: all shuffle indices are in range for the two 4-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Narrow the rounded difference of `b` and `c` to 32-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: int32x2_t = vrsubhn_s64(b, c);
    // SAFETY: all shuffle indices are in range for the two 2-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Narrow the rounded difference of `b` and `c` to 8-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    // SAFETY: all shuffle indices are in range for the two 8-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Narrow the rounded difference of `b` and `c` to 16-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    // SAFETY: all shuffle indices are in range for the two 4-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Narrow the rounded difference of `b` and `c` to 32-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    // SAFETY: all shuffle indices are in range for the two 2-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// Big-endian variant: same semantics, but codegen selects RSUBHN here.
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Narrow the rounded difference of `b` and `c` to 8-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: int8x8_t = vrsubhn_s16(b, c);
    // SAFETY: all shuffle indices are in range for the two 8-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// Big-endian variant: same semantics, but codegen selects RSUBHN here.
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Narrow the rounded difference of `b` and `c` to 16-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: int16x4_t = vrsubhn_s32(b, c);
    // SAFETY: all shuffle indices are in range for the two 4-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// Big-endian variant: same semantics, but codegen selects RSUBHN here.
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Narrow the rounded difference of `b` and `c` to 32-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: int32x2_t = vrsubhn_s64(b, c);
    // SAFETY: all shuffle indices are in range for the two 2-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// Big-endian variant: same semantics, but codegen selects RSUBHN here.
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Narrow the rounded difference of `b` and `c` to 8-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    // SAFETY: all shuffle indices are in range for the two 8-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// Big-endian variant: same semantics, but codegen selects RSUBHN here.
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Narrow the rounded difference of `b` and `c` to 16-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    // SAFETY: all shuffle indices are in range for the two 4-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// Big-endian variant: same semantics, but codegen selects RSUBHN here.
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Narrow the rounded difference of `b` and `c` to 32-bit lanes,
    // then place it in the high half of the result above `a`.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    // SAFETY: all shuffle indices are in range for the two 2-lane inputs.
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f16"
        )]
        fn _vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,fp8`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vscale_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v8f16"
        )]
        fn _vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,fp8`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vscaleq_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f32"
        )]
        fn _vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,fp8`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vscale_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f32"
        )]
        fn _vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,fp8`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vscaleq_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f64"
        )]
        fn _vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,fp8`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vscaleq_f64(vn, vm) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    // A 1-lane vector has only lane 0.
    static_assert!(LANE == 0);
    // SAFETY: LANE was just checked to be a valid lane index for `b`.
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0 or 1) for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE was just checked to be a valid lane index for `b`.
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,sha3`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,sha3`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,sha3`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `neon,sha3`
    // target features, which `#[target_feature]` guarantees here.
    unsafe { _vsha512su1q_u64(a, b, c) }
}
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    // Delegate to the 1-lane vector form.
    // SAFETY: `i64` and `int64x1_t` are both 64 bits wide; the transmutes
    // reinterpret a scalar as a single-lane vector and back.
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    // Delegate to the 1-lane vector form; note the shift amount `b` is
    // signed, matching the USHL instruction's signed shift operand.
    // SAFETY: scalar and single-lane vector types are both 64 bits wide.
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    // Shift amount is limited to the source element width (8 bits).
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // SAFETY: indices 8..=15 select the high half of the 16-lane input.
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    // Shift amount is limited to the source element width (16 bits).
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // SAFETY: indices 4..=7 select the high half of the 8-lane input.
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    // Shift amount is limited to the source element width (32 bits).
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // SAFETY: indices 2..=3 select the high half of the 4-lane input.
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    // Shift amount is limited to the source element width (8 bits).
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // SAFETY: indices 8..=15 select the high half of the 16-lane input.
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    // Shift amount is limited to the source element width (16 bits).
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // SAFETY: indices 4..=7 select the high half of the 8-lane input.
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    // Shift amount is limited to the source element width (32 bits).
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // SAFETY: indices 2..=3 select the high half of the 4-lane input.
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount must be 1..=8 for a 16-bit -> 8-bit narrowing shift.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b >> N` to 8-bit lanes and concatenate above `a`.
        // SAFETY: all shuffle indices are in range for the two 8-lane inputs.
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount must be 1..=16 for a 32-bit -> 16-bit narrowing shift.
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: all shuffle indices are in range for the two 4-lane inputs.
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount must be 1..=32 for a 64-bit -> 32-bit narrowing shift.
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: all shuffle indices are in range for the two 2-lane inputs.
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount must be 1..=8 for a 16-bit -> 8-bit narrowing shift.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b >> N` to 8-bit lanes and concatenate above `a`.
        // SAFETY: all shuffle indices are in range for the two 8-lane inputs.
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount must be 1..=16 for a 32-bit -> 16-bit narrowing shift.
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: all shuffle indices are in range for the two 4-lane inputs.
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount must be 1..=32 for a 64-bit -> 32-bit narrowing shift.
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: all shuffle indices are in range for the two 2-lane inputs.
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // SAFETY: N was validated above; no other preconditions beyond the
    // `neon` target feature, which this function enables.
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Shift amount must fit in 3 bits (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // SAFETY: N was validated above; no other preconditions beyond the
    // `neon` target feature, which this function enables.
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // SAFETY: N was validated above; no other preconditions beyond the
    // `neon` target feature, which this function enables.
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Shift amount must fit in 4 bits (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // SAFETY: N was validated above; no other preconditions beyond the
    // `neon` target feature, which this function enables.
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Shift amount must be 0..=31 for 32-bit elements.
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // SAFETY: N was validated above; no other preconditions beyond the
    // `neon` target feature, which this function enables.
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Shift amount must be 0..=31 for 32-bit elements.
    static_assert!(N >= 0 && N <= 31);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // SAFETY: N was validated above; no other preconditions beyond the
    // `neon` target feature, which this function enables.
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // Shift amount must be 0..=63 for 64-bit elements.
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: N was validated above; no other preconditions beyond the
    // `neon` target feature, which this function enables.
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Shift amount must be 0..=63 for 64-bit elements.
    static_assert!(N >= 0 && N <= 63);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: N was validated above; no other preconditions beyond the
    // `neon` target feature, which this function enables.
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // 3-bit immediate: shift amounts 0..=7 for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: SLI is a pure bit operation, so the unsigned variant reuses the
    // signed implementation via same-size vector transmutes.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // 3-bit immediate: shift amounts 0..=7 for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: SLI is a pure bit operation, so the unsigned variant reuses the
    // signed implementation via same-size vector transmutes.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // 4-bit immediate: shift amounts 0..=15 for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: SLI is a pure bit operation, so the unsigned variant reuses the
    // signed implementation via same-size vector transmutes.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // 4-bit immediate: shift amounts 0..=15 for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: SLI is a pure bit operation, so the unsigned variant reuses the
    // signed implementation via same-size vector transmutes.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // SLI on 32-bit lanes accepts shift amounts 0..=31.
    static_assert!(N >= 0 && N <= 31);
    // SAFETY: SLI is a pure bit operation, so the unsigned variant reuses the
    // signed implementation via same-size vector transmutes.
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // SLI on 32-bit lanes accepts shift amounts 0..=31.
    static_assert!(N >= 0 && N <= 31);
    // SAFETY: SLI is a pure bit operation, so the unsigned variant reuses the
    // signed implementation via same-size vector transmutes.
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SLI on 64-bit lanes accepts shift amounts 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: SLI is a pure bit operation, so the unsigned variant reuses the
    // signed implementation via same-size vector transmutes.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SLI on 64-bit lanes accepts shift amounts 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: SLI is a pure bit operation, so the unsigned variant reuses the
    // signed implementation via same-size vector transmutes.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // 3-bit immediate: shift amounts 0..=7 for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: SLI is a pure bit operation, so the polynomial variant reuses
    // the signed implementation via same-size vector transmutes.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // 3-bit immediate: shift amounts 0..=7 for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    // SAFETY: SLI is a pure bit operation, so the polynomial variant reuses
    // the signed implementation via same-size vector transmutes.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // 4-bit immediate: shift amounts 0..=15 for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: SLI is a pure bit operation, so the polynomial variant reuses
    // the signed implementation via same-size vector transmutes.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // 4-bit immediate: shift amounts 0..=15 for 16-bit lanes.
    static_assert_uimm_bits!(N, 4);
    // SAFETY: SLI is a pure bit operation, so the polynomial variant reuses
    // the signed implementation via same-size vector transmutes.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    // SLI on 64-bit lanes accepts shift amounts 0..=63.
    // poly64 types require `aes` in addition to `neon`.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: SLI is a pure bit operation, so the polynomial variant reuses
    // the signed implementation via same-size vector transmutes.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // SLI on 64-bit lanes accepts shift amounts 0..=63.
    // poly64 types require `aes` in addition to `neon`.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: SLI is a pure bit operation, so the polynomial variant reuses
    // the signed implementation via same-size vector transmutes.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Scalar form: SLI on a 64-bit element accepts shift amounts 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: widen the scalars to 1-lane vectors, run the vector SLI, and
    // transmute the single lane back out; all types are 64 bits wide.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Scalar form: SLI on a 64-bit element accepts shift amounts 0..=63.
    static_assert!(N >= 0 && N <= 63);
    // SAFETY: widen the scalars to 1-lane vectors, run the vector SLI, and
    // transmute the single lane back out; all types are 64 bits wide.
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "SM3PARTW1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw1"
        )]
        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above.
    unsafe { _vsm3partw1q_u32(a, b, c) }
}
#[doc = "SM3PARTW2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw2"
        )]
        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above.
    unsafe { _vsm3partw2q_u32(a, b, c) }
}
#[doc = "SM3SS1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3ss1"
        )]
        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above.
    unsafe { _vsm3ss1q_u32(a, b, c) }
}
#[doc = "SM3TT1A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // 2-bit lane selector: IMM2 must be 0..=3.
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above and the validated immediate.
    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT1B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // 2-bit lane selector: IMM2 must be 0..=3.
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above and the validated immediate.
    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // 2-bit lane selector: IMM2 must be 0..=3.
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above and the validated immediate.
    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // 2-bit lane selector: IMM2 must be 0..=3.
    static_assert_uimm_bits!(IMM2, 2);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above and the validated immediate.
    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM4 key"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4ekey"
        )]
        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above.
    unsafe { _vsm4ekeyq_u32(a, b) }
}
#[doc = "SM4 encode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4e"
        )]
        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon,sm4`
    // target features enabled above.
    unsafe { _vsm4eq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i8"
        )]
        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqadd_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v16i8"
        )]
        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqaddq_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i16"
        )]
        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqadd_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i16"
        )]
        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqaddq_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i32"
        )]
        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqadd_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i32"
        )]
        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqaddq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v1i64"
        )]
        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqadd_u64(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i64"
        )]
        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqaddq_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    // Scalar form: broadcast both scalars to 8-lane vectors, run the vector
    // USQADD, and extract lane 0.
    // SAFETY: vector ops require only the `neon` feature enabled above, and
    // lane index 0 is always in bounds.
    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    // Scalar form: broadcast both scalars to 4-lane vectors, run the vector
    // USQADD, and extract lane 0.
    // SAFETY: vector ops require only the `neon` feature enabled above, and
    // lane index 0 is always in bounds.
    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    // 64-bit scalars have a dedicated scalar LLVM intrinsic (no vector
    // round-trip needed, unlike the u8/u16 variants).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i64"
        )]
        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqaddd_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
    // 32-bit scalars have a dedicated scalar LLVM intrinsic (no vector
    // round-trip needed, unlike the u8/u16 variants).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i32"
        )]
        fn _vsqadds_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: FFI call to the LLVM intrinsic; sound given the `neon` target
    // feature enabled above.
    unsafe { _vsqadds_u32(a, b) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise sqrt via the platform simd intrinsic; requires only
    // the `neon,fp16` target features enabled above.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise sqrt via the platform simd intrinsic; requires only
    // the `neon,fp16` target features enabled above.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise sqrt via the platform simd intrinsic; requires only
    // the `neon` target feature enabled above.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise sqrt via the platform simd intrinsic; requires only
    // the `neon` target feature enabled above.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise sqrt via the platform simd intrinsic; requires only
    // the `neon` target feature enabled above.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise sqrt via the platform simd intrinsic; requires only
    // the `neon` target feature enabled above.
    unsafe { simd_fsqrt(a) }
}
23958#[doc = "Floating-point round to integral, using current rounding mode"]
23959#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
23960#[inline(always)]
23961#[target_feature(enable = "neon,fp16")]
23962#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
23963#[cfg(not(target_arch = "arm64ec"))]
23964#[cfg_attr(test, assert_instr(fsqrt))]
23965pub fn vsqrth_f16(a: f16) -> f16 {
23966 sqrtf16(a)
23967}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // SRI shift amounts are 1..=8 for 8-bit lanes (1..=element width).
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: delegates to the shared shift_right_and_insert! helper, which
    // implements SRI for 8 lanes of u8 with the validated immediate N.
    unsafe { super::shift_right_and_insert!(u8, 8, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // SRI shift amounts are 1..=8 for 8-bit lanes (1..=element width).
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: delegates to the shared shift_right_and_insert! helper, which
    // implements SRI for 16 lanes of u8 with the validated immediate N.
    unsafe { super::shift_right_and_insert!(u8, 16, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // SRI shift amounts are 1..=16 for 16-bit lanes (1..=element width).
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: delegates to the shared shift_right_and_insert! helper, which
    // implements SRI for 4 lanes of u16 with the validated immediate N.
    unsafe { super::shift_right_and_insert!(u16, 4, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // SRI shift amounts are 1..=16 for 16-bit lanes (1..=element width).
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: delegates to the shared shift_right_and_insert! helper, which
    // implements SRI for 8 lanes of u16 with the validated immediate N.
    unsafe { super::shift_right_and_insert!(u16, 8, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // SRI shift amounts are 1..=32 for 32-bit lanes (1..=element width).
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: delegates to the shared shift_right_and_insert! helper, which
    // implements SRI for 2 lanes of u32 with the validated immediate N.
    unsafe { super::shift_right_and_insert!(u32, 2, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // SRI shift amounts are 1..=32 for 32-bit lanes (1..=element width).
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: delegates to the shared shift_right_and_insert! helper, which
    // implements SRI for 4 lanes of u32 with the validated immediate N.
    unsafe { super::shift_right_and_insert!(u32, 4, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // SRI shift amounts are 1..=64 for 64-bit lanes (1..=element width).
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: delegates to the shared shift_right_and_insert! helper, which
    // implements SRI for 1 lane of u64 with the validated immediate N.
    unsafe { super::shift_right_and_insert!(u64, 1, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // SRI's shift amount must be 1..=lane-width (64 bits here).
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: `N` is range-checked above, so the lane-wise shift/insert helper
    // (u64 lanes, 2 of them) receives an in-range immediate.
    unsafe { super::shift_right_and_insert!(u64, 2, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // unsigned lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // unsigned lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // unsigned lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // unsigned lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // unsigned lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // unsigned lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // unsigned lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // unsigned lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // polynomial lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // polynomial lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // polynomial lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // polynomial lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
#[inline(always)]
// `aes` is required because the poly64 types are gated on it.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // polynomial lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
#[inline(always)]
// `aes` is required because the poly64 types are gated on it.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: SRI is a purely bitwise operation, so reinterpreting the
    // polynomial lanes as same-width signed lanes (and back) is lossless.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
// NOTE(review): on scalar GPR operands the compiler lowers this to a
// bitfield insert (`bfxil`) rather than vector `sri`; msvc excluded.
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))]
pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: `i64` and the one-lane `int64x1_t` vector have identical size,
    // so round-tripping the scalar through the vector form is lossless.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
// NOTE(review): on scalar GPR operands the compiler lowers this to a
// bitfield insert (`bfxil`) rather than vector `sri`; msvc excluded.
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))]
pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: `u64` and the one-lane `uint64x1_t` vector have identical size,
    // so round-tripping the scalar through the vector form is lossless.
    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<float16x4_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<float16x8_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<float32x2_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<float32x4_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<float64x1_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<float64x2_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<int8x8_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<int8x16_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<int16x4_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<int16x8_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<int32x2_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<int32x4_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<int64x1_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<int64x2_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<uint8x8_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<uint8x16_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<uint16x4_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<uint16x8_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<uint32x2_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<uint32x4_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<uint64x1_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<uint64x2_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<poly8x8_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<poly8x16_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<poly16x4_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<poly16x8_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
// `aes` is required because the poly64 types are gated on it.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<poly64x1_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
// `aes` is required because the poly64 types are gated on it.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    // Unaligned store of the whole vector; the caller must ensure `ptr` is
    // valid for a write of `size_of::<poly64x2_t>()` bytes.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    // Forward both registers to the LLVM `st1x2` intrinsic; the caller must
    // ensure `a` is valid for writes of two `float64x1_t` values.
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    // Forward both registers to the LLVM `st1x2` intrinsic; the caller must
    // ensure `a` is valid for writes of two `float64x2_t` values.
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    // Forward all three registers to the LLVM `st1x3` intrinsic; the caller
    // must ensure `a` is valid for writes of three `float64x1_t` values.
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    // Forward all three registers to the LLVM `st1x3` intrinsic; the caller
    // must ensure `a` is valid for writes of three `float64x2_t` values.
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    // Forward all four registers to the LLVM `st1x4` intrinsic; the caller
    // must ensure `a` is valid for writes of four `float64x1_t` values.
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    // Forward all four registers to the LLVM `st1x4` intrinsic; the caller
    // must ensure `a` is valid for writes of four `float64x2_t` values.
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // A one-element (64-bit) vector only has lane 0.
    static_assert!(LANE == 0);
    // Extract the requested lane and store it through the pointer.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // LANE must fit in 1 bit (0..=1) for a two-element vector.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the requested lane and store it through the pointer.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(stp))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    // With one-element vectors there is nothing to interleave: writing the
    // whole pair struct (possibly unaligned) produces the same memory layout.
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // poly64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // u64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    // Macro args appear to be: element type, lanes per vector, vector count,
    // destination, source tuple — see core_arch::macros for the exact contract.
    crate::core_arch::macros::interleaving_store!(f64, 2, 2, a, b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    // Macro args appear to be: element type, lanes per vector, vector count,
    // destination, source tuple — see core_arch::macros for the exact contract.
    crate::core_arch::macros::interleaving_store!(i64, 2, 2, a, b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // LANE must fit in 4 bits (0..=15) for sixteen-element vectors.
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    // LANE must fit in 4 bits (0..=15) for sixteen-element vectors.
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 vectors share a bit layout, so delegate to the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    // LANE must fit in 4 bits (0..=15) for sixteen-element vectors.
    static_assert_uimm_bits!(LANE, 4);
    // poly8 and i8 vectors share a bit layout, so delegate to the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    // poly64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    // u64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    // With one-element vectors there is nothing to interleave: writing the
    // whole triple struct (possibly unaligned) produces the same memory layout.
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // poly64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // u64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    // Macro args appear to be: element type, lanes per vector, vector count,
    // destination, source tuple — see core_arch::macros for the exact contract.
    crate::core_arch::macros::interleaving_store!(f64, 2, 3, a, b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    // Macro args appear to be: element type, lanes per vector, vector count,
    // destination, source tuple — see core_arch::macros for the exact contract.
    crate::core_arch::macros::interleaving_store!(i64, 2, 3, a, b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    // LANE must fit in 4 bits (0..=15) for sixteen-element vectors.
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    // LANE must fit in 4 bits (0..=15) for sixteen-element vectors.
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 vectors share a bit layout, so delegate to the signed variant.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    // LANE must fit in 4 bits (0..=15) for sixteen-element vectors.
    static_assert_uimm_bits!(LANE, 4);
    // poly8 and i8 vectors share a bit layout, so delegate to the signed variant.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    // poly64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    // u64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    // With one-element vectors there is nothing to interleave: writing the
    // whole quadruple struct (possibly unaligned) produces the same memory layout.
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // poly64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    // A one-element vector only has lane 0.
    static_assert!(LANE == 0);
    // u64 and i64 vectors share a bit layout, so delegate to the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    // Macro args appear to be: element type, lanes per vector, vector count,
    // destination, source tuple — see core_arch::macros for the exact contract.
    crate::core_arch::macros::interleaving_store!(f64, 2, 4, a, b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    // Macro args appear to be: element type, lanes per vector, vector count,
    // destination, source tuple — see core_arch::macros for the exact contract.
    crate::core_arch::macros::interleaving_store!(i64, 2, 4, a, b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    // LANE must fit in 4 bits (0..=15) for sixteen-element vectors.
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = " * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    // LANE must fit in 1 bit (0..=1) for two-element vectors.
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the AArch64 LLVM lane-store intrinsic; it takes an untyped
    // i8 pointer, hence the `a as _` cast at the call site.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
25376#[doc = "Store multiple 4-element structures from four registers"]
25377#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
25378#[doc = "## Safety"]
25379#[doc = " * Neon intrinsic unsafe"]
25380#[inline(always)]
25381#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25382#[target_feature(enable = "neon,aes")]
25383#[cfg_attr(test, assert_instr(st4, LANE = 0))]
25384#[rustc_legacy_const_generics(2)]
25385pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
25386    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 share a bit layout, so delegate to the signed variant.
25387    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
25388}
25389#[doc = "Store multiple 4-element structures from four registers"]
25390#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
25391#[doc = "## Safety"]
25392#[doc = " * Neon intrinsic unsafe"]
25393#[inline(always)]
25394#[target_feature(enable = "neon")]
25395#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25396#[cfg_attr(test, assert_instr(st4, LANE = 0))]
25397#[rustc_legacy_const_generics(2)]
25398pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
25399    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 share a bit layout, so delegate to the signed variant.
25400    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
25401}
25402#[doc = "Store multiple 4-element structures from four registers"]
25403#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
25404#[doc = "## Safety"]
25405#[doc = " * Neon intrinsic unsafe"]
25406#[inline(always)]
25407#[target_feature(enable = "neon")]
25408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25409#[cfg_attr(test, assert_instr(st4, LANE = 0))]
25410#[rustc_legacy_const_generics(2)]
25411pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
25412    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 share a bit layout, so delegate to the signed variant.
25413    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
25414}
25415#[doc = "Store multiple 4-element structures from four registers"]
25416#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
25417#[doc = "## Safety"]
25418#[doc = " * Neon intrinsic unsafe"]
25419#[inline(always)]
25420#[target_feature(enable = "neon")]
25421#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25422#[cfg_attr(test, assert_instr(st4, LANE = 0))]
25423#[rustc_legacy_const_generics(2)]
25424pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
25425    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 share a bit layout, so delegate to the signed variant.
25426    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
25427}
25428#[doc = "Store multiple 4-element structures from four registers"]
25429#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
25430#[doc = "## Safety"]
25431#[doc = " * Neon intrinsic unsafe"]
25432#[inline(always)]
25433#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25434#[target_feature(enable = "neon,aes")]
25435#[cfg_attr(test, assert_instr(st4))]
25436pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    // p64 and i64 share a bit layout, so delegate to the signed variant.
25437    vst4q_s64(transmute(a), transmute(b))
25438}
25439#[doc = "Store multiple 4-element structures from four registers"]
25440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
25441#[doc = "## Safety"]
25442#[doc = " * Neon intrinsic unsafe"]
25443#[inline(always)]
25444#[target_feature(enable = "neon")]
25445#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25446#[cfg_attr(test, assert_instr(st4))]
25447pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    // u64 and i64 share a bit layout, so delegate to the signed variant.
25448    vst4q_s64(transmute(a), transmute(b))
25449}
25450#[doc = "Store-Release a single-element structure from one lane of one register."]
25451#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_f64)"]
25452#[inline(always)]
25453#[target_feature(enable = "neon,rcpc3")]
25454#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25455#[rustc_legacy_const_generics(2)]
25456#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25457#[cfg(target_has_atomic = "64")]
25458pub fn vstl1_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x1_t) {
    // Single-lane vector: only lane 0 exists.
25459    static_assert!(LANE == 0);
    // f64 and i64 share a bit layout; the release semantics live in the s64 variant.
25460    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
25461}
25462#[doc = "Store-Release a single-element structure from one lane of one register."]
25463#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_f64)"]
25464#[inline(always)]
25465#[target_feature(enable = "neon,rcpc3")]
25466#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25467#[rustc_legacy_const_generics(2)]
25468#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25469#[cfg(target_has_atomic = "64")]
25470pub fn vstl1q_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x2_t) {
    // Two 64-bit lanes, so LANE is 0 or 1.
25471    static_assert_uimm_bits!(LANE, 1);
    // f64 and i64 share a bit layout; the release semantics live in the s64 variant.
25472    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
25473}
25475#[doc = "Store-Release a single-element structure from one lane of one register."]
25476#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_u64)"]
25477#[inline(always)]
25478#[target_feature(enable = "neon,rcpc3")]
25479#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25480#[rustc_legacy_const_generics(2)]
25481#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25482#[cfg(target_has_atomic = "64")]
25483pub fn vstl1_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x1_t) {
    // Single-lane vector: only lane 0 exists.
25484    static_assert!(LANE == 0);
    // u64 and i64 share a bit layout; the release semantics live in the s64 variant.
25485    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
25486}
25487#[doc = "Store-Release a single-element structure from one lane of one register."]
25488#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_u64)"]
25489#[inline(always)]
25490#[target_feature(enable = "neon,rcpc3")]
25491#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25492#[rustc_legacy_const_generics(2)]
25493#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25494#[cfg(target_has_atomic = "64")]
25495pub fn vstl1q_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x2_t) {
    // Two 64-bit lanes, so LANE is 0 or 1.
25496    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 share a bit layout; the release semantics live in the s64 variant.
25497    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
25498}
25499#[doc = "Store-Release a single-element structure from one lane of one register."]
25500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_p64)"]
25501#[inline(always)]
25502#[target_feature(enable = "neon,rcpc3")]
25503#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25504#[rustc_legacy_const_generics(2)]
25505#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25506#[cfg(target_has_atomic = "64")]
25507pub fn vstl1_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x1_t) {
    // Single-lane vector: only lane 0 exists.
25508    static_assert!(LANE == 0);
    // p64 and i64 share a bit layout; the release semantics live in the s64 variant.
25509    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
25510}
25511#[doc = "Store-Release a single-element structure from one lane of one register."]
25512#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_p64)"]
25513#[inline(always)]
25514#[target_feature(enable = "neon,rcpc3")]
25515#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25516#[rustc_legacy_const_generics(2)]
25517#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25518#[cfg(target_has_atomic = "64")]
25519pub fn vstl1q_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x2_t) {
    // Two 64-bit lanes, so LANE is 0 or 1.
25520    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 share a bit layout; the release semantics live in the s64 variant.
25521    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
25522}
25523#[doc = "Store-Release a single-element structure from one lane of one register."]
25524#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_s64)"]
25525#[inline(always)]
25526#[target_feature(enable = "neon,rcpc3")]
25527#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25528#[rustc_legacy_const_generics(2)]
25529#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25530#[cfg(target_has_atomic = "64")]
25531pub fn vstl1_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x1_t) {
    // Single-lane vector: only lane 0 exists.
25532    static_assert!(LANE == 0);
    // Perform the store through an atomic view of the destination so the
    // compiler emits a store with release ordering (STL1 under rcpc3).
25533    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
25534    unsafe {
25535        let lane: i64 = simd_extract!(val, LANE as u32);
25536        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
25537    }
25538}
25539#[doc = "Store-Release a single-element structure from one lane of one register."]
25540#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_s64)"]
25541#[inline(always)]
25542#[target_feature(enable = "neon,rcpc3")]
25543#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
25544#[rustc_legacy_const_generics(2)]
25545#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
25546#[cfg(target_has_atomic = "64")]
25547pub fn vstl1q_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x2_t) {
    // Two 64-bit lanes, so LANE is 0 or 1.
25548    static_assert_uimm_bits!(LANE, 1);
    // Perform the store through an atomic view of the destination so the
    // compiler emits a store with release ordering (STL1 under rcpc3).
25549    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
25550    unsafe {
25551        let lane: i64 = simd_extract!(val, LANE as u32);
25552        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
25553    }
25554}
25554#[doc = "Subtract"]
25555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
25556#[inline(always)]
25557#[target_feature(enable = "neon")]
25558#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25559#[cfg_attr(test, assert_instr(fsub))]
25560pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Element-wise floating-point subtraction via the portable SIMD intrinsic.
25561    unsafe { simd_sub(a, b) }
25562}
25563#[doc = "Subtract"]
25564#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
25565#[inline(always)]
25566#[target_feature(enable = "neon")]
25567#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25568#[cfg_attr(test, assert_instr(fsub))]
25569pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Element-wise floating-point subtraction via the portable SIMD intrinsic.
25570    unsafe { simd_sub(a, b) }
25571}
25572#[doc = "Subtract"]
25573#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
25574#[inline(always)]
25575#[target_feature(enable = "neon")]
25576#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25577#[cfg_attr(test, assert_instr(sub))]
25578pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    // Scalar variant: wrapping matches the hardware's modular arithmetic.
25579    a.wrapping_sub(b)
25580}
25581#[doc = "Subtract"]
25582#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
25583#[inline(always)]
25584#[target_feature(enable = "neon")]
25585#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25586#[cfg_attr(test, assert_instr(sub))]
25587pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    // Scalar variant: wrapping matches the hardware's modular arithmetic.
25588    a.wrapping_sub(b)
25589}
25590#[doc = "Subtract"]
25591#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
25592#[inline(always)]
25593#[target_feature(enable = "neon,fp16")]
25594#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25595#[cfg(not(target_arch = "arm64ec"))]
25596#[cfg_attr(test, assert_instr(fsub))]
25597pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision subtract; plain `-` lowers to the fp16 FSUB.
25598    a - b
25599}
25600#[doc = "Signed Subtract Long"]
25601#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
25602#[inline(always)]
25603#[target_feature(enable = "neon")]
25604#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25605#[cfg_attr(test, assert_instr(ssubl2))]
25606pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
25607    unsafe {
        // Take the upper halves (lanes 8..=15), widen each to 16 bits, then subtract.
25608        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
25609        let d: int16x8_t = simd_cast(c);
25610        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
25611        let f: int16x8_t = simd_cast(e);
25612        simd_sub(d, f)
25613    }
25614}
25615#[doc = "Signed Subtract Long"]
25616#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
25617#[inline(always)]
25618#[target_feature(enable = "neon")]
25619#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25620#[cfg_attr(test, assert_instr(ssubl2))]
25621pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
25622    unsafe {
        // Take the upper halves (lanes 4..=7), widen each to 32 bits, then subtract.
25623        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
25624        let d: int32x4_t = simd_cast(c);
25625        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
25626        let f: int32x4_t = simd_cast(e);
25627        simd_sub(d, f)
25628    }
25629}
25630#[doc = "Signed Subtract Long"]
25631#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
25632#[inline(always)]
25633#[target_feature(enable = "neon")]
25634#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25635#[cfg_attr(test, assert_instr(ssubl2))]
25636pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
25637    unsafe {
        // Take the upper halves (lanes 2..=3), widen each to 64 bits, then subtract.
25638        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
25639        let d: int64x2_t = simd_cast(c);
25640        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
25641        let f: int64x2_t = simd_cast(e);
25642        simd_sub(d, f)
25643    }
25644}
25645#[doc = "Unsigned Subtract Long"]
25646#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
25647#[inline(always)]
25648#[target_feature(enable = "neon")]
25649#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25650#[cfg_attr(test, assert_instr(usubl2))]
25651pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
25652    unsafe {
        // Take the upper halves (lanes 8..=15), zero-extend to 16 bits, then subtract.
25653        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
25654        let d: uint16x8_t = simd_cast(c);
25655        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
25656        let f: uint16x8_t = simd_cast(e);
25657        simd_sub(d, f)
25658    }
25659}
25660#[doc = "Unsigned Subtract Long"]
25661#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
25662#[inline(always)]
25663#[target_feature(enable = "neon")]
25664#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25665#[cfg_attr(test, assert_instr(usubl2))]
25666pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
25667    unsafe {
        // Take the upper halves (lanes 4..=7), zero-extend to 32 bits, then subtract.
25668        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
25669        let d: uint32x4_t = simd_cast(c);
25670        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
25671        let f: uint32x4_t = simd_cast(e);
25672        simd_sub(d, f)
25673    }
25674}
25675#[doc = "Unsigned Subtract Long"]
25676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
25677#[inline(always)]
25678#[target_feature(enable = "neon")]
25679#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25680#[cfg_attr(test, assert_instr(usubl2))]
25681pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
25682    unsafe {
        // Take the upper halves (lanes 2..=3), zero-extend to 64 bits, then subtract.
25683        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
25684        let d: uint64x2_t = simd_cast(c);
25685        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
25686        let f: uint64x2_t = simd_cast(e);
25687        simd_sub(d, f)
25688    }
25689}
25690#[doc = "Signed Subtract Wide"]
25691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
25692#[inline(always)]
25693#[target_feature(enable = "neon")]
25694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25695#[cfg_attr(test, assert_instr(ssubw2))]
25696pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
25697    unsafe {
        // Widen the upper half of `b` (lanes 8..=15) and subtract it from `a`.
25698        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
25699        simd_sub(a, simd_cast(c))
25700    }
25701}
25702#[doc = "Signed Subtract Wide"]
25703#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
25704#[inline(always)]
25705#[target_feature(enable = "neon")]
25706#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25707#[cfg_attr(test, assert_instr(ssubw2))]
25708pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
25709    unsafe {
        // Widen the upper half of `b` (lanes 4..=7) and subtract it from `a`.
25710        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
25711        simd_sub(a, simd_cast(c))
25712    }
25713}
25714#[doc = "Signed Subtract Wide"]
25715#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
25716#[inline(always)]
25717#[target_feature(enable = "neon")]
25718#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25719#[cfg_attr(test, assert_instr(ssubw2))]
25720pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
25721    unsafe {
        // Widen the upper half of `b` (lanes 2..=3) and subtract it from `a`.
25722        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
25723        simd_sub(a, simd_cast(c))
25724    }
25725}
25726#[doc = "Unsigned Subtract Wide"]
25727#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
25728#[inline(always)]
25729#[target_feature(enable = "neon")]
25730#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25731#[cfg_attr(test, assert_instr(usubw2))]
25732pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
25733    unsafe {
        // Widen the upper half of `b` (lanes 8..=15) and subtract it from `a`.
25734        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
25735        simd_sub(a, simd_cast(c))
25736    }
25737}
25738#[doc = "Unsigned Subtract Wide"]
25739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
25740#[inline(always)]
25741#[target_feature(enable = "neon")]
25742#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25743#[cfg_attr(test, assert_instr(usubw2))]
25744pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
25745    unsafe {
        // Widen the upper half of `b` (lanes 4..=7) and subtract it from `a`.
25746        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
25747        simd_sub(a, simd_cast(c))
25748    }
25749}
25750#[doc = "Unsigned Subtract Wide"]
25751#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
25752#[inline(always)]
25753#[target_feature(enable = "neon")]
25754#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25755#[cfg_attr(test, assert_instr(usubw2))]
25756pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
25757    unsafe {
        // Widen the upper half of `b` (lanes 2..=3) and subtract it from `a`.
25758        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
25759        simd_sub(a, simd_cast(c))
25760    }
25761}
25762#[doc = "Table look-up"]
25763#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
25764#[inline(always)]
25765#[target_feature(enable = "neon")]
25766#[cfg_attr(test, assert_instr(tbl))]
25767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25768pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Emulate the 64-bit-table TBL by widening the table to 128 bits with a
    // zero upper half; out-of-range indices then yield 0, as vtbl1 requires.
25769    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
25770        {
25771            transmute(b)
25772        }
25773    })
25774}
25775#[doc = "Table look-up"]
25776#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
25777#[inline(always)]
25778#[target_feature(enable = "neon")]
25779#[cfg_attr(test, assert_instr(tbl))]
25780#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25781pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Widen the table with a zero upper half so out-of-range indices yield 0.
25782    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
25783}
25784#[doc = "Table look-up"]
25785#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
25786#[inline(always)]
25787#[target_feature(enable = "neon")]
25788#[cfg_attr(test, assert_instr(tbl))]
25789#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25790pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    // Widen the table with a zero upper half so out-of-range indices yield 0.
25791    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
25792}
25793#[doc = "Table look-up"]
25794#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
25795#[inline(always)]
25796#[target_feature(enable = "neon")]
25797#[cfg_attr(test, assert_instr(tbl))]
25798#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25799pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    // Two 64-bit table halves concatenate into one 128-bit table for TBL.
25800    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
25801}
25802#[doc = "Table look-up"]
25803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
25804#[inline(always)]
25805#[target_feature(enable = "neon")]
25806#[cfg_attr(test, assert_instr(tbl))]
25807#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25808pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Two 64-bit table halves concatenate into one 128-bit table for TBL.
25809    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
25810}
25811#[doc = "Table look-up"]
25812#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
25813#[inline(always)]
25814#[target_feature(enable = "neon")]
25815#[cfg_attr(test, assert_instr(tbl))]
25816#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25817pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Two 64-bit table halves concatenate into one 128-bit table for TBL.
25818    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
25819}
25820#[doc = "Table look-up"]
25821#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
25822#[inline(always)]
25823#[target_feature(enable = "neon")]
25824#[cfg_attr(test, assert_instr(tbl))]
25825#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25826pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    // Pack the three 64-bit table parts into two 128-bit registers,
    // zero-padding the last half so indices 24..=31 read as 0.
25827    let x = int8x16x2_t(
25828        vcombine_s8(a.0, a.1),
25829        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
25830    );
25831    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
25832}
25833#[doc = "Table look-up"]
25834#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
25835#[inline(always)]
25836#[target_feature(enable = "neon")]
25837#[cfg_attr(test, assert_instr(tbl))]
25838#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25839pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Pack the three 64-bit table parts into two 128-bit registers,
    // zero-padding the last half so indices 24..=31 read as 0.
25840    let x = uint8x16x2_t(
25841        vcombine_u8(a.0, a.1),
25842        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
25843    );
25844    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
25845}
25846#[doc = "Table look-up"]
25847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
25848#[inline(always)]
25849#[target_feature(enable = "neon")]
25850#[cfg_attr(test, assert_instr(tbl))]
25851#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25852pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Pack the three 64-bit table parts into two 128-bit registers,
    // zero-padding the last half so indices 24..=31 read as 0.
25853    let x = poly8x16x2_t(
25854        vcombine_p8(a.0, a.1),
25855        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
25856    );
25857    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
25858}
25859#[doc = "Table look-up"]
25860#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
25861#[inline(always)]
25862#[target_feature(enable = "neon")]
25863#[cfg_attr(test, assert_instr(tbl))]
25864#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25865pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    // Four 64-bit table parts pack exactly into two 128-bit registers.
25866    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
25867    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
25868}
25869#[doc = "Table look-up"]
25870#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
25871#[inline(always)]
25872#[target_feature(enable = "neon")]
25873#[cfg_attr(test, assert_instr(tbl))]
25874#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25875pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Four 64-bit table parts pack exactly into two 128-bit registers.
25876    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
25877    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
25878}
25879#[doc = "Table look-up"]
25880#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
25881#[inline(always)]
25882#[target_feature(enable = "neon")]
25883#[cfg_attr(test, assert_instr(tbl))]
25884#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25885pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Four 64-bit table parts pack exactly into two 128-bit registers.
25886    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
25887    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
25888}
25889#[doc = "Extended table look-up"]
25890#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
25891#[inline(always)]
25892#[target_feature(enable = "neon")]
25893#[cfg_attr(test, assert_instr(tbx))]
25894#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25895pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
25896    unsafe {
        // vtbx1 keeps `a`'s lane where the index is out of range for an
        // 8-entry table, so select the TBX result only where c < 8.
25897        simd_select(
25898            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
25899            transmute(vqtbx1(
25900                transmute(a),
25901                transmute(vcombine_s8(b, crate::mem::zeroed())),
25902                transmute(c),
25903            )),
25904            a,
25905        )
25906    }
25907}
25908#[doc = "Extended table look-up"]
25909#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
25910#[inline(always)]
25911#[target_feature(enable = "neon")]
25912#[cfg_attr(test, assert_instr(tbx))]
25913#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25914pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
25915    unsafe {
        // vtbx1 keeps `a`'s lane where the index is out of range for an
        // 8-entry table, so select the TBX result only where c < 8.
25916        simd_select(
25917            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
25918            transmute(vqtbx1(
25919                transmute(a),
25920                transmute(vcombine_u8(b, crate::mem::zeroed())),
25921                c,
25922            )),
25923            a,
25924        )
25925    }
25926}
25927#[doc = "Extended table look-up"]
25928#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
25929#[inline(always)]
25930#[target_feature(enable = "neon")]
25931#[cfg_attr(test, assert_instr(tbx))]
25932#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25933pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
25934    unsafe {
        // vtbx1 keeps `a`'s lane where the index is out of range for an
        // 8-entry table, so select the TBX result only where c < 8.
25935        simd_select(
25936            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
25937            transmute(vqtbx1(
25938                transmute(a),
25939                transmute(vcombine_p8(b, crate::mem::zeroed())),
25940                c,
25941            )),
25942            a,
25943        )
25944    }
25945}
25946#[doc = "Extended table look-up"]
25947#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
25948#[inline(always)]
25949#[target_feature(enable = "neon")]
25950#[cfg_attr(test, assert_instr(tbx))]
25951#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25952pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    // 16-entry table fills a whole 128-bit register, so the TBX fallback
    // to `a` already matches vtbx2 semantics — no extra select needed.
25953    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
25954}
25955#[doc = "Extended table look-up"]
25956#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
25957#[inline(always)]
25958#[target_feature(enable = "neon")]
25959#[cfg_attr(test, assert_instr(tbx))]
25960#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25961pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // 16-entry table fills a whole 128-bit register, so the TBX fallback
    // to `a` already matches vtbx2 semantics — no extra select needed.
25962    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
25963}
25964#[doc = "Extended table look-up"]
25965#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
25966#[inline(always)]
25967#[target_feature(enable = "neon")]
25968#[cfg_attr(test, assert_instr(tbx))]
25969#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25970pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // 16-entry table fills a whole 128-bit register, so the TBX fallback
    // to `a` already matches vtbx2 semantics — no extra select needed.
25971    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
25972}
25973#[doc = "Extended table look-up"]
25974#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
25975#[inline(always)]
25976#[target_feature(enable = "neon")]
25977#[cfg_attr(test, assert_instr(tbx))]
25978#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25979pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    // Pack the three 64-bit table parts into two 128-bit registers
    // (zero-padded fourth half).
25980    let x = int8x16x2_t(
25981        vcombine_s8(b.0, b.1),
25982        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
25983    );
25984    unsafe {
        // The 2-register TBX treats indices 24..=31 as in-range (padding zeros),
        // but vtbx3 must fall back to `a` there — hence the explicit c < 24 select.
25985        transmute(simd_select(
25986            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
25987            transmute(vqtbx2(
25988                transmute(a),
25989                transmute(x.0),
25990                transmute(x.1),
25991                transmute(c),
25992            )),
25993            a,
25994        ))
25995    }
25996}
25997#[doc = "Extended table look-up"]
25998#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
25999#[inline(always)]
26000#[target_feature(enable = "neon")]
26001#[cfg_attr(test, assert_instr(tbx))]
26002#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26003pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Pack the three 64-bit table parts into two 128-bit registers
    // (zero-padded fourth half).
26004    let x = uint8x16x2_t(
26005        vcombine_u8(b.0, b.1),
26006        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
26007    );
26008    unsafe {
        // The 2-register TBX treats indices 24..=31 as in-range (padding zeros),
        // but vtbx3 must fall back to `a` there — hence the explicit c < 24 select.
26009        transmute(simd_select(
26010            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
26011            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
26012            a,
26013        ))
26014    }
26015}
26016#[doc = "Extended table look-up"]
26017#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
26018#[inline(always)]
26019#[target_feature(enable = "neon")]
26020#[cfg_attr(test, assert_instr(tbx))]
26021#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26022pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Pack the three 64-bit table parts into two 128-bit registers
    // (zero-padded fourth half).
26023    let x = poly8x16x2_t(
26024        vcombine_p8(b.0, b.1),
26025        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
26026    );
26027    unsafe {
        // Fall back to `a` where c >= 24, as in vtbx3_s8/vtbx3_u8.
        // NOTE(review): the sibling vtbx3_u8 parameterizes simd_lt with
        // uint8x8_t; poly8 shares the u8 representation so the unsigned
        // compare is equivalent — confirm against the generator.
26028        transmute(simd_select(
26029            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
26030            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
26031            a,
26032        ))
26033    }
26034}
26035#[doc = "Extended table look-up"]
26036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
26037#[inline(always)]
26038#[target_feature(enable = "neon")]
26039#[cfg_attr(test, assert_instr(tbx))]
26040#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26041pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    // 32-entry table fills both 128-bit registers exactly, so the TBX
    // fallback to `a` already matches vtbx4 semantics — no extra select.
26042    unsafe {
26043        vqtbx2(
26044            transmute(a),
26045            transmute(vcombine_s8(b.0, b.1)),
26046            transmute(vcombine_s8(b.2, b.3)),
26047            transmute(c),
26048        )
26049    }
26050}
26051#[doc = "Extended table look-up"]
26052#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
26053#[inline(always)]
26054#[target_feature(enable = "neon")]
26055#[cfg_attr(test, assert_instr(tbx))]
26056#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26057pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // 32-entry table fills both 128-bit registers exactly, so the TBX
    // fallback to `a` already matches vtbx4 semantics — no extra select.
26058    unsafe {
26059        transmute(vqtbx2(
26060            transmute(a),
26061            transmute(vcombine_u8(b.0, b.1)),
26062            transmute(vcombine_u8(b.2, b.3)),
26063            c,
26064        ))
26065    }
26066}
26067#[doc = "Extended table look-up"]
26068#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
26069#[inline(always)]
26070#[target_feature(enable = "neon")]
26071#[cfg_attr(test, assert_instr(tbx))]
26072#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26073pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // 32-entry table fills both 128-bit registers exactly, so the TBX
    // fallback to `a` already matches vtbx4 semantics — no extra select.
26074    unsafe {
26075        transmute(vqtbx2(
26076            transmute(a),
26077            transmute(vcombine_p8(b.0, b.1)),
26078            transmute(vcombine_p8(b.2, b.3)),
26079            c,
26080        ))
26081    }
26082}
// NOTE(review): machine-generated TRN1 family. Each body is a single
// `simd_shuffle!` whose index array selects the EVEN lanes of `a`
// interleaved with the EVEN lanes of `b` (indices >= lane-count refer to
// `b`). For 2-element vectors, TRN1 and ZIP1 are the same permutation, so
// LLVM emits `zip1` there — hence those `assert_instr(zip1)` attributes.
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // 2-lane case: [a0, b0] — identical to zip1, which is what LLVM emits.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
// NOTE(review): machine-generated TRN2 family — mirror of TRN1 above but
// selecting the ODD lanes of each input (indices >= lane-count refer to
// `b`). For 2-element vectors TRN2 equals ZIP2, hence the
// `assert_instr(zip2)` attributes on those variants.
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // 2-lane case: [a1, b1] — identical to zip2, which is what LLVM emits.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
// NOTE(review): machine-generated CMTST family. Each body computes
// `(a & b) != 0` lane-wise; LLVM folds this exact and-then-compare-nonzero
// shape into a single `cmtst`, so the pattern must not be restructured.
// Result lanes are all-ones where `a` and `b` share any set bit, else zero.
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe {
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe {
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe {
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe {
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe {
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe {
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    // Scalar form: reuse the 1-lane vector vtst_s64 via transmutes
    // (i64 <-> int64x1_t have identical layout); yields u64::MAX if the
    // operands share any set bit, else 0.
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    // Unsigned scalar form; identical layout-punning wrapper over vtst_u64.
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
26651#[doc = "Signed saturating Accumulate of Unsigned value."]
26652#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
26653#[inline(always)]
26654#[target_feature(enable = "neon")]
26655#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26656#[cfg_attr(test, assert_instr(suqadd))]
26657pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
26658 unsafe extern "unadjusted" {
26659 #[cfg_attr(
26660 any(target_arch = "aarch64", target_arch = "arm64ec"),
26661 link_name = "llvm.aarch64.neon.suqadd.v8i8"
26662 )]
26663 fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
26664 }
26665 unsafe { _vuqadd_s8(a, b) }
26666}
26667#[doc = "Signed saturating Accumulate of Unsigned value."]
26668#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
26669#[inline(always)]
26670#[target_feature(enable = "neon")]
26671#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26672#[cfg_attr(test, assert_instr(suqadd))]
26673pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
26674 unsafe extern "unadjusted" {
26675 #[cfg_attr(
26676 any(target_arch = "aarch64", target_arch = "arm64ec"),
26677 link_name = "llvm.aarch64.neon.suqadd.v16i8"
26678 )]
26679 fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
26680 }
26681 unsafe { _vuqaddq_s8(a, b) }
26682}
26683#[doc = "Signed saturating Accumulate of Unsigned value."]
26684#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
26685#[inline(always)]
26686#[target_feature(enable = "neon")]
26687#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26688#[cfg_attr(test, assert_instr(suqadd))]
26689pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
26690 unsafe extern "unadjusted" {
26691 #[cfg_attr(
26692 any(target_arch = "aarch64", target_arch = "arm64ec"),
26693 link_name = "llvm.aarch64.neon.suqadd.v4i16"
26694 )]
26695 fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
26696 }
26697 unsafe { _vuqadd_s16(a, b) }
26698}
26699#[doc = "Signed saturating Accumulate of Unsigned value."]
26700#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
26701#[inline(always)]
26702#[target_feature(enable = "neon")]
26703#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26704#[cfg_attr(test, assert_instr(suqadd))]
26705pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
26706 unsafe extern "unadjusted" {
26707 #[cfg_attr(
26708 any(target_arch = "aarch64", target_arch = "arm64ec"),
26709 link_name = "llvm.aarch64.neon.suqadd.v8i16"
26710 )]
26711 fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
26712 }
26713 unsafe { _vuqaddq_s16(a, b) }
26714}
26715#[doc = "Signed saturating Accumulate of Unsigned value."]
26716#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
26717#[inline(always)]
26718#[target_feature(enable = "neon")]
26719#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26720#[cfg_attr(test, assert_instr(suqadd))]
26721pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
26722 unsafe extern "unadjusted" {
26723 #[cfg_attr(
26724 any(target_arch = "aarch64", target_arch = "arm64ec"),
26725 link_name = "llvm.aarch64.neon.suqadd.v2i32"
26726 )]
26727 fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
26728 }
26729 unsafe { _vuqadd_s32(a, b) }
26730}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    // Binding for the backing LLVM intrinsic (4 x 32-bit lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    // SAFETY: SUQADD is available because the `neon` target feature is
    // enabled on this function.
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    // Binding for the backing LLVM intrinsic (1 x 64-bit lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    // SAFETY: SUQADD is available because the `neon` target feature is
    // enabled on this function.
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    // Binding for the backing LLVM intrinsic (2 x 64-bit lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    // SAFETY: SUQADD is available because the `neon` target feature is
    // enabled on this function.
    unsafe { _vuqaddq_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    // Scalar form implemented via the 8-lane vector intrinsic: splat both
    // scalars, do the saturating accumulate, then read back lane 0.
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    // Scalar form implemented via the 4-lane vector intrinsic: splat both
    // scalars, do the saturating accumulate, then read back lane 0.
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    // Unlike the 8/16-bit scalar forms, a direct scalar (i64) LLVM
    // intrinsic is used here rather than a splat-and-extract dance.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    // SAFETY: SUQADD is available because the `neon` target feature is
    // enabled on this function.
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    // Unlike the 8/16-bit scalar forms, a direct scalar (i32) LLVM
    // intrinsic is used here rather than a splat-and-extract dance.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    // SAFETY: SUQADD is available because the `neon` target feature is
    // enabled on this function.
    unsafe { _vuqadds_s32(a, b) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP1: even-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP1: even-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes UZP1 and ZIP1 both yield [a0, b0], so the
    // backend emits ZIP1 — hence the zip1 assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // With only two lanes UZP1 and ZIP1 both yield [a0, b0], so the
    // backend emits ZIP1 — hence the zip1 assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // With only two lanes UZP1 and ZIP1 both yield [a0, b0], so the
    // backend emits ZIP1 — hence the zip1 assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes UZP1 and ZIP1 both yield [a0, b0], so the
    // backend emits ZIP1 — hence the zip1 assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes UZP1 and ZIP1 both yield [a0, b0], so the
    // backend emits ZIP1 — hence the zip1 assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes UZP1 and ZIP1 both yield [a0, b0], so the
    // backend emits ZIP1 — hence the zip1 assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes UZP1 and ZIP1 both yield [a0, b0], so the
    // backend emits ZIP1 — hence the zip1 assertion above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP1: even-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP1: even-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP1: even-numbered lanes of the 32-lane concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP1: even-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP1: even-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP1: even-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP1: even-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP1: even-numbered lanes of the 32-lane concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP1: even-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP1: even-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP1: even-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP1: even-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP1: even-numbered lanes of the 32-lane concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP1: even-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP1: even-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP2: odd-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP2: odd-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes UZP2 and ZIP2 both yield [a1, b1], so the
    // backend emits ZIP2 — hence the zip2 assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // With only two lanes UZP2 and ZIP2 both yield [a1, b1], so the
    // backend emits ZIP2 — hence the zip2 assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // With only two lanes UZP2 and ZIP2 both yield [a1, b1], so the
    // backend emits ZIP2 — hence the zip2 assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes UZP2 and ZIP2 both yield [a1, b1], so the
    // backend emits ZIP2 — hence the zip2 assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes UZP2 and ZIP2 both yield [a1, b1], so the
    // backend emits ZIP2 — hence the zip2 assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes UZP2 and ZIP2 both yield [a1, b1], so the
    // backend emits ZIP2 — hence the zip2 assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes UZP2 and ZIP2 both yield [a1, b1], so the
    // backend emits ZIP2 — hence the zip2 assertion above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP2: odd-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP2: odd-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP2: odd-numbered lanes of the 32-lane concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP2: odd-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP2: odd-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP2: odd-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP2: odd-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP2: odd-numbered lanes of the 32-lane concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP2: odd-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP2: odd-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP2: odd-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP2: odd-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP2: odd-numbered lanes of the 32-lane concatenation [a, b].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP2: odd-numbered lanes of the 8-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP2: odd-numbered lanes of the 16-lane concatenation [a, b].
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // The rotate amount must fit in 6 bits (0..=63); rejected at compile time.
    static_assert_uimm_bits!(IMM6, 6);
    // Binding for the SHA3 XAR crypto intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    // SAFETY: XAR is available because the `sha3` target feature is enabled
    // on this function; IMM6 was validated above.
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP1: interleave the low halves of `a` and `b` -> [a0, b0, a1, b1].
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP1: interleave the low halves of `a` and `b` -> [a0, b0, a1, b1, ...].
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP1 on two lanes -> [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP1: interleave the low halves of `a` and `b` -> [a0, b0, a1, b1].
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP1 on two lanes -> [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP1: interleave the low halves of `a` and `b` -> [a0, b0, a1, b1, ...].
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP1: interleave the low halves of `a` and `b` -> [a0, b0, a1, b1, ...].
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP1: [a0, b0, a1, b1] — interleave the low halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP1: [a0, b0, ..., a3, b3] — interleave the low halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP1: [a0, b0] — the low lane of each input (index 2 selects lane 0 of `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP1: [a0, b0, a1, b1] — interleave the low halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP1: [a0, b0] — the low lane of each input (index 2 selects lane 0 of `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP1: [a0, b0, ..., a3, b3] — interleave the low halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP1: [a0, b0, ..., a7, b7] — interleave the low halves (indices >= 16 select lanes of `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP1: [a0, b0, a1, b1] — interleave the low halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP1: [a0, b0, ..., a3, b3] — interleave the low halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP1: [a0, b0] — the low lane of each input (index 2 selects lane 0 of `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP1: [a0, b0, a1, b1] — interleave the low halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP1: [a0, b0] — the low lane of each input (index 2 selects lane 0 of `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP1: [a0, b0, ..., a3, b3] — interleave the low halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP1: [a0, b0, ..., a7, b7] — interleave the low halves (indices >= 16 select lanes of `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP1: [a0, b0, a1, b1] — interleave the low halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP1: [a0, b0, ..., a3, b3] — interleave the low halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP1: [a0, b0] — the low lane of each input (index 2 selects lane 0 of `b`).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP2: [a2, b2, a3, b3] — interleave the high halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP2: [a4, b4, ..., a7, b7] — interleave the high halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP2: [a1, b1] — the high lane of each input (index 3 selects lane 1 of `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP2: [a2, b2, a3, b3] — interleave the high halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP2: [a1, b1] — the high lane of each input (index 3 selects lane 1 of `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP2: [a4, b4, ..., a7, b7] — interleave the high halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP2: [a8, b8, ..., a15, b15] — interleave the high halves (indices >= 16 select lanes of `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP2: [a2, b2, a3, b3] — interleave the high halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP2: [a4, b4, ..., a7, b7] — interleave the high halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP2: [a1, b1] — the high lane of each input (index 3 selects lane 1 of `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP2: [a2, b2, a3, b3] — interleave the high halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP2: [a1, b1] — the high lane of each input (index 3 selects lane 1 of `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP2: [a4, b4, ..., a7, b7] — interleave the high halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP2: [a8, b8, ..., a15, b15] — interleave the high halves (indices >= 16 select lanes of `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP2: [a2, b2, a3, b3] — interleave the high halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP2: [a4, b4, ..., a7, b7] — interleave the high halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP2: [a1, b1] — the high lane of each input (index 3 selects lane 1 of `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP2: [a2, b2, a3, b3] — interleave the high halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP2: [a1, b1] — the high lane of each input (index 3 selects lane 1 of `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP2: [a4, b4, ..., a7, b7] — interleave the high halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP2: [a8, b8, ..., a15, b15] — interleave the high halves (indices >= 16 select lanes of `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP2: [a2, b2, a3, b3] — interleave the high halves (indices >= 4 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP2: [a4, b4, ..., a7, b7] — interleave the high halves (indices >= 8 select lanes of `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP2: [a1, b1] — the high lane of each input (index 3 selects lane 1 of `b`).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}