1#![allow(non_camel_case_types)]
4
5#[rustfmt::skip]
6mod generated;
7#[rustfmt::skip]
8#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9pub use self::generated::*;
10
11use crate::{
14 core_arch::{arm_shared::*, simd::*},
15 hint::unreachable_unchecked,
16 intrinsics::simd::*,
17 mem::transmute,
18};
19#[cfg(test)]
20use stdarch_test::assert_instr;
21
types! {
    #![stable(feature = "neon_intrinsics", since = "1.59.0")]

    // f64 SIMD vector types, available only on AArch64:
    // a one-lane and a two-lane vector of f64.
    pub struct float64x1_t(1 x f64); pub struct float64x2_t(2 x f64);
}
30
/// ARM-specific aggregate type containing two `float64x1_t` vectors.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x2_t(pub float64x1_t, pub float64x1_t);
/// ARM-specific aggregate type containing three `float64x1_t` vectors.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x3_t(pub float64x1_t, pub float64x1_t, pub float64x1_t);
/// ARM-specific aggregate type containing four `float64x1_t` vectors.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x4_t(
    pub float64x1_t,
    pub float64x1_t,
    pub float64x1_t,
    pub float64x1_t,
);
51
/// ARM-specific aggregate type containing two `float64x2_t` vectors.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x2_t(pub float64x2_t, pub float64x2_t);
/// ARM-specific aggregate type containing three `float64x2_t` vectors.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x3_t(pub float64x2_t, pub float64x2_t, pub float64x2_t);
/// ARM-specific aggregate type containing four `float64x2_t` vectors.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x4_t(
    pub float64x2_t,
    pub float64x2_t,
    pub float64x2_t,
    pub float64x2_t,
);
72
/// Insert vector element from another vector element.
///
/// Both operands are single-lane vectors, so the only valid lane indices are
/// `N1 == 0` and `N2 == 0` (enforced at compile time); copying lane 0 of `b`
/// into lane 0 of `a` therefore just yields `b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s64<const N1: i32, const N2: i32>(_a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N1 == 0);
    static_assert!(N2 == 0);
    b
}
84
/// Insert vector element from another vector element.
///
/// Both operands are single-lane vectors, so the only valid lane indices are
/// `N1 == 0` and `N2 == 0` (enforced at compile time); the result is `b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u64<const N1: i32, const N2: i32>(_a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N1 == 0);
    static_assert!(N2 == 0);
    b
}
96
/// Insert vector element from another vector element.
///
/// Both operands are single-lane vectors, so the only valid lane indices are
/// `N1 == 0` and `N2 == 0` (enforced at compile time); the result is `b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p64<const N1: i32, const N2: i32>(_a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N1 == 0);
    static_assert!(N2 == 0);
    b
}
108
/// Insert vector element from another vector element.
///
/// Both operands are single-lane vectors, so the only valid lane indices are
/// `N1 == 0` and `N2 == 0` (enforced at compile time); the result is `b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f64<const N1: i32, const N2: i32>(
    _a: float64x1_t,
    b: float64x1_t,
) -> float64x1_t {
    static_assert!(N1 == 0);
    static_assert!(N2 == 0);
    b
}
123
/// Insert vector element from another vector element.
///
/// Extracts lane `LANE2` (0 or 1) of the two-lane vector `b` and returns it
/// as a one-lane vector. `LANE1` must be 0 since the destination has a single
/// lane; both constraints are enforced at compile time.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s64<const LANE1: i32, const LANE2: i32>(
    _a: int64x1_t,
    b: int64x2_t,
) -> int64x1_t {
    static_assert!(LANE1 == 0);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe { transmute::<i64, _>(simd_extract!(b, LANE2 as u32)) }
}
138
/// Insert vector element from another vector element.
///
/// Extracts lane `LANE2` (0 or 1) of the two-lane vector `b` and returns it
/// as a one-lane vector. `LANE1` must be 0 since the destination has a single
/// lane; both constraints are enforced at compile time.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u64<const LANE1: i32, const LANE2: i32>(
    _a: uint64x1_t,
    b: uint64x2_t,
) -> uint64x1_t {
    static_assert!(LANE1 == 0);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe { transmute::<u64, _>(simd_extract!(b, LANE2 as u32)) }
}
153
/// Insert vector element from another vector element.
///
/// Extracts lane `LANE2` (0 or 1) of the two-lane vector `b` and returns it
/// as a one-lane vector. `LANE1` must be 0 since the destination has a single
/// lane; both constraints are enforced at compile time.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p64<const LANE1: i32, const LANE2: i32>(
    _a: poly64x1_t,
    b: poly64x2_t,
) -> poly64x1_t {
    static_assert!(LANE1 == 0);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe { transmute::<u64, _>(simd_extract!(b, LANE2 as u32)) }
}
168
/// Insert vector element from another vector element.
///
/// Extracts lane `LANE2` (0 or 1) of the two-lane vector `b` and returns it
/// as a one-lane vector. `LANE1` must be 0 since the destination has a single
/// lane; both constraints are enforced at compile time.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f64<const LANE1: i32, const LANE2: i32>(
    _a: float64x1_t,
    b: float64x2_t,
) -> float64x1_t {
    static_assert!(LANE1 == 0);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe { transmute::<f64, _>(simd_extract!(b, LANE2 as u32)) }
}
183
/// Load one single-element structure and replicate to all lanes.
///
/// The destination has a single lane, so this is the same as a plain
/// one-element load (`vld1_f64`).
///
/// # Safety
///
/// `ptr` must be valid for a read of one `f64`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_dup_f64(ptr: *const f64) -> float64x1_t {
    vld1_f64(ptr)
}
192
/// Load one single-element structure and replicate to all lanes.
///
/// Loads `*ptr` into lane 0 of a zeroed vector, then broadcasts lane 0 to
/// both lanes with a `[0, 0]` shuffle.
///
/// # Safety
///
/// `ptr` must be valid for a read of one `f64`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t {
    let x = vld1q_lane_f64::<0>(ptr, transmute(f64x2::splat(0.)));
    simd_shuffle!(x, x, [0, 0])
}
202
/// Load one single-element structure to one lane of one register.
///
/// Reads `*ptr` and inserts it at lane `LANE` of `src`; the destination has a
/// single lane, so `LANE` must be 0 (enforced at compile time).
///
/// # Safety
///
/// `ptr` must be valid for a read of one `f64`.
#[inline]
#[target_feature(enable = "neon")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(ldr, LANE = 0))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    simd_insert!(src, LANE as u32, *ptr)
}
213
/// Load one single-element structure to one lane of one register.
///
/// Reads `*ptr` and inserts it at lane `LANE` (0 or 1, enforced at compile
/// time) of `src`, leaving the other lane untouched.
///
/// # Safety
///
/// `ptr` must be valid for a read of one `f64`.
#[inline]
#[target_feature(enable = "neon")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(ld1, LANE = 1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    simd_insert!(src, LANE as u32, *ptr)
}
224
/// Bitwise select.
///
/// For each bit position, selects the bit from `b` where the mask `a` is 1
/// and the bit from `c` where it is 0: `(a & b) | (!a & c)`, computed on the
/// raw bit patterns of the f64 lanes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // All-ones vector used to complement the mask via XOR.
    let not = int64x1_t::splat(-1);
    unsafe {
        transmute(simd_or(
            simd_and(a, transmute(b)),
            simd_and(simd_xor(a, transmute(not)), transmute(c)),
        ))
    }
}
/// Bitwise select.
///
/// For each bit position, selects the bit from `b` where the mask `a` is 1
/// and the bit from `c` where it is 0: `(a & b) | (!a & c)`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t {
    // All-ones vector used to complement the mask via XOR.
    let not = int64x1_t::splat(-1);
    unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) }
}
/// Bitwise select.
///
/// For each bit position, selects the bit from `b` where the mask `a` is 1
/// and the bit from `c` where it is 0: `(a & b) | (!a & c)`, computed on the
/// raw bit patterns of the f64 lanes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // All-ones vector used to complement the mask via XOR.
    let not = int64x2_t::splat(-1);
    unsafe {
        transmute(simd_or(
            simd_and(a, transmute(b)),
            simd_and(simd_xor(a, transmute(not)), transmute(c)),
        ))
    }
}
/// Bitwise select.
///
/// For each bit position, selects the bit from `b` where the mask `a` is 1
/// and the bit from `c` where it is 0: `(a & b) | (!a & c)`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t {
    // All-ones vector used to complement the mask via XOR.
    let not = int64x2_t::splat(-1);
    unsafe { simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c)) }
}
273
/// Add vectors: lane-wise floating-point addition of `a` and `b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vadd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_add(a, b) }
}
282
/// Add vectors: lane-wise floating-point addition of `a` and `b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_add(a, b) }
}
291
/// Add vectors: lane-wise integer addition of `a` and `b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(add))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    unsafe { simd_add(a, b) }
}
300
/// Add vectors: lane-wise integer addition of `a` and `b`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(add))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_add(a, b) }
}
309
310#[inline]
312#[target_feature(enable = "neon")]
313#[cfg_attr(test, assert_instr(add))]
314#[stable(feature = "neon_intrinsics", since = "1.59.0")]
315pub fn vaddd_s64(a: i64, b: i64) -> i64 {
316 a.wrapping_add(b)
317}
318
319#[inline]
321#[target_feature(enable = "neon")]
322#[cfg_attr(test, assert_instr(add))]
323#[stable(feature = "neon_intrinsics", since = "1.59.0")]
324pub fn vaddd_u64(a: u64, b: u64) -> u64 {
325 a.wrapping_add(b)
326}
327
/// Extract vector from pair of vectors.
///
/// With single-lane operands the only valid offset is `N == 0` (enforced at
/// compile time), which selects `a` unchanged.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vext_p64<const N: i32>(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_t {
    static_assert!(N == 0);
    a
}
338
/// Extract vector from pair of vectors.
///
/// With single-lane operands the only valid offset is `N == 0` (enforced at
/// compile time), which selects `a` unchanged.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vext_f64<const N: i32>(a: float64x1_t, _b: float64x1_t) -> float64x1_t {
    static_assert!(N == 0);
    a
}
349
/// Duplicate scalar to all lanes of a one-lane poly64 vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmov))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_n_p64(value: p64) -> poly64x1_t {
    unsafe { transmute(u64x1::new(value)) }
}
358
/// Duplicate scalar to all lanes of a one-lane f64 vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_n_f64(value: f64) -> float64x1_t {
    float64x1_t::splat(value)
}
367
/// Duplicate scalar to both lanes of a two-lane poly64 vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_n_p64(value: p64) -> poly64x2_t {
    unsafe { transmute(u64x2::new(value, value)) }
}
376
/// Duplicate scalar to both lanes of a two-lane f64 vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_n_f64(value: f64) -> float64x2_t {
    float64x2_t::splat(value)
}
385
/// Duplicate scalar to all lanes; alias of [`vdup_n_p64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmov))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmov_n_p64(value: p64) -> poly64x1_t {
    vdup_n_p64(value)
}
394
/// Duplicate scalar to all lanes; alias of [`vdup_n_f64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmov_n_f64(value: f64) -> float64x1_t {
    vdup_n_f64(value)
}
403
/// Duplicate scalar to both lanes; alias of [`vdupq_n_p64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmovq_n_p64(value: p64) -> poly64x2_t {
    vdupq_n_p64(value)
}
412
/// Duplicate scalar to both lanes; alias of [`vdupq_n_f64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmovq_n_f64(value: f64) -> float64x2_t {
    vdupq_n_f64(value)
}
421
/// Duplicate the high (index 1) lane of `a` into a one-lane vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vget_high_f64(a: float64x2_t) -> float64x1_t {
    unsafe { float64x1_t([simd_extract!(a, 1)]) }
}
430
/// Duplicate the high (index 1) lane of `a` into a one-lane vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vget_high_p64(a: poly64x2_t) -> poly64x1_t {
    unsafe { transmute(u64x1::new(simd_extract!(a, 1))) }
}
439
/// Duplicate the low (index 0) lane of `a` into a one-lane vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vget_low_f64(a: float64x2_t) -> float64x1_t {
    unsafe { float64x1_t([simd_extract!(a, 0)]) }
}
448
/// Duplicate the low (index 0) lane of `a` into a one-lane vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vget_low_p64(a: poly64x2_t) -> poly64x1_t {
    unsafe { transmute(u64x1::new(simd_extract!(a, 0))) }
}
457
/// Move vector element to general-purpose register.
///
/// Extracts lane `IMM5` of `v` as a scalar; the vector has a single lane, so
/// `IMM5` must be 0 (enforced at compile time).
#[inline]
#[target_feature(enable = "neon")]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(
    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
    assert_instr(nop, IMM5 = 0)
)]
pub fn vget_lane_f64<const IMM5: i32>(v: float64x1_t) -> f64 {
    static_assert!(IMM5 == 0);
    unsafe { simd_extract!(v, IMM5 as u32) }
}
471
/// Move vector element to general-purpose register.
///
/// Extracts lane `IMM5` (0 or 1, enforced at compile time) of `v` as a
/// scalar.
#[inline]
#[target_feature(enable = "neon")]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(
    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
    assert_instr(nop, IMM5 = 0)
)]
pub fn vgetq_lane_f64<const IMM5: i32>(v: float64x2_t) -> f64 {
    static_assert_uimm_bits!(IMM5, 1);
    unsafe { simd_extract!(v, IMM5 as u32) }
}
485
/// Join two smaller vectors into a single larger vector: `low` becomes
/// lane 0 and `high` becomes lane 1 of the result.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcombine_f64(low: float64x1_t, high: float64x1_t) -> float64x2_t {
    unsafe { simd_shuffle!(low, high, [0, 1]) }
}
494
/// Signed shift left: `a << N` with `N` in `0..=63` (enforced at compile
/// time as a 6-bit unsigned immediate).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert_uimm_bits!(N, 6);
    a << N
}
505
/// Unsigned shift left: `a << N` with `N` in `0..=63` (enforced at compile
/// time as a 6-bit unsigned immediate).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    a >> 0 ^ (a << N) ^ a
}
516
517#[inline]
519#[target_feature(enable = "neon")]
520#[cfg_attr(test, assert_instr(nop, N = 2))]
521#[rustc_legacy_const_generics(1)]
522#[stable(feature = "neon_intrinsics", since = "1.59.0")]
523pub fn vshrd_n_s64<const N: i32>(a: i64) -> i64 {
524 static_assert!(N >= 1 && N <= 64);
525 let n: i32 = if N == 64 { 63 } else { N };
526 a >> n
527}
528
529#[inline]
531#[target_feature(enable = "neon")]
532#[cfg_attr(test, assert_instr(nop, N = 2))]
533#[rustc_legacy_const_generics(1)]
534#[stable(feature = "neon_intrinsics", since = "1.59.0")]
535pub fn vshrd_n_u64<const N: i32>(a: u64) -> u64 {
536 static_assert!(N >= 1 && N <= 64);
537 let n: i32 = if N == 64 {
538 return 0;
539 } else {
540 N
541 };
542 a >> n
543}
544
545#[inline]
547#[target_feature(enable = "neon")]
548#[cfg_attr(test, assert_instr(nop, N = 2))]
549#[rustc_legacy_const_generics(2)]
550#[stable(feature = "neon_intrinsics", since = "1.59.0")]
551pub fn vsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
552 static_assert!(N >= 1 && N <= 64);
553 a.wrapping_add(vshrd_n_s64::<N>(b))
554}
555
556#[inline]
558#[target_feature(enable = "neon")]
559#[cfg_attr(test, assert_instr(nop, N = 2))]
560#[rustc_legacy_const_generics(2)]
561#[stable(feature = "neon_intrinsics", since = "1.59.0")]
562pub fn vsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
563 static_assert!(N >= 1 && N <= 64);
564 a.wrapping_add(vshrd_n_u64::<N>(b))
565}
566
// Runtime unit tests for the f64/i64/u64/p64 intrinsics defined above.
// Each test round-trips values through the project SIMD types via
// `transmute` and checks the lane-wise or scalar result.
#[cfg(test)]
mod tests {
    use crate::core_arch::aarch64::test_support::*;
    use crate::core_arch::arm_shared::test_support::*;
    use crate::core_arch::{aarch64::neon::*, aarch64::*, simd::*};
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn test_vadd_f64() {
        let a = 1.;
        let b = 8.;
        let e = 9.;
        let r: f64 = transmute(vadd_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddq_f64() {
        let a = f64x2::new(1., 2.);
        let b = f64x2::new(8., 7.);
        let e = f64x2::new(9., 9.);
        let r: f64x2 = transmute(vaddq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vadd_s64() {
        let a = 1_i64;
        let b = 8_i64;
        let e = 9_i64;
        let r: i64 = transmute(vadd_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vadd_u64() {
        let a = 1_u64;
        let b = 8_u64;
        let e = 9_u64;
        let r: u64 = transmute(vadd_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddd_s64() {
        let a = 1_i64;
        let b = 8_i64;
        let e = 9_i64;
        let r: i64 = vaddd_s64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vaddd_u64() {
        let a = 1_u64;
        let b = 8_u64;
        let e = 9_u64;
        let r: u64 = vaddd_u64(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vext_p64() {
        let a: i64x1 = i64x1::new(0);
        let b: i64x1 = i64x1::new(1);
        let e: i64x1 = i64x1::new(0);
        let r: i64x1 = transmute(vext_p64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vext_f64() {
        let a: f64x1 = f64x1::new(0.);
        let b: f64x1 = f64x1::new(1.);
        let e: f64x1 = f64x1::new(0.);
        let r: f64x1 = transmute(vext_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vshld_n_s64() {
        let a: i64 = 1;
        let e: i64 = 4;
        let r: i64 = vshld_n_s64::<2>(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vshld_n_u64() {
        let a: u64 = 1;
        let e: u64 = 4;
        let r: u64 = vshld_n_u64::<2>(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vshrd_n_s64() {
        let a: i64 = 4;
        let e: i64 = 1;
        let r: i64 = vshrd_n_s64::<2>(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vshrd_n_u64() {
        let a: u64 = 4;
        let e: u64 = 1;
        let r: u64 = vshrd_n_u64::<2>(a);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsrad_n_s64() {
        let a: i64 = 1;
        let b: i64 = 4;
        let e: i64 = 2;
        let r: i64 = vsrad_n_s64::<2>(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vsrad_n_u64() {
        let a: u64 = 1;
        let b: u64 = 4;
        let e: u64 = 2;
        let r: u64 = vsrad_n_u64::<2>(a, b);
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdup_n_f64() {
        let a: f64 = 3.3;
        let e = f64x1::new(3.3);
        let r: f64x1 = transmute(vdup_n_f64(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdup_n_p64() {
        let a: u64 = 3;
        let e = u64x1::new(3);
        let r: u64x1 = transmute(vdup_n_p64(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupq_n_f64() {
        let a: f64 = 3.3;
        let e = f64x2::new(3.3, 3.3);
        let r: f64x2 = transmute(vdupq_n_f64(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vdupq_n_p64() {
        let a: u64 = 3;
        let e = u64x2::new(3, 3);
        let r: u64x2 = transmute(vdupq_n_p64(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmov_n_p64() {
        let a: u64 = 3;
        let e = u64x1::new(3);
        let r: u64x1 = transmute(vmov_n_p64(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmov_n_f64() {
        let a: f64 = 3.3;
        let e = f64x1::new(3.3);
        let r: f64x1 = transmute(vmov_n_f64(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovq_n_p64() {
        let a: u64 = 3;
        let e = u64x2::new(3, 3);
        let r: u64x2 = transmute(vmovq_n_p64(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vmovq_n_f64() {
        let a: f64 = 3.3;
        let e = f64x2::new(3.3, 3.3);
        let r: f64x2 = transmute(vmovq_n_f64(a));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vget_high_f64() {
        let a = f64x2::new(1.0, 2.0);
        let e = f64x1::new(2.0);
        let r: f64x1 = transmute(vget_high_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vget_high_p64() {
        let a = u64x2::new(1, 2);
        let e = u64x1::new(2);
        let r: u64x1 = transmute(vget_high_p64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vget_low_f64() {
        let a = f64x2::new(1.0, 2.0);
        let e = f64x1::new(1.0);
        let r: f64x1 = transmute(vget_low_f64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vget_low_p64() {
        let a = u64x2::new(1, 2);
        let e = u64x1::new(1);
        let r: u64x1 = transmute(vget_low_p64(transmute(a)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vget_lane_f64() {
        let v = f64x1::new(1.0);
        let r = vget_lane_f64::<0>(transmute(v));
        assert_eq!(r, 1.0);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vgetq_lane_f64() {
        let v = f64x2::new(0.0, 1.0);
        let r = vgetq_lane_f64::<1>(transmute(v));
        assert_eq!(r, 1.0);
        let r = vgetq_lane_f64::<0>(transmute(v));
        assert_eq!(r, 0.0);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_s64() {
        let a: i64x1 = i64x1::new(1);
        let b: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
        let r: i64x1 = transmute(vcopy_lane_s64::<0, 0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_u64() {
        let a: u64x1 = u64x1::new(1);
        let b: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcopy_lane_u64::<0, 0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_p64() {
        let a: i64x1 = i64x1::new(1);
        let b: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
        let r: i64x1 = transmute(vcopy_lane_p64::<0, 0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_lane_f64() {
        let a: f64 = 1.;
        let b: f64 = 0.;
        let e: f64 = 0.;
        let r: f64 = transmute(vcopy_lane_f64::<0, 0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_laneq_s64() {
        let a: i64x1 = i64x1::new(1);
        let b: i64x2 = i64x2::new(0, 0x7F_FF_FF_FF_FF_FF_FF_FF);
        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
        let r: i64x1 = transmute(vcopy_laneq_s64::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_laneq_u64() {
        let a: u64x1 = u64x1::new(1);
        let b: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
        let r: u64x1 = transmute(vcopy_laneq_u64::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_laneq_p64() {
        let a: i64x1 = i64x1::new(1);
        let b: i64x2 = i64x2::new(0, 0x7F_FF_FF_FF_FF_FF_FF_FF);
        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
        let r: i64x1 = transmute(vcopy_laneq_p64::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vcopy_laneq_f64() {
        let a: f64 = 1.;
        let b: f64x2 = f64x2::new(0., 0.5);
        let e: f64 = 0.5;
        let r: f64 = transmute(vcopy_laneq_f64::<0, 1>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vbsl_f64() {
        let a = u64x1::new(0x8000000000000000);
        let b = f64x1::new(-1.23f64);
        let c = f64x1::new(2.34f64);
        let e = f64x1::new(-2.34f64);
        let r: f64x1 = transmute(vbsl_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
    #[simd_test(enable = "neon")]
    unsafe fn test_vbsl_p64() {
        let a = u64x1::new(1);
        let b = u64x1::new(u64::MAX);
        let c = u64x1::new(u64::MIN);
        let e = u64x1::new(1);
        let r: u64x1 = transmute(vbsl_p64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
    #[simd_test(enable = "neon")]
    unsafe fn test_vbslq_f64() {
        let a = u64x2::new(1, 0x8000000000000000);
        let b = f64x2::new(f64::MAX, -1.23f64);
        let c = f64x2::new(f64::MIN, 2.34f64);
        let e = f64x2::new(f64::MIN, -2.34f64);
        let r: f64x2 = transmute(vbslq_f64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }
    #[simd_test(enable = "neon")]
    unsafe fn test_vbslq_p64() {
        let a = u64x2::new(u64::MAX, 1);
        let b = u64x2::new(u64::MAX, u64::MAX);
        let c = u64x2::new(u64::MIN, u64::MIN);
        let e = u64x2::new(u64::MAX, 1);
        let r: u64x2 = transmute(vbslq_p64(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, e);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1_f64() {
        let a: [f64; 2] = [0., 1.];
        let e = f64x1::new(1.);
        let r: f64x1 = transmute(vld1_f64(a[1..].as_ptr()));
        assert_eq!(r, e)
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1q_f64() {
        let a: [f64; 3] = [0., 1., 2.];
        let e = f64x2::new(1., 2.);
        let r: f64x2 = transmute(vld1q_f64(a[1..].as_ptr()));
        assert_eq!(r, e)
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1_dup_f64() {
        let a: [f64; 2] = [1., 42.];
        let e = f64x1::new(42.);
        let r: f64x1 = transmute(vld1_dup_f64(a[1..].as_ptr()));
        assert_eq!(r, e)
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1q_dup_f64() {
        let elem: f64 = 42.;
        let e = f64x2::new(42., 42.);
        let r: f64x2 = transmute(vld1q_dup_f64(&elem));
        assert_eq!(r, e)
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1_lane_f64() {
        let a = f64x1::new(0.);
        let elem: f64 = 42.;
        let e = f64x1::new(42.);
        let r: f64x1 = transmute(vld1_lane_f64::<0>(&elem, transmute(a)));
        assert_eq!(r, e)
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vld1q_lane_f64() {
        let a = f64x2::new(0., 1.);
        let elem: f64 = 42.;
        let e = f64x2::new(0., 42.);
        let r: f64x2 = transmute(vld1q_lane_f64::<1>(&elem, transmute(a)));
        assert_eq!(r, e)
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst1_f64() {
        let mut vals = [0_f64; 2];
        let a = f64x1::new(1.);

        vst1_f64(vals[1..].as_mut_ptr(), transmute(a));

        assert_eq!(vals[0], 0.);
        assert_eq!(vals[1], 1.);
    }

    #[simd_test(enable = "neon")]
    unsafe fn test_vst1q_f64() {
        let mut vals = [0_f64; 3];
        let a = f64x2::new(1., 2.);

        vst1q_f64(vals[1..].as_mut_ptr(), transmute(a));

        assert_eq!(vals[0], 0.);
        assert_eq!(vals[1], 1.);
        assert_eq!(vals[2], 2.);
    }
}
991
992#[cfg(test)]
993#[path = "../../arm_shared/neon/table_lookup_tests.rs"]
994mod table_lookup_tests;
995
996#[cfg(test)]
997#[path = "../../arm_shared/neon/shift_and_insert_tests.rs"]
998mod shift_and_insert_tests;
999
1000#[cfg(test)]
1001#[path = "../../arm_shared/neon/load_tests.rs"]
1002mod load_tests;
1003
1004#[cfg(test)]
1005#[path = "../../arm_shared/neon/store_tests.rs"]
1006mod store_tests;