use crate::core_arch::arm_shared::{uint32x4_t, uint8x16_t};

#[allow(improper_ctypes)]
extern "unadjusted" {
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.aese"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")]
    fn vaeseq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.aesd"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")]
    fn vaesdq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.aesmc"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")]
    fn vaesmcq_u8_(data: uint8x16_t) -> uint8x16_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.aesimc"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")]
    fn vaesimcq_u8_(data: uint8x16_t) -> uint8x16_t;

    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha1h"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")]
    fn vsha1h_u32_(hash_e: u32) -> u32;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha1su0"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")]
    fn vsha1su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha1su1"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")]
    fn vsha1su1q_u32_(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha1c"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")]
    fn vsha1cq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha1p"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")]
    fn vsha1pq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha1m"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")]
    fn vsha1mq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;

    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha256h"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")]
    fn vsha256hq_u32_(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha256h2"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")]
    fn vsha256h2q_u32_(hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha256su0"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")]
    fn vsha256su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(
        any(target_arch = "aarch64", target_arch = "arm64ec"),
        link_name = "llvm.aarch64.crypto.sha256su1"
    )]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")]
    fn vsha256su1q_u32_(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
}

#[cfg(test)]
use stdarch_test::assert_instr;

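/// AES single round encryption.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)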
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aese))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
    vaeseq_u8_(data, key)
}

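/// AES single round decryption.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)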
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesd))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
    vaesdq_u8_(data, key)
}

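/// AES mix columns.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)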
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesmc))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
    vaesmcq_u8_(data)
}

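/// AES inverse mix columns.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)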
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(aesimc))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
    vaesimcq_u8_(data)
}

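/// SHA1 fixed rotate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)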
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1h))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
    vsha1h_u32_(hash_e)
}

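/// SHA1 hash update accelerator, choose.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)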
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1c))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1cq_u32_(hash_abcd, hash_e, wk)
}

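/// SHA1 hash update accelerator, majority.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)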
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1m))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1mq_u32_(hash_abcd, hash_e, wk)
}

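/// SHA1 hash update accelerator, parity.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)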
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1p))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1pq_u32_(hash_abcd, hash_e, wk)
}

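/// SHA1 schedule update accelerator, first part.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)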
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1su0))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
    vsha1su0q_u32_(w0_3, w4_7, w8_11)
}

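/// SHA1 schedule update accelerator, second part.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)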
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha1su1))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
    vsha1su1q_u32_(tw0_3, w12_15)
}

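/// SHA256 hash update accelerator.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)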
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256h))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha256hq_u32(
    hash_abcd: uint32x4_t,
    hash_efgh: uint32x4_t,
    wk: uint32x4_t,
) -> uint32x4_t {
    vsha256hq_u32_(hash_abcd, hash_efgh, wk)
}

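/// SHA256 hash update accelerator, upper part.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)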
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256h2))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha256h2q_u32(
    hash_efgh: uint32x4_t,
    hash_abcd: uint32x4_t,
    wk: uint32x4_t,
) -> uint32x4_t {
    vsha256h2q_u32_(hash_efgh, hash_abcd, wk)
}

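/// SHA256 schedule update accelerator, first part.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)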
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256su0))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
    vsha256su0q_u32_(w0_3, w4_7)
}

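/// SHA256 schedule update accelerator, second part.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)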
#[inline]
#[target_feature(enable = "sha2")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(test, assert_instr(sha256su1))]
#[cfg_attr(
    target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
)]
#[cfg_attr(
    not(target_arch = "arm"),
    stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
)]
pub unsafe fn vsha256su1q_u32(
    tw0_3: uint32x4_t,
    w8_11: uint32x4_t,
    w12_15: uint32x4_t,
) -> uint32x4_t {
    vsha256su1q_u32_(tw0_3, w8_11, w12_15)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::core_arch::{arm_shared::*, simd::*};
    use std::mem;
    use stdarch_test::simd_test;

    #[simd_test(enable = "aes")]
    unsafe fn test_vaeseq_u8() {
        let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
        let r: u8x16 = mem::transmute(vaeseq_u8(data, key));
        assert_eq!(
            r,
            u8x16::new(
                124, 123, 124, 118, 124, 123, 124, 197, 124, 123, 124, 118, 124, 123, 124, 197
            )
        );
    }

    #[simd_test(enable = "aes")]
    unsafe fn test_vaesdq_u8() {
        let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
        let r: u8x16 = mem::transmute(vaesdq_u8(data, key));
        assert_eq!(
            r,
            u8x16::new(9, 213, 9, 251, 9, 213, 9, 56, 9, 213, 9, 251, 9, 213, 9, 56)
        );
    }

    #[simd_test(enable = "aes")]
    unsafe fn test_vaesmcq_u8() {
        let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let r: u8x16 = mem::transmute(vaesmcq_u8(data));
        assert_eq!(
            r,
            u8x16::new(3, 4, 9, 10, 15, 8, 21, 30, 3, 4, 9, 10, 15, 8, 21, 30)
        );
    }

    #[simd_test(enable = "aes")]
    unsafe fn test_vaesimcq_u8() {
        let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let r: u8x16 = mem::transmute(vaesimcq_u8(data));
        assert_eq!(
            r,
            u8x16::new(43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80, 125, 70)
        );
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha1h_u32() {
        assert_eq!(vsha1h_u32(0x1234), 0x048d);
        assert_eq!(vsha1h_u32(0x5678), 0x159e);
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha1su0q_u32() {
        let r: u32x4 = mem::transmute(vsha1su0q_u32(
            mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
            mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
            mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
        ));
        assert_eq!(r, u32x4::new(0x9abc, 0xdef0, 0x1234, 0x5678));
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha1su1q_u32() {
        let r: u32x4 = mem::transmute(vsha1su1q_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x00008898, 0x00019988, 0x00008898, 0x0000acd0)
        );
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha1cq_u32() {
        let r: u32x4 = mem::transmute(vsha1cq_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            0x1234,
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x8a32cbd8, 0x0c518a96, 0x0018a081, 0x0000c168)
        );
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha1pq_u32() {
        let r: u32x4 = mem::transmute(vsha1pq_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            0x1234,
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x469f0ba3, 0x0a326147, 0x80145d7f, 0x00009f47)
        );
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha1mq_u32() {
        let r: u32x4 = mem::transmute(vsha1mq_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            0x1234,
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0xaa39693b, 0x0d51bf84, 0x001aa109, 0x0000d278)
        );
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha256hq_u32() {
        let r: u32x4 = mem::transmute(vsha256hq_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x05e9aaa8, 0xec5f4c02, 0x20a1ea61, 0x28738cef)
        );
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha256h2q_u32() {
        let r: u32x4 = mem::transmute(vsha256h2q_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x3745362e, 0x2fb51d00, 0xbd4c529b, 0x968b8516)
        );
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha256su0q_u32() {
        let r: u32x4 = mem::transmute(vsha256su0q_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0xe59e1c97, 0x5eaf68da, 0xd7bcb51f, 0x6c8de152)
        );
    }

    #[simd_test(enable = "sha2")]
    unsafe fn test_vsha256su1q_u32() {
        let r: u32x4 = mem::transmute(vsha256su1q_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x5e09e8d2, 0x74a6f16b, 0xc966606b, 0xa686ee9f)
        );
    }
}