core/stdarch/crates/core_arch/src/riscv_shared/zb.rs

#[cfg(test)]
use stdarch_test::assert_instr;

#[cfg(target_arch = "riscv32")]
unsafe extern "unadjusted" {
    #[link_name = "llvm.riscv.orc.b.i32"]
    fn _orc_b_32(rs: i32) -> i32;

    #[link_name = "llvm.riscv.clmul.i32"]
    fn _clmul_32(rs1: i32, rs2: i32) -> i32;

    #[link_name = "llvm.riscv.clmulh.i32"]
    fn _clmulh_32(rs1: i32, rs2: i32) -> i32;

    #[link_name = "llvm.riscv.clmulr.i32"]
    fn _clmulr_32(rs1: i32, rs2: i32) -> i32;
}

#[cfg(target_arch = "riscv64")]
unsafe extern "unadjusted" {
    #[link_name = "llvm.riscv.orc.b.i64"]
    fn _orc_b_64(rs1: i64) -> i64;

    #[link_name = "llvm.riscv.clmul.i64"]
    fn _clmul_64(rs1: i64, rs2: i64) -> i64;

    #[link_name = "llvm.riscv.clmulh.i64"]
    fn _clmulh_64(rs1: i64, rs2: i64) -> i64;

    #[link_name = "llvm.riscv.clmulr.i64"]
    fn _clmulr_64(rs1: i64, rs2: i64) -> i64;
}

/// Bitwise OR-Combine, byte granule
///
/// Combines the bits within each byte using a bitwise logical OR. This sets the bits of each byte in
/// the result rd to all zeros if no bit within the respective byte of rs is set, or to all ones if any bit within the
/// respective byte of rs is set.
///
/// Source: RISC-V Bit-Manipulation ISA-extensions
///
/// Version: v1.0.0
///
/// Section: 2.24
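///
/// # Examples
///
/// A minimal sketch, assuming a RISC-V target with the Zbb extension enabled; the operand below is
/// illustrative, not taken from the specification:
///
/// ```ignore
/// // Each zero byte stays 0x00; each byte with at least one bit set becomes 0xff.
/// let x: usize = 0x0010_00ff;
/// assert_eq!(orc_b(x), 0x00ff_00ff);
/// ```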
#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")]
#[target_feature(enable = "zbb")]
#[cfg_attr(test, assert_instr(orc.b))]
#[inline]
pub fn orc_b(rs: usize) -> usize {
    #[cfg(target_arch = "riscv32")]
    unsafe {
        _orc_b_32(rs as i32) as usize
    }

    #[cfg(target_arch = "riscv64")]
    unsafe {
        _orc_b_64(rs as i64) as usize
    }
}

/// Carry-less multiply (low-part)
///
/// clmul produces the lower half of the 2·XLEN carry-less product.
///
/// Source: RISC-V Bit-Manipulation ISA-extensions
///
/// Version: v1.0.0
///
/// Section: 2.11
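///
/// # Examples
///
/// A minimal sketch, assuming a RISC-V target with the Zbc extension enabled; the operands are
/// illustrative, not taken from the specification:
///
/// ```ignore
/// // Carry-less multiplication is polynomial multiplication over GF(2):
/// // (x^2 + 1) * (x + 1) = x^3 + x^2 + x + 1, so 0b101 clmul 0b011 = 0b1111.
/// assert_eq!(clmul(0b101, 0b011), 0b1111);
/// ```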
#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")]
#[target_feature(enable = "zbc")]
#[cfg_attr(test, assert_instr(clmul))]
#[inline]
pub fn clmul(rs1: usize, rs2: usize) -> usize {
    #[cfg(target_arch = "riscv32")]
    unsafe {
        _clmul_32(rs1 as i32, rs2 as i32) as usize
    }

    #[cfg(target_arch = "riscv64")]
    unsafe {
        _clmul_64(rs1 as i64, rs2 as i64) as usize
    }
}

/// Carry-less multiply (high-part)
///
/// clmulh produces the upper half of the 2·XLEN carry-less product.
///
/// Source: RISC-V Bit-Manipulation ISA-extensions
///
/// Version: v1.0.0
///
/// Section: 2.12
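///
/// # Examples
///
/// A minimal sketch, assuming a RISC-V target with the Zbc extension enabled; the operands are
/// illustrative, not taken from the specification:
///
/// ```ignore
/// // With small operands the full 2·XLEN-bit carry-less product fits entirely in the low half
/// // returned by clmul, so the high half returned by clmulh is zero.
/// assert_eq!(clmulh(0b101, 0b011), 0);
/// // The full product is the concatenation (clmulh(rs1, rs2), clmul(rs1, rs2)).
/// ```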
#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")]
#[target_feature(enable = "zbc")]
#[cfg_attr(test, assert_instr(clmulh))]
#[inline]
pub fn clmulh(rs1: usize, rs2: usize) -> usize {
    #[cfg(target_arch = "riscv32")]
    unsafe {
        _clmulh_32(rs1 as i32, rs2 as i32) as usize
    }

    #[cfg(target_arch = "riscv64")]
    unsafe {
        _clmulh_64(rs1 as i64, rs2 as i64) as usize
    }
}

/// Carry-less multiply (reversed)
///
/// clmulr produces bits 2·XLEN−2:XLEN−1 of the 2·XLEN carry-less product.
///
/// Source: RISC-V Bit-Manipulation ISA-extensions
///
/// Version: v1.0.0
///
/// Section: 2.13
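///
/// # Examples
///
/// A minimal sketch, assuming a RISC-V target with the Zbc extension enabled; the identity below
/// follows from the bit ranges the three instructions return and is illustrative, not taken from
/// the specification:
///
/// ```ignore
/// // clmulr returns bits 2·XLEN−2 down to XLEN−1 of the full carry-less product, i.e. the
/// // high half shifted left by one, ORed with the top bit of the low half.
/// let (a, b) = (0x1234_5678usize, 0x9abc_def0usize);
/// assert_eq!(clmulr(a, b), (clmulh(a, b) << 1) | (clmul(a, b) >> (usize::BITS - 1)));
/// ```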
#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")]
#[target_feature(enable = "zbc")]
#[cfg_attr(test, assert_instr(clmulr))]
#[inline]
pub fn clmulr(rs1: usize, rs2: usize) -> usize {
    #[cfg(target_arch = "riscv32")]
    unsafe {
        _clmulr_32(rs1 as i32, rs2 as i32) as usize
    }

    #[cfg(target_arch = "riscv64")]
    unsafe {
        _clmulr_64(rs1 as i64, rs2 as i64) as usize
    }
}