rustc_target/asm/
aarch64.rs

1use std::fmt;
2
3use rustc_data_structures::fx::FxIndexSet;
4use rustc_span::{Symbol, sym};
5
6use super::{InlineAsmArch, InlineAsmType, ModifierInfo};
7use crate::spec::{RelocModel, Target};
8
// Register classes usable in `asm!` operand constraints on AArch64.
// The concrete registers belonging to each class are listed in the
// `def_regs!` invocation further down in this file.
def_reg_class! {
    AArch64 AArch64InlineAsmRegClass {
        // General-purpose integer registers (`x0`-`x30`, minus the
        // reserved ones rejected below).
        reg,
        // Vector/FP registers `v0`-`v31`.
        vreg,
        // Vector/FP registers restricted to `v0`-`v15`.
        vreg_low16,
        // Predicate registers `p0`-`p15` plus `ffr`.
        preg,
    }
}
17
18impl AArch64InlineAsmRegClass {
19    pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
20        match self {
21            Self::reg => &['w', 'x'],
22            Self::vreg | Self::vreg_low16 => &['b', 'h', 's', 'd', 'q', 'v'],
23            Self::preg => &[],
24        }
25    }
26
27    pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
28        None
29    }
30
31    pub fn suggest_modifier(self, _arch: InlineAsmArch, ty: InlineAsmType) -> Option<ModifierInfo> {
32        match self {
33            Self::reg => match ty.size().bits() {
34                64 => None,
35                _ => Some(('w', "w0", 32).into()),
36            },
37            Self::vreg | Self::vreg_low16 => match ty.size().bits() {
38                8 => Some(('b', "b0", 8).into()),
39                16 => Some(('h', "h0", 16).into()),
40                32 => Some(('s', "s0", 32).into()),
41                64 => Some(('d', "d0", 64).into()),
42                128 => Some(('q', "q0", 128).into()),
43                _ => None,
44            },
45            Self::preg => None,
46        }
47    }
48
49    pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<ModifierInfo> {
50        match self {
51            Self::reg => Some(('x', "x0", 64).into()),
52            Self::vreg | Self::vreg_low16 => Some(('v', "v0", 128).into()),
53            Self::preg => None,
54        }
55    }
56
57    pub fn supported_types(
58        self,
59        _arch: InlineAsmArch,
60    ) -> &'static [(InlineAsmType, Option<Symbol>)] {
61        match self {
62            Self::reg => types! { _: I8, I16, I32, I64, F16, F32, F64; },
63            Self::vreg | Self::vreg_low16 => types! {
64                neon: I8, I16, I32, I64, F16, F32, F64, F128,
65                    VecI8(8), VecI16(4), VecI32(2), VecI64(1), VecF16(4), VecF32(2), VecF64(1),
66                    VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2);
67                // Note: When adding support for SVE vector types, they must be rejected for Arm64EC.
68            },
69            Self::preg => &[],
70        }
71    }
72}
73
74pub(crate) fn target_reserves_x18(target: &Target, target_features: &FxIndexSet<Symbol>) -> bool {
75    // See isX18ReservedByDefault in LLVM for targets reserve x18 by default:
76    // https://github.com/llvm/llvm-project/blob/llvmorg-19.1.0/llvm/lib/TargetParser/AArch64TargetParser.cpp#L102-L105
77    // Note that +reserve-x18 is currently not set for the above targets.
78    target.os == "android"
79        || target.os == "fuchsia"
80        || target.env == "ohos"
81        || target.is_like_osx
82        || target.is_like_windows
83        || target_features.contains(&sym::reserve_x18)
84}
85
86fn reserved_x18(
87    _arch: InlineAsmArch,
88    _reloc_model: RelocModel,
89    target_features: &FxIndexSet<Symbol>,
90    target: &Target,
91    _is_clobber: bool,
92) -> Result<(), &'static str> {
93    if target_reserves_x18(target, target_features) {
94        Err("x18 is a reserved register on this target")
95    } else {
96        Ok(())
97    }
98}
99
100fn restricted_for_arm64ec(
101    arch: InlineAsmArch,
102    _reloc_model: RelocModel,
103    _target_features: &FxIndexSet<Symbol>,
104    _target: &Target,
105    _is_clobber: bool,
106) -> Result<(), &'static str> {
107    if arch == InlineAsmArch::Arm64EC {
108        Err("x13, x14, x23, x24, x28, v16-v31, p*, ffr cannot be used for Arm64EC")
109    } else {
110        Ok(())
111    }
112}
113
// Concrete AArch64 registers: the class(es) each belongs to, its accepted
// alias names, and (after `%`) an optional filter that can reject it for a
// particular target or architecture. Registers that can never be used as
// operands are listed as `#error` entries with an explanatory message.
def_regs! {
    AArch64 AArch64InlineAsmReg AArch64InlineAsmRegClass {
        x0: reg = ["x0", "w0"],
        x1: reg = ["x1", "w1"],
        x2: reg = ["x2", "w2"],
        x3: reg = ["x3", "w3"],
        x4: reg = ["x4", "w4"],
        x5: reg = ["x5", "w5"],
        x6: reg = ["x6", "w6"],
        x7: reg = ["x7", "w7"],
        x8: reg = ["x8", "w8"],
        x9: reg = ["x9", "w9"],
        x10: reg = ["x10", "w10"],
        x11: reg = ["x11", "w11"],
        x12: reg = ["x12", "w12"],
        // x13/x14 (and x23/x24/x28 below) are rejected under Arm64EC.
        x13: reg = ["x13", "w13"] % restricted_for_arm64ec,
        x14: reg = ["x14", "w14"] % restricted_for_arm64ec,
        x15: reg = ["x15", "w15"],
        x16: reg = ["x16", "w16"],
        x17: reg = ["x17", "w17"],
        // x18 is only usable on targets that don't reserve it (see
        // `target_reserves_x18` above). x19 is absent: see `#error` below.
        x18: reg = ["x18", "w18"] % reserved_x18,
        x20: reg = ["x20", "w20"],
        x21: reg = ["x21", "w21"],
        x22: reg = ["x22", "w22"],
        x23: reg = ["x23", "w23"] % restricted_for_arm64ec,
        x24: reg = ["x24", "w24"] % restricted_for_arm64ec,
        x25: reg = ["x25", "w25"],
        x26: reg = ["x26", "w26"],
        x27: reg = ["x27", "w27"],
        x28: reg = ["x28", "w28"] % restricted_for_arm64ec,
        // x29 (the frame pointer) is absent: see `#error` below.
        x30: reg = ["x30", "w30", "lr", "wlr"],
        // v0-v15 are usable via both `vreg` and `vreg_low16`. The `z*`
        // aliases are presumably the scalable-vector (SVE) names for the
        // same registers — TODO(review) confirm.
        v0: vreg, vreg_low16 = ["v0", "b0", "h0", "s0", "d0", "q0", "z0"],
        v1: vreg, vreg_low16 = ["v1", "b1", "h1", "s1", "d1", "q1", "z1"],
        v2: vreg, vreg_low16 = ["v2", "b2", "h2", "s2", "d2", "q2", "z2"],
        v3: vreg, vreg_low16 = ["v3", "b3", "h3", "s3", "d3", "q3", "z3"],
        v4: vreg, vreg_low16 = ["v4", "b4", "h4", "s4", "d4", "q4", "z4"],
        v5: vreg, vreg_low16 = ["v5", "b5", "h5", "s5", "d5", "q5", "z5"],
        v6: vreg, vreg_low16 = ["v6", "b6", "h6", "s6", "d6", "q6", "z6"],
        v7: vreg, vreg_low16 = ["v7", "b7", "h7", "s7", "d7", "q7", "z7"],
        v8: vreg, vreg_low16 = ["v8", "b8", "h8", "s8", "d8", "q8", "z8"],
        v9: vreg, vreg_low16 = ["v9", "b9", "h9", "s9", "d9", "q9", "z9"],
        v10: vreg, vreg_low16 = ["v10", "b10", "h10", "s10", "d10", "q10", "z10"],
        v11: vreg, vreg_low16 = ["v11", "b11", "h11", "s11", "d11", "q11", "z11"],
        v12: vreg, vreg_low16 = ["v12", "b12", "h12", "s12", "d12", "q12", "z12"],
        v13: vreg, vreg_low16 = ["v13", "b13", "h13", "s13", "d13", "q13", "z13"],
        v14: vreg, vreg_low16 = ["v14", "b14", "h14", "s14", "d14", "q14", "z14"],
        v15: vreg, vreg_low16 = ["v15", "b15", "h15", "s15", "d15", "q15", "z15"],
        // v16-v31 belong to `vreg` only and are unavailable under Arm64EC.
        v16: vreg = ["v16", "b16", "h16", "s16", "d16", "q16", "z16"] % restricted_for_arm64ec,
        v17: vreg = ["v17", "b17", "h17", "s17", "d17", "q17", "z17"] % restricted_for_arm64ec,
        v18: vreg = ["v18", "b18", "h18", "s18", "d18", "q18", "z18"] % restricted_for_arm64ec,
        v19: vreg = ["v19", "b19", "h19", "s19", "d19", "q19", "z19"] % restricted_for_arm64ec,
        v20: vreg = ["v20", "b20", "h20", "s20", "d20", "q20", "z20"] % restricted_for_arm64ec,
        v21: vreg = ["v21", "b21", "h21", "s21", "d21", "q21", "z21"] % restricted_for_arm64ec,
        v22: vreg = ["v22", "b22", "h22", "s22", "d22", "q22", "z22"] % restricted_for_arm64ec,
        v23: vreg = ["v23", "b23", "h23", "s23", "d23", "q23", "z23"] % restricted_for_arm64ec,
        v24: vreg = ["v24", "b24", "h24", "s24", "d24", "q24", "z24"] % restricted_for_arm64ec,
        v25: vreg = ["v25", "b25", "h25", "s25", "d25", "q25", "z25"] % restricted_for_arm64ec,
        v26: vreg = ["v26", "b26", "h26", "s26", "d26", "q26", "z26"] % restricted_for_arm64ec,
        v27: vreg = ["v27", "b27", "h27", "s27", "d27", "q27", "z27"] % restricted_for_arm64ec,
        v28: vreg = ["v28", "b28", "h28", "s28", "d28", "q28", "z28"] % restricted_for_arm64ec,
        v29: vreg = ["v29", "b29", "h29", "s29", "d29", "q29", "z29"] % restricted_for_arm64ec,
        v30: vreg = ["v30", "b30", "h30", "s30", "d30", "q30", "z30"] % restricted_for_arm64ec,
        v31: vreg = ["v31", "b31", "h31", "s31", "d31", "q31", "z31"] % restricted_for_arm64ec,
        // Predicate registers and ffr are all unavailable under Arm64EC.
        p0: preg = ["p0"] % restricted_for_arm64ec,
        p1: preg = ["p1"] % restricted_for_arm64ec,
        p2: preg = ["p2"] % restricted_for_arm64ec,
        p3: preg = ["p3"] % restricted_for_arm64ec,
        p4: preg = ["p4"] % restricted_for_arm64ec,
        p5: preg = ["p5"] % restricted_for_arm64ec,
        p6: preg = ["p6"] % restricted_for_arm64ec,
        p7: preg = ["p7"] % restricted_for_arm64ec,
        p8: preg = ["p8"] % restricted_for_arm64ec,
        p9: preg = ["p9"] % restricted_for_arm64ec,
        p10: preg = ["p10"] % restricted_for_arm64ec,
        p11: preg = ["p11"] % restricted_for_arm64ec,
        p12: preg = ["p12"] % restricted_for_arm64ec,
        p13: preg = ["p13"] % restricted_for_arm64ec,
        p14: preg = ["p14"] % restricted_for_arm64ec,
        p15: preg = ["p15"] % restricted_for_arm64ec,
        ffr: preg = ["ffr"] % restricted_for_arm64ec,
        // Registers that can never be used as inline-asm operands.
        #error = ["x19", "w19"] =>
            "x19 is used internally by LLVM and cannot be used as an operand for inline asm",
        #error = ["x29", "w29", "fp", "wfp"] =>
            "the frame pointer cannot be used as an operand for inline asm",
        #error = ["sp", "wsp"] =>
            "the stack pointer cannot be used as an operand for inline asm",
        #error = ["xzr", "wzr"] =>
            "the zero register cannot be used as an operand for inline asm",
    }
}
204
205impl AArch64InlineAsmReg {
206    pub fn emit(
207        self,
208        out: &mut dyn fmt::Write,
209        _arch: InlineAsmArch,
210        modifier: Option<char>,
211    ) -> fmt::Result {
212        let (prefix, index) = if let Some(index) = self.reg_index() {
213            (modifier.unwrap_or('x'), index)
214        } else if let Some(index) = self.vreg_index() {
215            (modifier.unwrap_or('v'), index)
216        } else {
217            return out.write_str(self.name());
218        };
219        assert!(index < 32);
220        write!(out, "{prefix}{index}")
221    }
222
223    /// If the register is an integer register then return its index.
224    pub fn reg_index(self) -> Option<u32> {
225        // Unlike `vreg_index`, we can't subtract `x0` to get the u32 because
226        // `x19` and `x29` are missing and the integer constants for the
227        // `x0`..`x30` enum variants don't all match the register number. E.g. the
228        // integer constant for `x18` is 18, but the constant for `x20` is 19.
229        use AArch64InlineAsmReg::*;
230        Some(match self {
231            x0 => 0,
232            x1 => 1,
233            x2 => 2,
234            x3 => 3,
235            x4 => 4,
236            x5 => 5,
237            x6 => 6,
238            x7 => 7,
239            x8 => 8,
240            x9 => 9,
241            x10 => 10,
242            x11 => 11,
243            x12 => 12,
244            x13 => 13,
245            x14 => 14,
246            x15 => 15,
247            x16 => 16,
248            x17 => 17,
249            x18 => 18,
250            // x19 is reserved
251            x20 => 20,
252            x21 => 21,
253            x22 => 22,
254            x23 => 23,
255            x24 => 24,
256            x25 => 25,
257            x26 => 26,
258            x27 => 27,
259            x28 => 28,
260            // x29 is reserved
261            x30 => 30,
262            _ => return None,
263        })
264    }
265
266    /// If the register is a vector register then return its index.
267    pub fn vreg_index(self) -> Option<u32> {
268        use AArch64InlineAsmReg::*;
269        if self as u32 >= v0 as u32 && self as u32 <= v31 as u32 {
270            return Some(self as u32 - v0 as u32);
271        }
272        None
273    }
274}