//! Inline assembly support for the LLVM codegen backend
//! (compiler/rustc_codegen_llvm/src/asm.rs).
use std::assert_matches::assert_matches;

use rustc_abi::{BackendRepr, Float, Integer, Primitive, Scalar};
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::Instance;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug};
use rustc_span::{Pos, Span, Symbol, sym};
use rustc_target::asm::*;
use smallvec::SmallVec;
use tracing::debug;

use crate::attributes;
use crate::builder::Builder;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, ToLlvmBool, Type, Value};
use crate::type_of::LayoutLlvmExt;
23impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
24    fn codegen_inline_asm(
25        &mut self,
26        template: &[InlineAsmTemplatePiece],
27        operands: &[InlineAsmOperandRef<'tcx, Self>],
28        options: InlineAsmOptions,
29        line_spans: &[Span],
30        instance: Instance<'_>,
31        dest: Option<Self::BasicBlock>,
32        catch_funclet: Option<(Self::BasicBlock, Option<&Self::Funclet>)>,
33    ) {
34        let asm_arch = self.tcx.sess.asm_arch.unwrap();
35
36        // Collect the types of output operands
37        let mut constraints = ::alloc::vec::Vec::new()vec![];
38        let mut clobbers = ::alloc::vec::Vec::new()vec![];
39        let mut output_types = ::alloc::vec::Vec::new()vec![];
40        let mut op_idx = FxHashMap::default();
41        let mut clobbered_x87 = false;
42        for (idx, op) in operands.iter().enumerate() {
43            match *op {
44                InlineAsmOperandRef::Out { reg, late, place } => {
45                    let is_target_supported = |reg_class: InlineAsmRegClass| {
46                        for &(_, feature) in reg_class.supported_types(asm_arch, true) {
47                            if let Some(feature) = feature {
48                                if self
49                                    .tcx
50                                    .asm_target_features(instance.def_id())
51                                    .contains(&feature)
52                                {
53                                    return true;
54                                }
55                            } else {
56                                // Register class is unconditionally supported
57                                return true;
58                            }
59                        }
60                        false
61                    };
62
63                    let mut layout = None;
64                    let ty = if let Some(ref place) = place {
65                        layout = Some(&place.layout);
66                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout, instance)
67                    } else if #[allow(non_exhaustive_omitted_patterns)] match reg.reg_class() {
    InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg |
        X86InlineAsmRegClass::x87_reg) => true,
    _ => false,
}matches!(
68                        reg.reg_class(),
69                        InlineAsmRegClass::X86(
70                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
71                        )
72                    ) {
73                        // Special handling for x87/mmx registers: we always
74                        // clobber the whole set if one register is marked as
75                        // clobbered. This is due to the way LLVM handles the
76                        // FP stack in inline assembly.
77                        if !clobbered_x87 {
78                            clobbered_x87 = true;
79                            clobbers.push("~{st}".to_string());
80                            for i in 1..=7 {
81                                clobbers.push(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("~{{st({0})}}", i))
    })format!("~{{st({})}}", i));
82                            }
83                        }
84                        continue;
85                    } else if !is_target_supported(reg.reg_class())
86                        || reg.reg_class().is_clobber_only(asm_arch, true)
87                    {
88                        // We turn discarded outputs into clobber constraints
89                        // if the target feature needed by the register class is
90                        // disabled. This is necessary otherwise LLVM will try
91                        // to actually allocate a register for the dummy output.
92                        {
    match reg {
        InlineAsmRegOrRegClass::Reg(_) => {}
        ref left_val => {
            ::core::panicking::assert_matches_failed(left_val,
                "InlineAsmRegOrRegClass::Reg(_)",
                ::core::option::Option::None);
        }
    }
};assert_matches!(reg, InlineAsmRegOrRegClass::Reg(_));
93                        clobbers.push(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("~{0}", reg_to_llvm(reg, None)))
    })format!("~{}", reg_to_llvm(reg, None)));
94                        continue;
95                    } else {
96                        // If the output is discarded, we don't really care what
97                        // type is used. We're just using this to tell LLVM to
98                        // reserve the register.
99                        dummy_output_type(self.cx, reg.reg_class())
100                    };
101                    output_types.push(ty);
102                    op_idx.insert(idx, constraints.len());
103                    let prefix = if late { "=" } else { "=&" };
104                    constraints.push(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0}{1}", prefix,
                reg_to_llvm(reg, layout)))
    })format!("{}{}", prefix, reg_to_llvm(reg, layout)));
105                }
106                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
107                    let layout = if let Some(ref out_place) = out_place {
108                        &out_place.layout
109                    } else {
110                        // LLVM required tied operands to have the same type,
111                        // so we just use the type of the input.
112                        &in_value.layout
113                    };
114                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout, instance);
115                    output_types.push(ty);
116                    op_idx.insert(idx, constraints.len());
117                    let prefix = if late { "=" } else { "=&" };
118                    constraints.push(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0}{1}", prefix,
                reg_to_llvm(reg, Some(layout))))
    })format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
119                }
120                _ => {}
121            }
122        }
123
124        // Collect input operands
125        let mut inputs = ::alloc::vec::Vec::new()vec![];
126        for (idx, op) in operands.iter().enumerate() {
127            match *op {
128                InlineAsmOperandRef::In { reg, value } => {
129                    let llval = llvm_fixup_input(
130                        self,
131                        value.immediate(),
132                        reg.reg_class(),
133                        &value.layout,
134                        instance,
135                    );
136                    inputs.push(llval);
137                    op_idx.insert(idx, constraints.len());
138                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
139                }
140                InlineAsmOperandRef::InOut { reg, late, in_value, out_place: _ } => {
141                    let value = llvm_fixup_input(
142                        self,
143                        in_value.immediate(),
144                        reg.reg_class(),
145                        &in_value.layout,
146                        instance,
147                    );
148                    inputs.push(value);
149
150                    // In the case of fixed registers, we have the choice of
151                    // either using a tied operand or duplicating the constraint.
152                    // We prefer the latter because it matches the behavior of
153                    // Clang.
154                    if late && #[allow(non_exhaustive_omitted_patterns)] match reg {
    InlineAsmRegOrRegClass::Reg(_) => true,
    _ => false,
}matches!(reg, InlineAsmRegOrRegClass::Reg(_)) {
155                        constraints.push(reg_to_llvm(reg, Some(&in_value.layout)));
156                    } else {
157                        constraints.push(::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0}", op_idx[&idx]))
    })format!("{}", op_idx[&idx]));
158                    }
159                }
160                InlineAsmOperandRef::SymFn { instance } => {
161                    inputs.push(self.cx.get_fn(instance));
162                    op_idx.insert(idx, constraints.len());
163                    constraints.push("s".to_string());
164                }
165                InlineAsmOperandRef::SymStatic { def_id } => {
166                    inputs.push(self.cx.get_static(def_id));
167                    op_idx.insert(idx, constraints.len());
168                    constraints.push("s".to_string());
169                }
170                _ => {}
171            }
172        }
173
174        // Build the template string
175        let mut labels = ::alloc::vec::Vec::new()vec![];
176        let mut template_str = String::new();
177        for piece in template {
178            match *piece {
179                InlineAsmTemplatePiece::String(ref s) => {
180                    if s.contains('$') {
181                        for c in s.chars() {
182                            if c == '$' {
183                                template_str.push_str("$$");
184                            } else {
185                                template_str.push(c);
186                            }
187                        }
188                    } else {
189                        template_str.push_str(s)
190                    }
191                }
192                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
193                    match operands[operand_idx] {
194                        InlineAsmOperandRef::In { reg, .. }
195                        | InlineAsmOperandRef::Out { reg, .. }
196                        | InlineAsmOperandRef::InOut { reg, .. } => {
197                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
198                            if let Some(modifier) = modifier {
199                                template_str.push_str(&::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("${{{0}:{1}}}",
                op_idx[&operand_idx], modifier))
    })format!(
200                                    "${{{}:{}}}",
201                                    op_idx[&operand_idx], modifier
202                                ));
203                            } else {
204                                template_str.push_str(&::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("${{{0}}}", op_idx[&operand_idx]))
    })format!("${{{}}}", op_idx[&operand_idx]));
205                            }
206                        }
207                        InlineAsmOperandRef::Const { ref string } => {
208                            // Const operands get injected directly into the template
209                            template_str.push_str(string);
210                        }
211                        InlineAsmOperandRef::SymFn { .. }
212                        | InlineAsmOperandRef::SymStatic { .. } => {
213                            // Only emit the raw symbol name
214                            template_str.push_str(&::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("${{{0}:c}}", op_idx[&operand_idx]))
    })format!("${{{}:c}}", op_idx[&operand_idx]));
215                        }
216                        InlineAsmOperandRef::Label { label } => {
217                            template_str.push_str(&::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("${{{0}:l}}", constraints.len()))
    })format!("${{{}:l}}", constraints.len()));
218                            constraints.push("!i".to_owned());
219                            labels.push(label);
220                        }
221                    }
222                }
223            }
224        }
225
226        constraints.append(&mut clobbers);
227        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
228            match asm_arch {
229                InlineAsmArch::AArch64 | InlineAsmArch::Arm64EC | InlineAsmArch::Arm => {
230                    constraints.push("~{cc}".to_string());
231                }
232                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
233                    constraints.extend_from_slice(&[
234                        "~{dirflag}".to_string(),
235                        "~{fpsr}".to_string(),
236                        "~{flags}".to_string(),
237                    ]);
238                }
239                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
240                    constraints.extend_from_slice(&[
241                        "~{fflags}".to_string(),
242                        "~{vtype}".to_string(),
243                        "~{vl}".to_string(),
244                        "~{vxsat}".to_string(),
245                        "~{vxrm}".to_string(),
246                    ]);
247                }
248                InlineAsmArch::Avr => {
249                    constraints.push("~{sreg}".to_string());
250                }
251                InlineAsmArch::Nvptx64 => {}
252                InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
253                InlineAsmArch::Hexagon => {}
254                InlineAsmArch::LoongArch32 | InlineAsmArch::LoongArch64 => {
255                    constraints.extend_from_slice(&[
256                        "~{$fcc0}".to_string(),
257                        "~{$fcc1}".to_string(),
258                        "~{$fcc2}".to_string(),
259                        "~{$fcc3}".to_string(),
260                        "~{$fcc4}".to_string(),
261                        "~{$fcc5}".to_string(),
262                        "~{$fcc6}".to_string(),
263                        "~{$fcc7}".to_string(),
264                    ]);
265                }
266                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
267                InlineAsmArch::S390x => {
268                    constraints.push("~{cc}".to_string());
269                }
270                InlineAsmArch::Sparc | InlineAsmArch::Sparc64 => {
271                    // In LLVM, ~{icc} represents icc and xcc in 64-bit code.
272                    // https://github.com/llvm/llvm-project/blob/llvmorg-19.1.0/llvm/lib/Target/Sparc/SparcRegisterInfo.td#L64
273                    constraints.push("~{icc}".to_string());
274                    constraints.push("~{fcc0}".to_string());
275                    constraints.push("~{fcc1}".to_string());
276                    constraints.push("~{fcc2}".to_string());
277                    constraints.push("~{fcc3}".to_string());
278                }
279                InlineAsmArch::SpirV => {}
280                InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
281                InlineAsmArch::Bpf => {}
282                InlineAsmArch::Msp430 => {
283                    constraints.push("~{sr}".to_string());
284                }
285                InlineAsmArch::M68k => {
286                    constraints.push("~{ccr}".to_string());
287                }
288                InlineAsmArch::CSKY => {
289                    constraints.push("~{psr}".to_string());
290                }
291            }
292        }
293        if !options.contains(InlineAsmOptions::NOMEM) {
294            // This is actually ignored by LLVM, but it's probably best to keep
295            // it just in case. LLVM instead uses the ReadOnly/ReadNone
296            // attributes on the call instruction to optimize.
297            constraints.push("~{memory}".to_string());
298        }
299        let volatile = !options.contains(InlineAsmOptions::PURE);
300        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
301        let output_type = match &output_types[..] {
302            [] => self.type_void(),
303            [ty] => ty,
304            tys => self.type_struct(tys, false),
305        };
306        let dialect = match asm_arch {
307            InlineAsmArch::X86 | InlineAsmArch::X86_64
308                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
309            {
310                llvm::AsmDialect::Intel
311            }
312            _ => llvm::AsmDialect::Att,
313        };
314        let result = inline_asm_call(
315            self,
316            &template_str,
317            &constraints.join(","),
318            &inputs,
319            output_type,
320            &labels,
321            volatile,
322            alignstack,
323            dialect,
324            line_spans,
325            options.contains(InlineAsmOptions::MAY_UNWIND),
326            dest,
327            catch_funclet,
328        )
329        .unwrap_or_else(|| ::rustc_middle::util::bug::span_bug_fmt(line_spans[0],
    format_args!("LLVM asm constraint validation failed"))span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
330
331        let mut attrs = SmallVec::<[_; 2]>::new();
332        if options.contains(InlineAsmOptions::PURE) {
333            if options.contains(InlineAsmOptions::NOMEM) {
334                attrs.push(llvm::MemoryEffects::None.create_attr(self.cx.llcx));
335            } else if options.contains(InlineAsmOptions::READONLY) {
336                attrs.push(llvm::MemoryEffects::ReadOnly.create_attr(self.cx.llcx));
337            }
338            attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
339        } else if options.contains(InlineAsmOptions::NOMEM) {
340            attrs.push(llvm::MemoryEffects::InaccessibleMemOnly.create_attr(self.cx.llcx));
341        } else if options.contains(InlineAsmOptions::READONLY) {
342            attrs.push(llvm::MemoryEffects::ReadOnlyNotPure.create_attr(self.cx.llcx));
343        }
344        attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
345
346        // Write results to outputs. We need to do this for all possible control flow.
347        //
348        // Note that `dest` maybe populated with unreachable_block when asm goto with outputs
349        // is used (because we need to codegen callbr which always needs a destination), so
350        // here we use the NORETURN option to determine if `dest` should be used.
351        for block in (if options.contains(InlineAsmOptions::NORETURN) { None } else { Some(dest) })
352            .into_iter()
353            .chain(labels.iter().copied().map(Some))
354        {
355            if let Some(block) = block {
356                self.switch_to_block(block);
357            }
358
359            for (idx, op) in operands.iter().enumerate() {
360                if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
361                | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
362                {
363                    let value = if output_types.len() == 1 {
364                        result
365                    } else {
366                        self.extract_value(result, op_idx[&idx] as u64)
367                    };
368                    let value =
369                        llvm_fixup_output(self, value, reg.reg_class(), &place.layout, instance);
370                    OperandValue::Immediate(value).store(self, place);
371                }
372            }
373        }
374    }
375}
376
377impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
378    fn codegen_global_asm(
379        &mut self,
380        template: &[InlineAsmTemplatePiece],
381        operands: &[GlobalAsmOperandRef<'tcx>],
382        options: InlineAsmOptions,
383        _line_spans: &[Span],
384    ) {
385        let asm_arch = self.tcx.sess.asm_arch.unwrap();
386
387        // Build the template string
388        let mut template_str = String::new();
389
390        // On X86 platforms there are two assembly syntaxes. Rust uses intel by default,
391        // but AT&T can be specified explicitly.
392        if #[allow(non_exhaustive_omitted_patterns)] match asm_arch {
    InlineAsmArch::X86 | InlineAsmArch::X86_64 => true,
    _ => false,
}matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) {
393            if options.contains(InlineAsmOptions::ATT_SYNTAX) {
394                template_str.push_str(".att_syntax\n")
395            } else {
396                template_str.push_str(".intel_syntax\n")
397            }
398        }
399
400        for piece in template {
401            match *piece {
402                InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
403                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span } => {
404                    use rustc_codegen_ssa::back::symbol_export::escape_symbol_name;
405                    match operands[operand_idx] {
406                        GlobalAsmOperandRef::Const { ref string } => {
407                            // Const operands get injected directly into the
408                            // template. Note that we don't need to escape $
409                            // here unlike normal inline assembly.
410                            template_str.push_str(string);
411                        }
412                        GlobalAsmOperandRef::SymFn { instance } => {
413                            let llval = self.get_fn(instance);
414                            self.add_compiler_used_global(llval);
415                            let symbol = llvm::build_string(|s| unsafe {
416                                llvm::LLVMRustGetMangledName(llval, s);
417                            })
418                            .expect("symbol is not valid UTF-8");
419                            template_str.push_str(&escape_symbol_name(self.tcx, &symbol, span));
420                        }
421                        GlobalAsmOperandRef::SymStatic { def_id } => {
422                            let llval = self
423                                .renamed_statics
424                                .borrow()
425                                .get(&def_id)
426                                .copied()
427                                .unwrap_or_else(|| self.get_static(def_id));
428                            self.add_compiler_used_global(llval);
429                            let symbol = llvm::build_string(|s| unsafe {
430                                llvm::LLVMRustGetMangledName(llval, s);
431                            })
432                            .expect("symbol is not valid UTF-8");
433                            template_str.push_str(&escape_symbol_name(self.tcx, &symbol, span));
434                        }
435                    }
436                }
437            }
438        }
439
440        // Just to play it safe, if intel was used, reset the assembly syntax to att.
441        if #[allow(non_exhaustive_omitted_patterns)] match asm_arch {
    InlineAsmArch::X86 | InlineAsmArch::X86_64 => true,
    _ => false,
}matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
442            && !options.contains(InlineAsmOptions::ATT_SYNTAX)
443        {
444            template_str.push_str("\n.att_syntax\n");
445        }
446
447        llvm::append_module_inline_asm(self.llmod, template_str.as_bytes());
448    }
449
450    fn mangled_name(&self, instance: Instance<'tcx>) -> String {
451        let llval = self.get_fn(instance);
452        llvm::build_string(|s| unsafe {
453            llvm::LLVMRustGetMangledName(llval, s);
454        })
455        .expect("symbol is not valid UTF-8")
456    }
457}
458
459pub(crate) fn inline_asm_call<'ll>(
460    bx: &mut Builder<'_, 'll, '_>,
461    asm: &str,
462    cons: &str,
463    inputs: &[&'ll Value],
464    output: &'ll llvm::Type,
465    labels: &[&'ll llvm::BasicBlock],
466    volatile: bool,
467    alignstack: bool,
468    dia: llvm::AsmDialect,
469    line_spans: &[Span],
470    unwind: bool,
471    dest: Option<&'ll llvm::BasicBlock>,
472    catch_funclet: Option<(&'ll llvm::BasicBlock, Option<&Funclet<'ll>>)>,
473) -> Option<&'ll Value> {
474    let argtys = inputs
475        .iter()
476        .map(|v| {
477            {
    use ::tracing::__macro_support::Callsite as _;
    static __CALLSITE: ::tracing::callsite::DefaultCallsite =
        {
            static META: ::tracing::Metadata<'static> =
                {
                    ::tracing_core::metadata::Metadata::new("event compiler/rustc_codegen_llvm/src/asm.rs:477",
                        "rustc_codegen_llvm::asm", ::tracing::Level::DEBUG,
                        ::tracing_core::__macro_support::Option::Some("compiler/rustc_codegen_llvm/src/asm.rs"),
                        ::tracing_core::__macro_support::Option::Some(477u32),
                        ::tracing_core::__macro_support::Option::Some("rustc_codegen_llvm::asm"),
                        ::tracing_core::field::FieldSet::new(&["message"],
                            ::tracing_core::callsite::Identifier(&__CALLSITE)),
                        ::tracing::metadata::Kind::EVENT)
                };
            ::tracing::callsite::DefaultCallsite::new(&META)
        };
    let enabled =
        ::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
                &&
                ::tracing::Level::DEBUG <=
                    ::tracing::level_filters::LevelFilter::current() &&
            {
                let interest = __CALLSITE.interest();
                !interest.is_never() &&
                    ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                        interest)
            };
    if enabled {
        (|value_set: ::tracing::field::ValueSet|
                    {
                        let meta = __CALLSITE.metadata();
                        ::tracing::Event::dispatch(meta, &value_set);
                        ;
                    })({
                #[allow(unused_imports)]
                use ::tracing::field::{debug, display, Value};
                let mut iter = __CALLSITE.metadata().fields().iter();
                __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                    ::tracing::__macro_support::Option::Some(&format_args!("Asm Input Type: {0:?}",
                                                    *v) as &dyn Value))])
            });
    } else { ; }
};debug!("Asm Input Type: {:?}", *v);
478            bx.cx.val_ty(*v)
479        })
480        .collect::<Vec<_>>();
481
482    {
    use ::tracing::__macro_support::Callsite as _;
    static __CALLSITE: ::tracing::callsite::DefaultCallsite =
        {
            static META: ::tracing::Metadata<'static> =
                {
                    ::tracing_core::metadata::Metadata::new("event compiler/rustc_codegen_llvm/src/asm.rs:482",
                        "rustc_codegen_llvm::asm", ::tracing::Level::DEBUG,
                        ::tracing_core::__macro_support::Option::Some("compiler/rustc_codegen_llvm/src/asm.rs"),
                        ::tracing_core::__macro_support::Option::Some(482u32),
                        ::tracing_core::__macro_support::Option::Some("rustc_codegen_llvm::asm"),
                        ::tracing_core::field::FieldSet::new(&["message"],
                            ::tracing_core::callsite::Identifier(&__CALLSITE)),
                        ::tracing::metadata::Kind::EVENT)
                };
            ::tracing::callsite::DefaultCallsite::new(&META)
        };
    let enabled =
        ::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
                &&
                ::tracing::Level::DEBUG <=
                    ::tracing::level_filters::LevelFilter::current() &&
            {
                let interest = __CALLSITE.interest();
                !interest.is_never() &&
                    ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                        interest)
            };
    if enabled {
        (|value_set: ::tracing::field::ValueSet|
                    {
                        let meta = __CALLSITE.metadata();
                        ::tracing::Event::dispatch(meta, &value_set);
                        ;
                    })({
                #[allow(unused_imports)]
                use ::tracing::field::{debug, display, Value};
                let mut iter = __CALLSITE.metadata().fields().iter();
                __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                    ::tracing::__macro_support::Option::Some(&format_args!("Asm Output Type: {0:?}",
                                                    output) as &dyn Value))])
            });
    } else { ; }
};debug!("Asm Output Type: {:?}", output);
483    let fty = bx.cx.type_func(&argtys, output);
484
485    // Ask LLVM to verify that the constraints are well-formed.
486    let constraints_ok = unsafe { llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr(), cons.len()) };
487    {
    use ::tracing::__macro_support::Callsite as _;
    static __CALLSITE: ::tracing::callsite::DefaultCallsite =
        {
            static META: ::tracing::Metadata<'static> =
                {
                    ::tracing_core::metadata::Metadata::new("event compiler/rustc_codegen_llvm/src/asm.rs:487",
                        "rustc_codegen_llvm::asm", ::tracing::Level::DEBUG,
                        ::tracing_core::__macro_support::Option::Some("compiler/rustc_codegen_llvm/src/asm.rs"),
                        ::tracing_core::__macro_support::Option::Some(487u32),
                        ::tracing_core::__macro_support::Option::Some("rustc_codegen_llvm::asm"),
                        ::tracing_core::field::FieldSet::new(&["message"],
                            ::tracing_core::callsite::Identifier(&__CALLSITE)),
                        ::tracing::metadata::Kind::EVENT)
                };
            ::tracing::callsite::DefaultCallsite::new(&META)
        };
    let enabled =
        ::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
                &&
                ::tracing::Level::DEBUG <=
                    ::tracing::level_filters::LevelFilter::current() &&
            {
                let interest = __CALLSITE.interest();
                !interest.is_never() &&
                    ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                        interest)
            };
    if enabled {
        (|value_set: ::tracing::field::ValueSet|
                    {
                        let meta = __CALLSITE.metadata();
                        ::tracing::Event::dispatch(meta, &value_set);
                        ;
                    })({
                #[allow(unused_imports)]
                use ::tracing::field::{debug, display, Value};
                let mut iter = __CALLSITE.metadata().fields().iter();
                __CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                    ::tracing::__macro_support::Option::Some(&format_args!("constraint verification result: {0:?}",
                                                    constraints_ok) as &dyn Value))])
            });
    } else { ; }
};debug!("constraint verification result: {:?}", constraints_ok);
488    if !constraints_ok {
489        // LLVM has detected an issue with our constraints, so bail out.
490        return None;
491    }
492
493    let v = unsafe {
494        llvm::LLVMGetInlineAsm(
495            fty,
496            asm.as_ptr(),
497            asm.len(),
498            cons.as_ptr(),
499            cons.len(),
500            volatile.to_llvm_bool(),
501            alignstack.to_llvm_bool(),
502            dia,
503            unwind.to_llvm_bool(),
504        )
505    };
506
507    let call = if !labels.is_empty() {
508        if !catch_funclet.is_none() {
    ::core::panicking::panic("assertion failed: catch_funclet.is_none()")
};assert!(catch_funclet.is_none());
509        bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None)
510    } else if let Some((catch, funclet)) = catch_funclet {
511        bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None)
512    } else {
513        bx.call(fty, None, None, v, inputs, None, None)
514    };
515
516    // Store mark in a metadata node so we can map LLVM errors
517    // back to source locations. See #17552.
518    let key = "srcloc";
519    let kind = bx.get_md_kind_id(key);
520
521    // `srcloc` contains one 64-bit integer for each line of assembly code,
522    // where the lower 32 bits hold the lo byte position and the upper 32 bits
523    // hold the hi byte position.
524    let mut srcloc = ::alloc::vec::Vec::new()vec![];
525    if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
526        // LLVM inserts an extra line to add the ".intel_syntax", so add
527        // a dummy srcloc entry for it.
528        //
529        // Don't do this if we only have 1 line span since that may be
530        // due to the asm template string coming from a macro. LLVM will
531        // default to the first srcloc for lines that don't have an
532        // associated srcloc.
533        srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0)));
534    }
535    srcloc.extend(line_spans.iter().map(|span| {
536        llvm::LLVMValueAsMetadata(
537            bx.const_u64(u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32)),
538        )
539    }));
540    bx.cx.set_metadata_node(call, kind, &srcloc);
541
542    Some(call)
543}
544
545/// If the register is an xmm/ymm/zmm register then return its index.
546fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
547    use X86InlineAsmReg::*;
548    match reg {
549        InlineAsmReg::X86(reg) if reg as u32 >= xmm0 as u32 && reg as u32 <= xmm15 as u32 => {
550            Some(reg as u32 - xmm0 as u32)
551        }
552        InlineAsmReg::X86(reg) if reg as u32 >= ymm0 as u32 && reg as u32 <= ymm15 as u32 => {
553            Some(reg as u32 - ymm0 as u32)
554        }
555        InlineAsmReg::X86(reg) if reg as u32 >= zmm0 as u32 && reg as u32 <= zmm31 as u32 => {
556            Some(reg as u32 - zmm0 as u32)
557        }
558        _ => None,
559    }
560}
561
562/// If the register is an AArch64 integer register then return its index.
563fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> {
564    match reg {
565        InlineAsmReg::AArch64(r) => r.reg_index(),
566        _ => None,
567    }
568}
569
570/// If the register is an AArch64 vector register then return its index.
571fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
572    match reg {
573        InlineAsmReg::AArch64(reg) => reg.vreg_index(),
574        _ => None,
575    }
576}
577
578/// If the register is a Hexagon register pair then return its LLVM double register index.
579/// LLVM uses `d0`, `d1`, ... for Hexagon double registers in inline asm constraints,
580/// not the assembly-printed `r1:0`, `r3:2`, ... format.
581fn hexagon_reg_pair_index(reg: InlineAsmReg) -> Option<u32> {
582    match reg {
583        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r1_0) => Some(0),
584        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r3_2) => Some(1),
585        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r5_4) => Some(2),
586        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r7_6) => Some(3),
587        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r9_8) => Some(4),
588        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r11_10) => Some(5),
589        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r13_12) => Some(6),
590        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r15_14) => Some(7),
591        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r17_16) => Some(8),
592        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r21_20) => Some(10),
593        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r23_22) => Some(11),
594        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r25_24) => Some(12),
595        InlineAsmReg::Hexagon(HexagonInlineAsmReg::r27_26) => Some(13),
596        _ => None,
597    }
598}
599
600/// If the register is a Hexagon HVX vector pair then return its LLVM W-register index.
601/// LLVM uses `w0`, `w1`, ... for Hexagon vector pair registers in inline asm constraints.
602fn hexagon_vreg_pair_index(reg: InlineAsmReg) -> Option<u32> {
603    match reg {
604        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v1_0) => Some(0),
605        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v3_2) => Some(1),
606        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v5_4) => Some(2),
607        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v7_6) => Some(3),
608        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v9_8) => Some(4),
609        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v11_10) => Some(5),
610        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v13_12) => Some(6),
611        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v15_14) => Some(7),
612        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v17_16) => Some(8),
613        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v19_18) => Some(9),
614        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v21_20) => Some(10),
615        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v23_22) => Some(11),
616        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v25_24) => Some(12),
617        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v27_26) => Some(13),
618        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v29_28) => Some(14),
619        InlineAsmReg::Hexagon(HexagonInlineAsmReg::v31_30) => Some(15),
620        _ => None,
621    }
622}
623
624/// Converts a register class to an LLVM constraint code.
625fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
626    use InlineAsmRegClass::*;
627    match reg {
628        // For vector registers LLVM wants the register name to match the type size.
629        InlineAsmRegOrRegClass::Reg(reg) => {
630            if let Some(idx) = xmm_reg_index(reg) {
631                let class = if let Some(layout) = layout {
632                    match layout.size.bytes() {
633                        64 => 'z',
634                        32 => 'y',
635                        _ => 'x',
636                    }
637                } else {
638                    // We use f32 as the type for discarded outputs
639                    'x'
640                };
641                ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{{{0}mm{1}}}", class, idx))
    })format!("{{{}mm{}}}", class, idx)
642            } else if let Some(idx) = a64_reg_index(reg) {
643                let class = if let Some(layout) = layout {
644                    match layout.size.bytes() {
645                        8 => 'x',
646                        _ => 'w',
647                    }
648                } else {
649                    // We use i32 as the type for discarded outputs
650                    'w'
651                };
652                if class == 'x' && reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
653                    // LLVM doesn't recognize x30. use lr instead.
654                    "{lr}".to_string()
655                } else {
656                    ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{{{0}{1}}}", class, idx))
    })format!("{{{}{}}}", class, idx)
657                }
658            } else if let Some(idx) = a64_vreg_index(reg) {
659                let class = if let Some(layout) = layout {
660                    match layout.size.bytes() {
661                        16 => 'q',
662                        8 => 'd',
663                        4 => 's',
664                        2 => 'h',
665                        1 => 'd', // We fixup i8 to i8x8
666                        _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
667                    }
668                } else {
669                    // We use i64x2 as the type for discarded outputs
670                    'q'
671                };
672                ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{{{0}{1}}}", class, idx))
    })format!("{{{}{}}}", class, idx)
673            } else if let Some(idx) = hexagon_reg_pair_index(reg) {
674                // LLVM uses `dN` for Hexagon double registers, not the `rN+1:N` asm syntax.
675                ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{{d{0}}}", idx))
    })format!("{{d{}}}", idx)
676            } else if let Some(idx) = hexagon_vreg_pair_index(reg) {
677                // LLVM uses `wN` for Hexagon HVX vector pair registers.
678                ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{{w{0}}}", idx))
    })format!("{{w{}}}", idx)
679            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
680                // LLVM doesn't recognize r14
681                "{lr}".to_string()
682            } else {
683                ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{{{0}}}", reg.name()))
    })format!("{{{}}}", reg.name())
684            }
685        }
686        // The constraints can be retrieved from
687        // https://llvm.org/docs/LangRef.html#supported-constraint-code-list
688        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
689            AArch64(AArch64InlineAsmRegClass::reg) => "r",
690            AArch64(AArch64InlineAsmRegClass::vreg) => "w",
691            AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
692            AArch64(AArch64InlineAsmRegClass::preg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
693            Arm(ArmInlineAsmRegClass::reg) => "r",
694            Arm(ArmInlineAsmRegClass::sreg)
695            | Arm(ArmInlineAsmRegClass::dreg_low16)
696            | Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
697            Arm(ArmInlineAsmRegClass::sreg_low16)
698            | Arm(ArmInlineAsmRegClass::dreg_low8)
699            | Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
700            Arm(ArmInlineAsmRegClass::dreg) | Arm(ArmInlineAsmRegClass::qreg) => "w",
701            Hexagon(HexagonInlineAsmRegClass::reg) => "r",
702            Hexagon(HexagonInlineAsmRegClass::reg_pair) => "r",
703            Hexagon(HexagonInlineAsmRegClass::preg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
704            Hexagon(HexagonInlineAsmRegClass::vreg) => "v",
705            Hexagon(HexagonInlineAsmRegClass::vreg_pair) => "v",
706            Hexagon(HexagonInlineAsmRegClass::qreg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
707            LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
708            LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
709            Mips(MipsInlineAsmRegClass::reg) => "r",
710            Mips(MipsInlineAsmRegClass::freg) => "f",
711            Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
712            Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
713            Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
714            PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
715            PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
716            PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
717            PowerPC(PowerPCInlineAsmRegClass::vreg) => "v",
718            PowerPC(PowerPCInlineAsmRegClass::vsreg) => "^wa",
719            PowerPC(
720                PowerPCInlineAsmRegClass::cr
721                | PowerPCInlineAsmRegClass::ctr
722                | PowerPCInlineAsmRegClass::lr
723                | PowerPCInlineAsmRegClass::xer
724                | PowerPCInlineAsmRegClass::spe_acc,
725            ) => {
726                {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only")
727            }
728            RiscV(RiscVInlineAsmRegClass::reg) => "r",
729            RiscV(RiscVInlineAsmRegClass::freg) => "f",
730            RiscV(RiscVInlineAsmRegClass::vreg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
731            X86(X86InlineAsmRegClass::reg) => "r",
732            X86(X86InlineAsmRegClass::reg_abcd) => "Q",
733            X86(X86InlineAsmRegClass::reg_byte) => "q",
734            X86(X86InlineAsmRegClass::xmm_reg) | X86(X86InlineAsmRegClass::ymm_reg) => "x",
735            X86(X86InlineAsmRegClass::zmm_reg) => "v",
736            X86(X86InlineAsmRegClass::kreg) => "^Yk",
737            X86(
738                X86InlineAsmRegClass::x87_reg
739                | X86InlineAsmRegClass::mmx_reg
740                | X86InlineAsmRegClass::kreg0
741                | X86InlineAsmRegClass::tmm_reg,
742            ) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
743            Wasm(WasmInlineAsmRegClass::local) => "r",
744            Bpf(BpfInlineAsmRegClass::reg) => "r",
745            Bpf(BpfInlineAsmRegClass::wreg) => "w",
746            Avr(AvrInlineAsmRegClass::reg) => "r",
747            Avr(AvrInlineAsmRegClass::reg_upper) => "d",
748            Avr(AvrInlineAsmRegClass::reg_pair) => "r",
749            Avr(AvrInlineAsmRegClass::reg_iw) => "w",
750            Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
751            S390x(S390xInlineAsmRegClass::reg) => "r",
752            S390x(S390xInlineAsmRegClass::reg_addr) => "a",
753            S390x(S390xInlineAsmRegClass::freg) => "f",
754            S390x(S390xInlineAsmRegClass::vreg) => "v",
755            S390x(S390xInlineAsmRegClass::areg) => {
756                {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only")
757            }
758            Sparc(SparcInlineAsmRegClass::reg) => "r",
759            Sparc(SparcInlineAsmRegClass::yreg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
760            Msp430(Msp430InlineAsmRegClass::reg) => "r",
761            M68k(M68kInlineAsmRegClass::reg) => "r",
762            M68k(M68kInlineAsmRegClass::reg_addr) => "a",
763            M68k(M68kInlineAsmRegClass::reg_data) => "d",
764            CSKY(CSKYInlineAsmRegClass::reg) => "r",
765            CSKY(CSKYInlineAsmRegClass::freg) => "f",
766            SpirV(SpirVInlineAsmRegClass::reg) => ::rustc_middle::util::bug::bug_fmt(format_args!("LLVM backend does not support SPIR-V"))bug!("LLVM backend does not support SPIR-V"),
767            Err => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
768        }
769        .to_string(),
770    }
771}
772
773/// Converts a modifier into LLVM's equivalent modifier.
774fn modifier_to_llvm(
775    arch: InlineAsmArch,
776    reg: InlineAsmRegClass,
777    modifier: Option<char>,
778) -> Option<char> {
779    use InlineAsmRegClass::*;
780    // The modifiers can be retrieved from
781    // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers
782    match reg {
783        AArch64(AArch64InlineAsmRegClass::reg) => modifier,
784        AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
785            if modifier == Some('v') {
786                None
787            } else {
788                modifier
789            }
790        }
791        AArch64(AArch64InlineAsmRegClass::preg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
792        Arm(ArmInlineAsmRegClass::reg) => None,
793        Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => None,
794        Arm(ArmInlineAsmRegClass::dreg)
795        | Arm(ArmInlineAsmRegClass::dreg_low16)
796        | Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
797        Arm(ArmInlineAsmRegClass::qreg)
798        | Arm(ArmInlineAsmRegClass::qreg_low8)
799        | Arm(ArmInlineAsmRegClass::qreg_low4) => {
800            if modifier.is_none() {
801                Some('q')
802            } else {
803                modifier
804            }
805        }
806        Hexagon(_) => None,
807        LoongArch(_) => None,
808        Mips(_) => None,
809        Nvptx(_) => None,
810        PowerPC(PowerPCInlineAsmRegClass::vsreg) => {
811            // The documentation for the 'x' modifier is missing for llvm, and the gcc
812            // documentation is simply "use this for any vsx argument". It is needed
813            // to ensure the correct vsx register number is used.
814            if modifier.is_none() { Some('x') } else { modifier }
815        }
816        PowerPC(_) => None,
817        RiscV(RiscVInlineAsmRegClass::reg) | RiscV(RiscVInlineAsmRegClass::freg) => None,
818        RiscV(RiscVInlineAsmRegClass::vreg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
819        X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
820            None if arch == InlineAsmArch::X86_64 => Some('q'),
821            None => Some('k'),
822            Some('l') => Some('b'),
823            Some('h') => Some('h'),
824            Some('x') => Some('w'),
825            Some('e') => Some('k'),
826            Some('r') => Some('q'),
827            _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
828        },
829        X86(X86InlineAsmRegClass::reg_byte) => None,
830        X86(reg @ X86InlineAsmRegClass::xmm_reg)
831        | X86(reg @ X86InlineAsmRegClass::ymm_reg)
832        | X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
833            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
834            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
835            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
836            (_, Some('x')) => Some('x'),
837            (_, Some('y')) => Some('t'),
838            (_, Some('z')) => Some('g'),
839            _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
840        },
841        X86(X86InlineAsmRegClass::kreg) => None,
842        X86(
843            X86InlineAsmRegClass::x87_reg
844            | X86InlineAsmRegClass::mmx_reg
845            | X86InlineAsmRegClass::kreg0
846            | X86InlineAsmRegClass::tmm_reg,
847        ) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
848        Wasm(WasmInlineAsmRegClass::local) => None,
849        Bpf(_) => None,
850        Avr(AvrInlineAsmRegClass::reg_pair)
851        | Avr(AvrInlineAsmRegClass::reg_iw)
852        | Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
853            Some('h') => Some('B'),
854            Some('l') => Some('A'),
855            _ => None,
856        },
857        Avr(_) => None,
858        S390x(_) => None,
859        Sparc(_) => None,
860        Msp430(_) => None,
861        SpirV(SpirVInlineAsmRegClass::reg) => ::rustc_middle::util::bug::bug_fmt(format_args!("LLVM backend does not support SPIR-V"))bug!("LLVM backend does not support SPIR-V"),
862        M68k(_) => None,
863        CSKY(_) => None,
864        Err => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
865    }
866}
867
868/// Type to use for outputs that are discarded. It doesn't really matter what
869/// the type is, as long as it is valid for the constraint code.
870fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
871    use InlineAsmRegClass::*;
872    match reg {
873        AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
874        AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
875            cx.type_vector(cx.type_i64(), 2)
876        }
877        AArch64(AArch64InlineAsmRegClass::preg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
878        Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
879        Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
880        Arm(ArmInlineAsmRegClass::dreg)
881        | Arm(ArmInlineAsmRegClass::dreg_low16)
882        | Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
883        Arm(ArmInlineAsmRegClass::qreg)
884        | Arm(ArmInlineAsmRegClass::qreg_low8)
885        | Arm(ArmInlineAsmRegClass::qreg_low4) => cx.type_vector(cx.type_i64(), 2),
886        Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
887        Hexagon(HexagonInlineAsmRegClass::reg_pair) => cx.type_i64(),
888        Hexagon(HexagonInlineAsmRegClass::preg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
889        Hexagon(HexagonInlineAsmRegClass::vreg) => {
890            // HVX vector register size depends on the HVX mode.
891            // LLVM's "v" constraint requires the exact vector width.
892            if cx.tcx.sess.unstable_target_features.contains(&sym::hvx_length128b) {
893                cx.type_vector(cx.type_i32(), 32) // 1024-bit for 128B mode
894            } else {
895                cx.type_vector(cx.type_i32(), 16) // 512-bit for 64B mode
896            }
897        }
898        Hexagon(HexagonInlineAsmRegClass::vreg_pair) => {
899            if cx.tcx.sess.unstable_target_features.contains(&sym::hvx_length128b) {
900                cx.type_vector(cx.type_i32(), 64) // 2048-bit for 128B mode
901            } else {
902                cx.type_vector(cx.type_i32(), 32) // 1024-bit for 64B mode
903            }
904        }
905        Hexagon(HexagonInlineAsmRegClass::qreg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
906        LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
907        LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
908        Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
909        Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
910        Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
911        Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
912        Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
913        PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
914        PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
915        PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
916        PowerPC(PowerPCInlineAsmRegClass::vreg) => cx.type_vector(cx.type_i32(), 4),
917        PowerPC(PowerPCInlineAsmRegClass::vsreg) => cx.type_vector(cx.type_i32(), 4),
918        PowerPC(
919            PowerPCInlineAsmRegClass::cr
920            | PowerPCInlineAsmRegClass::ctr
921            | PowerPCInlineAsmRegClass::lr
922            | PowerPCInlineAsmRegClass::xer
923            | PowerPCInlineAsmRegClass::spe_acc,
924        ) => {
925            {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only")
926        }
927        RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
928        RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
929        RiscV(RiscVInlineAsmRegClass::vreg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
930        X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
931        X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
932        X86(X86InlineAsmRegClass::xmm_reg)
933        | X86(X86InlineAsmRegClass::ymm_reg)
934        | X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
935        X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
936        X86(
937            X86InlineAsmRegClass::x87_reg
938            | X86InlineAsmRegClass::mmx_reg
939            | X86InlineAsmRegClass::kreg0
940            | X86InlineAsmRegClass::tmm_reg,
941        ) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
942        Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
943        Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
944        Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
945        Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
946        Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
947        Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
948        Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
949        Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
950        S390x(S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr) => cx.type_i32(),
951        S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
952        S390x(S390xInlineAsmRegClass::vreg) => cx.type_vector(cx.type_i64(), 2),
953        S390x(S390xInlineAsmRegClass::areg) => {
954            {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only")
955        }
956        Sparc(SparcInlineAsmRegClass::reg) => cx.type_i32(),
957        Sparc(SparcInlineAsmRegClass::yreg) => {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("clobber-only")));
}unreachable!("clobber-only"),
958        Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
959        M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
960        M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
961        M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
962        CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
963        CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
964        SpirV(SpirVInlineAsmRegClass::reg) => ::rustc_middle::util::bug::bug_fmt(format_args!("LLVM backend does not support SPIR-V"))bug!("LLVM backend does not support SPIR-V"),
965        Err => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
966    }
967}
968
969/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
970/// the equivalent integer type.
971fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
972    let dl = &cx.tcx.data_layout;
973    match scalar.primitive() {
974        Primitive::Int(Integer::I8, _) => cx.type_i8(),
975        Primitive::Int(Integer::I16, _) => cx.type_i16(),
976        Primitive::Int(Integer::I32, _) => cx.type_i32(),
977        Primitive::Int(Integer::I64, _) => cx.type_i64(),
978        Primitive::Float(Float::F16) => cx.type_f16(),
979        Primitive::Float(Float::F32) => cx.type_f32(),
980        Primitive::Float(Float::F64) => cx.type_f64(),
981        Primitive::Float(Float::F128) => cx.type_f128(),
982        // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
983        Primitive::Pointer(_) => cx.type_from_integer(dl.ptr_sized_integer()),
984        _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
985    }
986}
987
988fn any_target_feature_enabled(
989    cx: &CodegenCx<'_, '_>,
990    instance: Instance<'_>,
991    features: &[Symbol],
992) -> bool {
993    let enabled = cx.tcx.asm_target_features(instance.def_id());
994    features.iter().any(|feat| enabled.contains(feat))
995}
996
997/// Fix up an input value to work around LLVM bugs.
998fn llvm_fixup_input<'ll, 'tcx>(
999    bx: &mut Builder<'_, 'll, 'tcx>,
1000    mut value: &'ll Value,
1001    reg: InlineAsmRegClass,
1002    layout: &TyAndLayout<'tcx>,
1003    instance: Instance<'_>,
1004) -> &'ll Value {
1005    use InlineAsmRegClass::*;
1006    let dl = &bx.tcx.data_layout;
1007    match (reg, layout.backend_repr) {
1008        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
1009            if let Primitive::Int(Integer::I8, _) = s.primitive() {
1010                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
1011                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
1012            } else {
1013                value
1014            }
1015        }
1016        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
1017            if s.primitive() != Primitive::Float(Float::F128) =>
1018        {
1019            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
1020            let count = 16 / layout.size.bytes();
1021            let vec_ty = bx.cx.type_vector(elem_ty, count);
1022            // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
1023            if let Primitive::Pointer(_) = s.primitive() {
1024                let t = bx.type_from_integer(dl.ptr_sized_integer());
1025                value = bx.ptrtoint(value, t);
1026            }
1027            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
1028        }
1029        (
1030            AArch64(AArch64InlineAsmRegClass::vreg_low16),
1031            BackendRepr::SimdVector { element, count },
1032        ) if layout.size.bytes() == 8 => {
1033            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
1034            let vec_ty = bx.cx.type_vector(elem_ty, count);
1035            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
1036            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
1037        }
1038        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
1039            if s.primitive() == Primitive::Float(Float::F64) =>
1040        {
1041            bx.bitcast(value, bx.cx.type_i64())
1042        }
1043        (
1044            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
1045            BackendRepr::SimdVector { .. },
1046        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
1047        (
1048            X86(
1049                X86InlineAsmRegClass::xmm_reg
1050                | X86InlineAsmRegClass::ymm_reg
1051                | X86InlineAsmRegClass::zmm_reg,
1052            ),
1053            BackendRepr::Scalar(s),
1054        ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
1055            && s.primitive() == Primitive::Float(Float::F128) =>
1056        {
1057            bx.bitcast(value, bx.type_vector(bx.type_i32(), 4))
1058        }
1059        (
1060            X86(
1061                X86InlineAsmRegClass::xmm_reg
1062                | X86InlineAsmRegClass::ymm_reg
1063                | X86InlineAsmRegClass::zmm_reg,
1064            ),
1065            BackendRepr::Scalar(s),
1066        ) if s.primitive() == Primitive::Float(Float::F16) => {
1067            let value = bx.insert_element(
1068                bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
1069                value,
1070                bx.const_usize(0),
1071            );
1072            bx.bitcast(value, bx.type_vector(bx.type_i16(), 8))
1073        }
1074        (
1075            X86(
1076                X86InlineAsmRegClass::xmm_reg
1077                | X86InlineAsmRegClass::ymm_reg
1078                | X86InlineAsmRegClass::zmm_reg,
1079            ),
1080            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
1081        ) if element.primitive() == Primitive::Float(Float::F16) => {
1082            bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
1083        }
1084        (
1085            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
1086            BackendRepr::Scalar(s),
1087        ) => {
1088            if let Primitive::Int(Integer::I32, _) = s.primitive() {
1089                bx.bitcast(value, bx.cx.type_f32())
1090            } else {
1091                value
1092            }
1093        }
1094        (
1095            Arm(
1096                ArmInlineAsmRegClass::dreg
1097                | ArmInlineAsmRegClass::dreg_low8
1098                | ArmInlineAsmRegClass::dreg_low16,
1099            ),
1100            BackendRepr::Scalar(s),
1101        ) => {
1102            if let Primitive::Int(Integer::I64, _) = s.primitive() {
1103                bx.bitcast(value, bx.cx.type_f64())
1104            } else {
1105                value
1106            }
1107        }
1108        (
1109            Arm(
1110                ArmInlineAsmRegClass::dreg
1111                | ArmInlineAsmRegClass::dreg_low8
1112                | ArmInlineAsmRegClass::dreg_low16
1113                | ArmInlineAsmRegClass::qreg
1114                | ArmInlineAsmRegClass::qreg_low4
1115                | ArmInlineAsmRegClass::qreg_low8,
1116            ),
1117            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
1118        ) if element.primitive() == Primitive::Float(Float::F16) => {
1119            bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
1120        }
1121        (LoongArch(LoongArchInlineAsmRegClass::freg), BackendRepr::Scalar(s))
1122            if s.primitive() == Primitive::Float(Float::F16) =>
1123        {
1124            // Smaller floats are always "NaN-boxed" inside larger floats on LoongArch.
1125            let value = bx.bitcast(value, bx.type_i16());
1126            let value = bx.zext(value, bx.type_i32());
1127            let value = bx.or(value, bx.const_u32(0xFFFF_0000));
1128            bx.bitcast(value, bx.type_f32())
1129        }
1130        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
1131            match s.primitive() {
1132                // MIPS only supports register-length arithmetics.
1133                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
1134                Primitive::Float(Float::F32) => bx.bitcast(value, bx.cx.type_i32()),
1135                Primitive::Float(Float::F64) => bx.bitcast(value, bx.cx.type_i64()),
1136                _ => value,
1137            }
1138        }
1139        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
1140            if s.primitive() == Primitive::Float(Float::F16)
1141                && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
1142        {
1143            // Smaller floats are always "NaN-boxed" inside larger floats on RISC-V.
1144            let value = bx.bitcast(value, bx.type_i16());
1145            let value = bx.zext(value, bx.type_i32());
1146            let value = bx.or(value, bx.const_u32(0xFFFF_0000));
1147            bx.bitcast(value, bx.type_f32())
1148        }
1149        (
1150            PowerPC(PowerPCInlineAsmRegClass::vreg | PowerPCInlineAsmRegClass::vsreg),
1151            BackendRepr::Scalar(s),
1152        ) if s.primitive() == Primitive::Float(Float::F32) => {
1153            let value = bx.insert_element(
1154                bx.const_undef(bx.type_vector(bx.type_f32(), 4)),
1155                value,
1156                bx.const_usize(0),
1157            );
1158            bx.bitcast(value, bx.type_vector(bx.type_f32(), 4))
1159        }
1160        (
1161            PowerPC(PowerPCInlineAsmRegClass::vreg | PowerPCInlineAsmRegClass::vsreg),
1162            BackendRepr::Scalar(s),
1163        ) if s.primitive() == Primitive::Float(Float::F64) => {
1164            let value = bx.insert_element(
1165                bx.const_undef(bx.type_vector(bx.type_f64(), 2)),
1166                value,
1167                bx.const_usize(0),
1168            );
1169            bx.bitcast(value, bx.type_vector(bx.type_f64(), 2))
1170        }
1171        _ => value,
1172    }
1173}
1174
/// Fix up an output value to work around LLVM bugs.
///
/// This is the inverse of the input-side fixup: each arm converts the value
/// read back from the asm (which has the register-friendly LLVM type chosen
/// by `llvm_fixup_output_type`) into the LLVM type that `layout` expects.
/// The arms and their guards must stay in sync with `llvm_fixup_output_type`
/// (and with the input-side fixup), since they are matched pairwise.
fn llvm_fixup_output<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
    instance: Instance<'_>,
) -> &'ll Value {
    use InlineAsmRegClass::*;
    match (reg, layout.backend_repr) {
        // AArch64 `vreg` with an i8 scalar: the value sits in lane 0 of an
        // 8 x i8 vector, so extract it back out.
        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        // AArch64 `vreg_low16` scalar (anything but f128): extract lane 0 of
        // the vector; pointers travelled through the register as pointer-sized
        // integers, so convert them back with `inttoptr`.
        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
            if s.primitive() != Primitive::Float(Float::F128) =>
        {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer(_) = s.primitive() {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        // AArch64 `vreg_low16` with an 8-byte SIMD vector: the asm operand is
        // a vector with twice the lanes; shuffle the low `count` lanes back
        // into a vector of the original length.
        (
            AArch64(AArch64InlineAsmRegClass::vreg_low16),
            BackendRepr::SimdVector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        // x86 `reg_abcd` with an f64 scalar: the value comes back as i64;
        // bitcast it to f64.
        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        // x86 xmm/zmm with a 64-byte vector: the asm operand is <8 x double>;
        // bitcast back to the layout's own vector type.
        (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            BackendRepr::SimdVector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        // 32-bit x86 with an f128 scalar: the asm operand is <4 x i32>;
        // bitcast back to f128.
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
            && s.primitive() == Primitive::Float(Float::F128) =>
        {
            bx.bitcast(value, bx.type_f128())
        }
        // x86 with an f16 scalar: the asm operand is <8 x i16>; reinterpret
        // as <8 x half> and extract lane 0.
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F16) => {
            let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
            bx.extract_element(value, bx.const_usize(0))
        }
        // x86 with an f16 vector of 8 or 16 lanes: the asm operand is the
        // same-length i16 vector; bitcast the lanes back to f16.
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
        }
        // Arm `sreg` with an i32 scalar: the value comes back as f32; bitcast
        // it to i32.
        (
            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        // Arm `dreg` with an i64 scalar: the value comes back as f64; bitcast
        // it to i64.
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        // Arm d/q registers with an f16 vector of 4 or 8 lanes: bitcast the
        // i16 vector coming out of the asm back to f16 lanes.
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16
                | ArmInlineAsmRegClass::qreg
                | ArmInlineAsmRegClass::qreg_low4
                | ArmInlineAsmRegClass::qreg_low8,
            ),
            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
        }
        // LoongArch `freg` with f16: undo the NaN-boxing performed on input —
        // take the low 16 bits of the f32 and reinterpret them as f16.
        (LoongArch(LoongArchInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16) =>
        {
            let value = bx.bitcast(value, bx.type_i32());
            let value = bx.trunc(value, bx.type_i16());
            bx.bitcast(value, bx.type_f16())
        }
        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetics.
                // i8/i16 were zero-extended to i32 on input; truncate back.
                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
                // Floats travelled as same-sized integers; bitcast back.
                Primitive::Float(Float::F32) => bx.bitcast(value, bx.cx.type_f32()),
                Primitive::Float(Float::F64) => bx.bitcast(value, bx.cx.type_f64()),
                _ => value,
            }
        }
        // RISC-V `freg` with f16 when neither `zfhmin` nor `zfh` is enabled:
        // undo the NaN-boxing performed on input — take the low 16 bits of
        // the f32 and reinterpret them as f16.
        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16)
                && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
            let value = bx.bitcast(value, bx.type_i32());
            let value = bx.trunc(value, bx.type_i16());
            bx.bitcast(value, bx.type_f16())
        }
        // PowerPC vector registers with an f32 scalar: the value sits in lane
        // 0 of a <4 x float>; extract it.
        (
            PowerPC(PowerPCInlineAsmRegClass::vreg | PowerPCInlineAsmRegClass::vsreg),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F32) => {
            let value = bx.bitcast(value, bx.type_vector(bx.type_f32(), 4));
            bx.extract_element(value, bx.const_usize(0))
        }
        // PowerPC vector registers with an f64 scalar: the value sits in lane
        // 0 of a <2 x double>; extract it.
        (
            PowerPC(PowerPCInlineAsmRegClass::vreg | PowerPCInlineAsmRegClass::vsreg),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F64) => {
            let value = bx.bitcast(value, bx.type_vector(bx.type_f64(), 2));
            bx.extract_element(value, bx.const_usize(0))
        }
        // Everything else needs no fixup.
        _ => value,
    }
}
1331
/// Output type to use for llvm_fixup_output.
///
/// Returns the LLVM type that the inline-asm output operand carries for the
/// given register class and layout; `llvm_fixup_output` then converts a value
/// of this type back into `layout`'s own LLVM type. The arms and guards here
/// must stay in sync with `llvm_fixup_output`, since they are matched
/// pairwise.
fn llvm_fixup_output_type<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
    instance: Instance<'_>,
) -> &'ll Type {
    use InlineAsmRegClass::*;
    match (reg, layout.backend_repr) {
        // AArch64 `vreg` with an i8 scalar is handled as an 8 x i8 vector.
        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        // AArch64 `vreg_low16` scalar (anything but f128): a full 16-byte
        // vector with the scalar's element type; the value-side fixups handle
        // any pointer <-> integer conversion.
        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
            if s.primitive() != Primitive::Float(Float::F128) =>
        {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        // AArch64 `vreg_low16` with an 8-byte SIMD vector is widened to twice
        // the lane count.
        (
            AArch64(AArch64InlineAsmRegClass::vreg_low16),
            BackendRepr::SimdVector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        // x86 `reg_abcd` carries f64 as i64.
        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F64) =>
        {
            cx.type_i64()
        }
        // x86 xmm/zmm: any 64-byte vector is handled as <8 x double>.
        (
            X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            BackendRepr::SimdVector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        // 32-bit x86 carries f128 as <4 x i32>.
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
            && s.primitive() == Primitive::Float(Float::F128) =>
        {
            cx.type_vector(cx.type_i32(), 4)
        }
        // x86 carries an f16 scalar as <8 x i16>.
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
        // x86 carries f16 vectors of 8 or 16 lanes as i16 vectors.
        (
            X86(
                X86InlineAsmRegClass::xmm_reg
                | X86InlineAsmRegClass::ymm_reg
                | X86InlineAsmRegClass::zmm_reg,
            ),
            BackendRepr::SimdVector { element, count: count @ (8 | 16) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            cx.type_vector(cx.type_i16(), count)
        }
        // Arm `sreg` carries i32 as f32.
        (
            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        // Arm `dreg` carries i64 as f64.
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            BackendRepr::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        // Arm d/q registers carry f16 vectors of 4 or 8 lanes as i16 vectors.
        (
            Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16
                | ArmInlineAsmRegClass::qreg
                | ArmInlineAsmRegClass::qreg_low4
                | ArmInlineAsmRegClass::qreg_low8,
            ),
            BackendRepr::SimdVector { element, count: count @ (4 | 8) },
        ) if element.primitive() == Primitive::Float(Float::F16) => {
            cx.type_vector(cx.type_i16(), count)
        }
        // LoongArch `freg` carries f16 NaN-boxed inside an f32.
        (LoongArch(LoongArchInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16) =>
        {
            cx.type_f32()
        }
        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetics.
                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
                Primitive::Float(Float::F32) => cx.type_i32(),
                Primitive::Float(Float::F64) => cx.type_i64(),
                _ => layout.llvm_type(cx),
            }
        }
        // RISC-V `freg` carries f16 NaN-boxed inside an f32, but only when
        // neither `zfhmin` nor `zfh` is enabled.
        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
            if s.primitive() == Primitive::Float(Float::F16)
                && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
            cx.type_f32()
        }
        // PowerPC vector registers carry f32/f64 scalars in lane 0 of a
        // full-width vector.
        (
            PowerPC(PowerPCInlineAsmRegClass::vreg | PowerPCInlineAsmRegClass::vsreg),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F32) => cx.type_vector(cx.type_f32(), 4),
        (
            PowerPC(PowerPCInlineAsmRegClass::vreg | PowerPCInlineAsmRegClass::vsreg),
            BackendRepr::Scalar(s),
        ) if s.primitive() == Primitive::Float(Float::F64) => cx.type_vector(cx.type_f64(), 2),
        // Everything else uses the layout's own LLVM type unchanged.
        _ => layout.llvm_type(cx),
    }
}