rustc_codegen_llvm/
intrinsic.rs

use std::assert_matches::assert_matches;
use std::cmp::Ordering;

use rustc_abi::{self as abi, Align, Float, HasDataLayout, Primitive, Size};
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_hir as hir;
use rustc_middle::mir::BinOp;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
use rustc_middle::ty::{self, GenericArgsRef, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{Span, Symbol, sym};
use rustc_target::spec::{HasTargetSpec, PanicStrategy};
use tracing::debug;

use crate::abi::{ExternAbi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, Metadata};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;

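/// Maps a "simple" intrinsic (one that lowers directly to a single LLVM
/// intrinsic call, such as the float math operations below) to the type and
/// declaration of the corresponding LLVM intrinsic. Returns `None` for
/// anything that needs custom lowering.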
fn get_simple_intrinsic<'ll>(
    cx: &CodegenCx<'ll, '_>,
    name: Symbol,
) -> Option<(&'ll Type, &'ll Value)> {
    let llvm_name = match name {
        sym::sqrtf16 => "llvm.sqrt.f16",
        sym::sqrtf32 => "llvm.sqrt.f32",
        sym::sqrtf64 => "llvm.sqrt.f64",
        sym::sqrtf128 => "llvm.sqrt.f128",

        sym::powif16 => "llvm.powi.f16.i32",
        sym::powif32 => "llvm.powi.f32.i32",
        sym::powif64 => "llvm.powi.f64.i32",
        sym::powif128 => "llvm.powi.f128.i32",

        sym::sinf16 => "llvm.sin.f16",
        sym::sinf32 => "llvm.sin.f32",
        sym::sinf64 => "llvm.sin.f64",
        sym::sinf128 => "llvm.sin.f128",

        sym::cosf16 => "llvm.cos.f16",
        sym::cosf32 => "llvm.cos.f32",
        sym::cosf64 => "llvm.cos.f64",
        sym::cosf128 => "llvm.cos.f128",

        sym::powf16 => "llvm.pow.f16",
        sym::powf32 => "llvm.pow.f32",
        sym::powf64 => "llvm.pow.f64",
        sym::powf128 => "llvm.pow.f128",

        sym::expf16 => "llvm.exp.f16",
        sym::expf32 => "llvm.exp.f32",
        sym::expf64 => "llvm.exp.f64",
        sym::expf128 => "llvm.exp.f128",

        sym::exp2f16 => "llvm.exp2.f16",
        sym::exp2f32 => "llvm.exp2.f32",
        sym::exp2f64 => "llvm.exp2.f64",
        sym::exp2f128 => "llvm.exp2.f128",

        sym::logf16 => "llvm.log.f16",
        sym::logf32 => "llvm.log.f32",
        sym::logf64 => "llvm.log.f64",
        sym::logf128 => "llvm.log.f128",

        sym::log10f16 => "llvm.log10.f16",
        sym::log10f32 => "llvm.log10.f32",
        sym::log10f64 => "llvm.log10.f64",
        sym::log10f128 => "llvm.log10.f128",

        sym::log2f16 => "llvm.log2.f16",
        sym::log2f32 => "llvm.log2.f32",
        sym::log2f64 => "llvm.log2.f64",
        sym::log2f128 => "llvm.log2.f128",

        sym::fmaf16 => "llvm.fma.f16",
        sym::fmaf32 => "llvm.fma.f32",
        sym::fmaf64 => "llvm.fma.f64",
        sym::fmaf128 => "llvm.fma.f128",

        sym::fmuladdf16 => "llvm.fmuladd.f16",
        sym::fmuladdf32 => "llvm.fmuladd.f32",
        sym::fmuladdf64 => "llvm.fmuladd.f64",
        sym::fmuladdf128 => "llvm.fmuladd.f128",

        sym::fabsf16 => "llvm.fabs.f16",
        sym::fabsf32 => "llvm.fabs.f32",
        sym::fabsf64 => "llvm.fabs.f64",
        sym::fabsf128 => "llvm.fabs.f128",

        sym::minnumf16 => "llvm.minnum.f16",
        sym::minnumf32 => "llvm.minnum.f32",
        sym::minnumf64 => "llvm.minnum.f64",
        sym::minnumf128 => "llvm.minnum.f128",

        sym::maxnumf16 => "llvm.maxnum.f16",
        sym::maxnumf32 => "llvm.maxnum.f32",
        sym::maxnumf64 => "llvm.maxnum.f64",
        sym::maxnumf128 => "llvm.maxnum.f128",

        sym::copysignf16 => "llvm.copysign.f16",
        sym::copysignf32 => "llvm.copysign.f32",
        sym::copysignf64 => "llvm.copysign.f64",
        sym::copysignf128 => "llvm.copysign.f128",

        sym::floorf16 => "llvm.floor.f16",
        sym::floorf32 => "llvm.floor.f32",
        sym::floorf64 => "llvm.floor.f64",
        sym::floorf128 => "llvm.floor.f128",

        sym::ceilf16 => "llvm.ceil.f16",
        sym::ceilf32 => "llvm.ceil.f32",
        sym::ceilf64 => "llvm.ceil.f64",
        sym::ceilf128 => "llvm.ceil.f128",

        sym::truncf16 => "llvm.trunc.f16",
        sym::truncf32 => "llvm.trunc.f32",
        sym::truncf64 => "llvm.trunc.f64",
        sym::truncf128 => "llvm.trunc.f128",

        sym::rintf16 => "llvm.rint.f16",
        sym::rintf32 => "llvm.rint.f32",
        sym::rintf64 => "llvm.rint.f64",
        sym::rintf128 => "llvm.rint.f128",

        sym::nearbyintf16 => "llvm.nearbyint.f16",
        sym::nearbyintf32 => "llvm.nearbyint.f32",
        sym::nearbyintf64 => "llvm.nearbyint.f64",
        sym::nearbyintf128 => "llvm.nearbyint.f128",

        sym::roundf16 => "llvm.round.f16",
        sym::roundf32 => "llvm.round.f32",
        sym::roundf64 => "llvm.round.f64",
        sym::roundf128 => "llvm.round.f128",

        sym::ptr_mask => "llvm.ptrmask",

        sym::roundevenf16 => "llvm.roundeven.f16",
        sym::roundevenf32 => "llvm.roundeven.f32",
        sym::roundevenf64 => "llvm.roundeven.f64",
        sym::roundevenf128 => "llvm.roundeven.f128",

        _ => return None,
    };
    Some(cx.get_intrinsic(llvm_name))
}

impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, self.typing_env());

        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = tcx.item_name(def_id);

        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let simple = get_simple_intrinsic(self, name);
        let llval = match name {
            _ if simple.is_some() => {
                let (simple_ty, simple_fn) = simple.unwrap();
                self.call(
                    simple_ty,
                    None,
                    None,
                    simple_fn,
                    &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                    None,
                    Some(instance),
                )
            }
            sym::is_val_statically_known => {
                let intrinsic_type = args[0].layout.immediate_llvm_type(self.cx);
                let kind = self.type_kind(intrinsic_type);
                let intrinsic_name = match kind {
                    TypeKind::Pointer | TypeKind::Integer => {
                        Some(format!("llvm.is.constant.{intrinsic_type:?}"))
                    }
                    // LLVM float types' intrinsic names differ from their type names.
                    TypeKind::Half => Some("llvm.is.constant.f16".to_string()),
                    TypeKind::Float => Some("llvm.is.constant.f32".to_string()),
                    TypeKind::Double => Some("llvm.is.constant.f64".to_string()),
                    TypeKind::FP128 => Some("llvm.is.constant.f128".to_string()),
                    _ => None,
                };
                if let Some(intrinsic_name) = intrinsic_name {
                    self.call_intrinsic(&intrinsic_name, &[args[0].immediate()])
                } else {
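                    // Not an integer, pointer, or float: conservatively report
                    // the value as not statically known.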
                    self.const_bool(false)
                }
            }
            sym::select_unpredictable => {
                let cond = args[0].immediate();
                assert_eq!(args[1].layout, args[2].layout);
                let select = |bx: &mut Self, true_val, false_val| {
                    let result = bx.select(cond, true_val, false_val);
                    bx.set_unpredictable(&result);
                    result
                };
                match (args[1].val, args[2].val) {
                    (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
                        assert!(true_val.llextra.is_none());
                        assert!(false_val.llextra.is_none());
                        assert_eq!(true_val.align, false_val.align);
                        let ptr = select(self, true_val.llval, false_val.llval);
                        let selected =
                            OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
                        selected.store(self, result);
                        return Ok(());
                    }
                    (OperandValue::Immediate(_), OperandValue::Immediate(_))
                    | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
                        let true_val = args[1].immediate_or_packed_pair(self);
                        let false_val = args[2].immediate_or_packed_pair(self);
                        select(self, true_val, false_val)
                    }
                    (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
                    _ => span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
                }
            }
            sym::catch_unwind => {
                catch_unwind_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    llresult,
                );
                return Ok(());
            }
            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
            sym::va_copy => {
                self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
            }
            sym::va_arg => {
                match fn_abi.ret.layout.backend_repr {
                    abi::BackendRepr::Scalar(scalar) => {
                        match scalar.primitive() {
                            Primitive::Int(..) => {
                                if self.cx().size_of(ret_ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, llret_ty)
                                } else {
                                    emit_va_arg(self, args[0], ret_ty)
                                }
                            }
                            Primitive::Float(Float::F16) => {
                                bug!("the va_arg intrinsic does not work with `f16`")
                            }
                            Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
                                emit_va_arg(self, args[0], ret_ty)
                            }
                            // `va_arg` should never be used with the return type f32.
                            Primitive::Float(Float::F32) => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                            Primitive::Float(Float::F128) => {
                                bug!("the va_arg intrinsic does not work with `f128`")
                            }
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }

            sym::volatile_load | sym::unaligned_volatile_load => {
                let tp_ty = fn_args.type_at(0);
                let ptr = args[0].immediate();
                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
                    let llty = ty.llvm_type(self);
                    self.volatile_load(llty, ptr)
                } else {
                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
                };
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    self.align_of(tp_ty).bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                if !result.layout.is_zst() {
                    self.store_to_place(load, result.val);
                }
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                self.call_intrinsic(
                    "llvm.prefetch",
                    &[
                        args[0].immediate(),
                        self.const_i32(rw),
                        args[1].immediate(),
                        self.const_i32(cache_type),
                    ],
                )
            }
            sym::carrying_mul_add => {
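                // Hypothetical u8 example: carrying_mul_add(200, 200, 100, 50)
                // computes 200 * 200 + 100 + 50 = 40150 in the doubled width,
                // then splits it into (low, high) = (214, 156), since
                // 156 * 256 + 214 = 40150.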
                let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);

                let wide_llty = self.type_ix(size.bits() * 2);
                let args = args.as_array().unwrap();
                let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));

                let wide = if signed {
                    let prod = self.unchecked_smul(a, b);
                    let acc = self.unchecked_sadd(prod, c);
                    self.unchecked_sadd(acc, d)
                } else {
                    let prod = self.unchecked_umul(a, b);
                    let acc = self.unchecked_uadd(prod, c);
                    self.unchecked_uadd(acc, d)
                };

                let narrow_llty = self.type_ix(size.bits());
                let low = self.trunc(wide, narrow_llty);
                let bits_const = self.const_uint(wide_llty, size.bits());
                // No need for ashr when signed; LLVM changes it to lshr anyway.
                let high = self.lshr(wide, bits_const);
                // FIXME: could be `trunc nuw`, even for signed.
                let high = self.trunc(high, narrow_llty);

                let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
                let pair = self.const_poison(pair_llty);
                let pair = self.insert_value(pair, low, 0);
                let pair = self.insert_value(pair, high, 1);
                pair
            }
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::rotate_left
            | sym::rotate_right
            | sym::saturating_add
            | sym::saturating_sub => {
                let ty = arg_tys[0];
                if !ty.is_integral() {
                    tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                    return Ok(());
                }
                let (size, signed) = ty.int_size_and_signed(self.tcx);
                let width = size.bits();
                match name {
                    sym::ctlz | sym::cttz => {
                        let y = self.const_bool(false);
                        let ret = self.call_intrinsic(
                            &format!("llvm.{name}.i{width}"),
                            &[args[0].immediate(), y],
                        );

                        self.intcast(ret, llret_ty, false)
                    }
                    sym::ctlz_nonzero => {
                        let y = self.const_bool(true);
                        let llvm_name = &format!("llvm.ctlz.i{width}");
                        let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
                        self.intcast(ret, llret_ty, false)
                    }
                    sym::cttz_nonzero => {
                        let y = self.const_bool(true);
                        let llvm_name = &format!("llvm.cttz.i{width}");
                        let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
                        self.intcast(ret, llret_ty, false)
                    }
                    sym::ctpop => {
                        let ret = self.call_intrinsic(
                            &format!("llvm.ctpop.i{width}"),
                            &[args[0].immediate()],
                        );
                        self.intcast(ret, llret_ty, false)
                    }
                    sym::bswap => {
                        if width == 8 {
                            args[0].immediate() // byte-swapping a u8/i8 is a no-op
                        } else {
                            self.call_intrinsic(
                                &format!("llvm.bswap.i{width}"),
                                &[args[0].immediate()],
                            )
                        }
                    }
                    sym::bitreverse => self.call_intrinsic(
                        &format!("llvm.bitreverse.i{width}"),
                        &[args[0].immediate()],
                    ),
                    sym::rotate_left | sym::rotate_right => {
                        let is_left = name == sym::rotate_left;
                        let val = args[0].immediate();
                        let raw_shift = args[1].immediate();
                        // rotate = funnel shift with first two args the same
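                        // e.g. `rotate_left` on an i32 lowers to `llvm.fshl.i32(val, val, shift)`.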
                        let llvm_name =
                            &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);

                        // LLVM expects the shift to be the same type as the values, but Rust
                        // always uses `u32`.
                        let raw_shift = self.intcast(raw_shift, self.val_ty(val), false);

                        self.call_intrinsic(llvm_name, &[val, val, raw_shift])
                    }
                    sym::saturating_add | sym::saturating_sub => {
                        let is_add = name == sym::saturating_add;
                        let lhs = args[0].immediate();
                        let rhs = args[1].immediate();
                        let llvm_name = &format!(
                            "llvm.{}{}.sat.i{}",
                            if signed { 's' } else { 'u' },
                            if is_add { "add" } else { "sub" },
                            width
                        );
                        self.call_intrinsic(llvm_name, &[lhs, rhs])
                    }
                    _ => bug!(),
                }
            }

            sym::raw_eq => {
                use abi::BackendRepr::*;
                let tp_ty = fn_args.type_at(0);
                let layout = self.layout_of(tp_ty).layout;
                let use_integer_compare = match layout.backend_repr() {
                    Scalar(_) | ScalarPair(_, _) => true,
                    Uninhabited | Vector { .. } => false,
                    Memory { .. } => {
                        // For rusty ABIs, small aggregates are actually passed
                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                        // so we re-use that same threshold here.
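                        // (e.g. aggregates up to 16 bytes on a 64-bit target)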
                        layout.size() <= self.data_layout().pointer_size * 2
                    }
                };

                let a = args[0].immediate();
                let b = args[1].immediate();
                if layout.size().bytes() == 0 {
                    self.const_bool(true)
                } else if use_integer_compare {
                    let integer_ty = self.type_ix(layout.size().bits());
                    let a_val = self.load(integer_ty, a, layout.align().abi);
                    let b_val = self.load(integer_ty, b, layout.align().abi);
                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
                } else {
                    let n = self.const_usize(layout.size().bytes());
                    let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
                    match self.cx.sess().target.arch.as_ref() {
                        "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
                        _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
                    }
                }
            }

            sym::compare_bytes => {
                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
                let cmp = self.call_intrinsic(
                    "memcmp",
                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
                );
                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
                self.sext(cmp, self.type_ix(32))
            }

            sym::black_box => {
                args[0].val.store(self, result);
                let result_val_span = [result.val.llval];
                // We need to "use" the argument in some way LLVM can't introspect, and on
                // targets that support it we can typically leverage inline assembly to do
                // this. LLVM's interpretation of inline assembly is that it's, well, a black
                // box. This isn't the greatest implementation since it probably deoptimizes
                // more than we want, but it's so far good enough.
                //
                // For zero-sized types, the location pointed to by the result may be
                // uninitialized. Do not "use" the result in this case; instead just clobber
                // the memory.
                let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
                    ("~{memory}", &[])
                } else {
                    ("r,~{memory}", &result_val_span)
                };
                crate::asm::inline_asm_call(
                    self,
                    "",
                    constraint,
                    inputs,
                    self.type_void(),
                    &[],
                    true,
                    false,
                    llvm::AsmDialect::Att,
                    &[span],
                    false,
                    None,
                    None,
                )
                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

                // We have copied the value to `result` already.
                return Ok(());
            }

            _ if name.as_str().starts_with("simd_") => {
                // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                // This gives them the expected layout of a regular #[repr(simd)] vector.
                let mut loaded_args = Vec::new();
                for (ty, arg) in arg_tys.iter().zip(args) {
                    loaded_args.push(
                        // #[repr(packed, simd)] vectors are passed like arrays (as references,
                        // with reduced alignment and no padding) rather than as immediates.
                        // We can use a vector load to fix the layout and turn the argument
                        // into an immediate.
                        if ty.is_simd()
                            && let OperandValue::Ref(place) = arg.val
                        {
                            let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
                            let elem_ll_ty = match elem_ty.kind() {
                                ty::Float(f) => self.type_float_from_ty(*f),
                                ty::Int(i) => self.type_int_from_ty(*i),
                                ty::Uint(u) => self.type_uint_from_ty(*u),
                                ty::RawPtr(_, _) => self.type_ptr(),
                                _ => unreachable!(),
                            };
                            let loaded =
                                self.load_from_place(self.type_vector(elem_ll_ty, size), place);
                            OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
                        } else {
                            *arg
                        },
                    );
                }

                let llret_ty = if ret_ty.is_simd()
                    && let abi::BackendRepr::Memory { .. } =
                        self.layout_of(ret_ty).layout.backend_repr
                {
                    let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
                    let elem_ll_ty = match elem_ty.kind() {
                        ty::Float(f) => self.type_float_from_ty(*f),
                        ty::Int(i) => self.type_int_from_ty(*i),
                        ty::Uint(u) => self.type_uint_from_ty(*u),
                        ty::RawPtr(_, _) => self.type_ptr(),
                        _ => unreachable!(),
                    };
                    self.type_vector(elem_ll_ty, size)
                } else {
                    llret_ty
                };

                match generic_simd_intrinsic(
                    self,
                    name,
                    callee_ty,
                    fn_args,
                    &loaded_args,
                    ret_ty,
                    llret_ty,
                    span,
                ) {
                    Ok(llval) => llval,
                    // If there was an error, just skip this invocation... we'll abort compilation
                    // anyway, but we can keep codegen'ing to find more errors.
                    Err(()) => return Ok(()),
                }
            }

            _ => {
                debug!("unknown intrinsic '{}' -- falling back to default body", name);
                // Call the fallback body instead of generating the intrinsic code
                return Err(ty::Instance::new(instance.def_id(), instance.args));
            }
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                self.store(llval, result.val.llval, result.val.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
                    .store(self, result);
            }
        }
        Ok(())
    }

    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[]);
    }

    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[val]);
        }
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
        } else {
            cond
        }
    }

    fn type_test(&mut self, pointer: Self::Value, typeid: Self::Metadata) -> Self::Value {
        // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
        // optimization pass replaces calls to this intrinsic with code to test type membership.
        let typeid = unsafe { llvm::LLVMMetadataAsValue(&self.llcx, typeid) };
        self.call_intrinsic("llvm.type.test", &[pointer, typeid])
    }

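    /// Loads a slot from a vtable at `vtable_byte_offset` via
    /// `llvm.type.checked.load`, which pairs the load with a check that the
    /// vtable is a member of the type identified by `typeid`.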
    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &'ll Metadata,
    ) -> Self::Value {
        let typeid = unsafe { llvm::LLVMMetadataAsValue(&self.llcx, typeid) };
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load =
            self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]);
        self.extract_value(type_checked_load, 0)
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[va_list])
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[va_list])
    }
}

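/// Implements the `catch_unwind` intrinsic: runs `try_func(data)` and, if it
/// unwinds, invokes `catch_func(data, exception)`. Dispatches on the exception
/// handling scheme the target uses (abort, MSVC SEH, wasm EH, emscripten, or
/// the GNU/Itanium model), storing 0 (no panic) or 1 (caught) to `dest`.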
fn catch_unwind_intrinsic<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        let ret_align = bx.tcx().data_layout.i32_align.abi;
        bx.store(bx.const_i32(0), dest, ret_align);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if wants_wasm_eh(bx.sess()) {
        codegen_wasm_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.os == "emscripten" {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of this writing LLVM
// does not recommend using them, as the old instructions are still better
// optimized.
fn codegen_msvc_try<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
        //
        //   catchpad_rust:
        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   catchpad_foreign:
        //      %tok = catchpad within %cs [null, 64, null]
        //      call %catch_func(%data, null)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          void* x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void *data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          } catch(...) {
        //              catch_func(data, NULL);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_size = bx.tcx().data_layout.pointer_size;
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(ptr_size, ptr_align);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // binary.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in library/panic_unwind/src/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));

        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        llvm::set_initializer(tydesc, type_info);

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        bx.switch_to_block(catchpad_rust);
        let flags = bx.const_i32(8);
        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // The flag value of 64 indicates a "catch-all".
        bx.switch_to_block(catchpad_foreign);
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null, flags, null]);
        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// WASM's definition of the `rust_try` function.
fn codegen_wasm_try<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad = bx.append_sibling_block("catchpad");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [null]
        //      %ptr = call @llvm.wasm.get.exception(token %tok)
        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad]);

        bx.switch_to_block(catchpad);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null]);

        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (i.e., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        bx.switch_to_block(catch);
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_ptr());
        bx.add_clause(vals, tydesc);
        let ptr = bx.extract_value(vals, 0);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Variant of codegen_gnu_try used for emscripten where Rust panics are
// implemented using C++ exceptions. Here we use exceptions of a specific type
// (`struct rust_panic`) to represent Rust panics.
fn codegen_emcc_try<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, %selector) = landingpad
        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
        //      %is_rust_panic = %selector == %rust_typeid
        //      %catch_data = alloca { i8*, i8 }
        //      %catch_data[0] = %ptr
        //      %catch_data[1] = %is_rust_panic
        //      call %catch_func(%data, %catch_data)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        bx.switch_to_block(catch);
        let tydesc = bx.eh_catch_typeinfo();
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
        bx.add_clause(vals, tydesc);
        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
        let ptr = bx.extract_value(vals, 0);
        let selector = bx.extract_value(vals, 1);

        // Check if the typeid we got is the one for a Rust panic.
        let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
        let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
        let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());

        // We need to pass two values to catch_func (ptr and is_rust_panic), so
        // create an alloca and pass a pointer to that.
        let ptr_size = bx.tcx().data_layout.pointer_size;
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let i8_align = bx.tcx().data_layout.i8_align.abi;
        // Required in order for there to be no padding between the fields.
        assert!(i8_align <= ptr_align);
        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
        bx.store(ptr, catch_data, ptr_align);
        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
        bx.store(is_rust_panic, catch_data_1, i8_align);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    name: &str,
    rust_fn_sig: ty::PolyFnSig<'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
    let llty = fn_abi.llvm_type(cx);
    let llfn = cx.declare_fn(name, fn_abi, None);
    cx.set_frame_pointer_type(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
    let llbb = Builder::append_block(cx, llfn, "entry-block");
    let bx = Builder::build(cx, llbb);
    codegen(bx);
    (llty, llfn)
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
    // `unsafe fn(*mut i8) -> ()`
    let try_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(*mut i8, *mut i8) -> ()`
    let catch_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p, i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
        [try_fn_ty, i8p, catch_fn_ty],
        tcx.types.i32,
        false,
        hir::Safety::Unsafe,
        ExternAbi::Rust,
    ));
    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

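/// Codegens one of the generic `simd_*` platform intrinsics. Emits an
/// `InvalidMonomorphization` error and returns `Err(())` when the
/// monomorphized argument or return types don't match what the intrinsic
/// requires.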
1157fn generic_simd_intrinsic<'ll, 'tcx>(
1158    bx: &mut Builder<'_, 'll, 'tcx>,
1159    name: Symbol,
1160    callee_ty: Ty<'tcx>,
1161    fn_args: GenericArgsRef<'tcx>,
1162    args: &[OperandRef<'tcx, &'ll Value>],
1163    ret_ty: Ty<'tcx>,
1164    llret_ty: &'ll Type,
1165    span: Span,
1166) -> Result<&'ll Value, ()> {
1167    macro_rules! return_error {
1168        ($diag: expr) => {{
1169            bx.sess().dcx().emit_err($diag);
1170            return Err(());
1171        }};
1172    }
1173
1174    macro_rules! require {
1175        ($cond: expr, $diag: expr) => {
1176            if !$cond {
1177                return_error!($diag);
1178            }
1179        };
1180    }
1181
1182    macro_rules! require_simd {
1183        ($ty: expr, $variant:ident) => {{
1184            require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
1185            $ty.simd_size_and_type(bx.tcx())
1186        }};
1187    }
1188
1189    /// Returns the bitwidth of the `$ty` argument if it is an `Int` type.
1190    macro_rules! require_int_ty {
1191        ($ty: expr, $diag: expr) => {
1192            match $ty {
1193                ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
1194                _ => {
1195                    return_error!($diag);
1196                }
1197            }
1198        };
1199    }
1200
1201    /// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type.
1202    macro_rules! require_int_or_uint_ty {
1203        ($ty: expr, $diag: expr) => {
1204            match $ty {
1205                ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
1206                ty::Uint(i) => {
1207                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
1208                }
1209                _ => {
1210                    return_error!($diag);
1211                }
1212            }
1213        };
1214    }
1215
1216    /// Converts a vector mask, where each element has a bit width equal to the data elements it is used with,
1217    /// down to an i1 based mask that can be used by llvm intrinsics.
1218    ///
1219    /// The rust simd semantics are that each element should either consist of all ones or all zeroes,
1220    /// but this information is not available to llvm. Truncating the vector effectively uses the lowest bit,
1221    /// but codegen for several targets is better if we consider the highest bit by shifting.
1222    ///
1223    /// For x86 SSE/AVX targets this is beneficial since most instructions with mask parameters only consider the highest bit.
1224    /// So even though on llvm level we have an additional shift, in the final assembly there is no shift or truncate and
1225    /// instead the mask can be used as is.
1226    ///
1227    /// For aarch64 and other targets there is a benefit because a mask from the sign bit can be more
1228    /// efficiently converted to an all ones / all zeroes mask by comparing whether each element is negative.
1229    fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
1230        bx: &mut Builder<'a, 'll, 'tcx>,
1231        i_xn: &'ll Value,
1232        in_elem_bitwidth: u64,
1233        in_len: u64,
1234    ) -> &'ll Value {
1235        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
1236        let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1237        let shift_indices = vec![shift_idx; in_len as _];
1238        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1239        // Truncate vector to an <i1 x N>
1240        bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
1241    }
1242
1243    let tcx = bx.tcx();
1244    let sig = tcx.normalize_erasing_late_bound_regions(bx.typing_env(), callee_ty.fn_sig(tcx));
1245    let arg_tys = sig.inputs();
1246
1247    // Sanity-check: all vector arguments must be immediates.
1248    if cfg!(debug_assertions) {
1249        for (ty, arg) in arg_tys.iter().zip(args) {
1250            if ty.is_simd() {
1251                assert_matches!(arg.val, OperandValue::Immediate(_));
1252            }
1253        }
1254    }
1255
1256    if name == sym::simd_select_bitmask {
1257        let (len, _) = require_simd!(arg_tys[1], SimdArgument);
1258
1259        let expected_int_bits = len.max(8).next_power_of_two();
1260        let expected_bytes = len.div_ceil(8);
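        // e.g. for a 4-lane select, the mask must be an 8-bit integer
        // (`expected_int_bits == 8`) or a one-byte array (`expected_bytes == 1`).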
1261
1262        let mask_ty = arg_tys[0];
1263        let mask = match mask_ty.kind() {
1264            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1265            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1266            ty::Array(elem, len)
1267                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1268                    && len
1269                        .try_to_target_usize(bx.tcx)
1270                        .expect("expected monomorphic const in codegen")
1271                        == expected_bytes =>
1272            {
1273                let place = PlaceRef::alloca(bx, args[0].layout);
1274                args[0].val.store(bx, place);
1275                let int_ty = bx.type_ix(expected_bytes * 8);
1276                bx.load(int_ty, place.val.llval, Align::ONE)
1277            }
1278            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
1279                span,
1280                name,
1281                mask_ty,
1282                expected_int_bits,
1283                expected_bytes
1284            }),
1285        };
1286
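        // Truncate the integer mask to exactly `len` bits, then bitcast it to `<len x i1>`
        // so that `select` can pick each lane from the two input vectors.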
1287        let i1 = bx.type_i1();
1288        let im = bx.type_ix(len);
1289        let i1xn = bx.type_vector(i1, len);
1290        let m_im = bx.trunc(mask, im);
1291        let m_i1s = bx.bitcast(m_im, i1xn);
1292        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1293    }
1294
    // Every intrinsic below takes a SIMD vector as its first argument.
1296    let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
1297    let in_ty = arg_tys[0];
1298
1299    let comparison = match name {
1300        sym::simd_eq => Some(BinOp::Eq),
1301        sym::simd_ne => Some(BinOp::Ne),
1302        sym::simd_lt => Some(BinOp::Lt),
1303        sym::simd_le => Some(BinOp::Le),
1304        sym::simd_gt => Some(BinOp::Gt),
1305        sym::simd_ge => Some(BinOp::Ge),
1306        _ => None,
1307    };
1308
1309    if let Some(cmp_op) = comparison {
1310        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1311
1312        require!(
1313            in_len == out_len,
1314            InvalidMonomorphization::ReturnLengthInputType {
1315                span,
1316                name,
1317                in_len,
1318                in_ty,
1319                ret_ty,
1320                out_len
1321            }
1322        );
1323        require!(
1324            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
1325            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
1326        );
1327
1328        return Ok(compare_simd_types(
1329            bx,
1330            args[0].immediate(),
1331            args[1].immediate(),
1332            in_elem,
1333            llret_ty,
1334            cmp_op,
1335        ));
1336    }
1337
1338    if name == sym::simd_shuffle_generic {
1339        let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
1340        let n = idx.len() as u64;
1341
1342        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1343        require!(
1344            out_len == n,
1345            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1346        );
1347        require!(
1348            in_elem == out_ty,
1349            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1350        );
1351
1352        let total_len = in_len * 2;
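        // Shuffle indices address the concatenation of both input vectors, so any value in
        // `0..2 * in_len` is in bounds.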
1353
1354        let indices: Option<Vec<_>> = idx
1355            .iter()
1356            .enumerate()
1357            .map(|(arg_idx, val)| {
1358                let idx = val.unwrap_leaf().to_i32();
1359                if idx >= i32::try_from(total_len).unwrap() {
1360                    bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
1361                        span,
1362                        name,
1363                        arg_idx: arg_idx as u64,
1364                        total_len: total_len.into(),
1365                    });
1366                    None
1367                } else {
1368                    Some(bx.const_i32(idx))
1369                }
1370            })
1371            .collect();
1372        let Some(indices) = indices else {
1373            return Ok(bx.const_null(llret_ty));
1374        };
1375
1376        return Ok(bx.shuffle_vector(
1377            args[0].immediate(),
1378            args[1].immediate(),
1379            bx.const_vector(&indices),
1380        ));
1381    }
1382
1383    if name == sym::simd_shuffle {
1384        // Make sure this is actually a SIMD vector.
1385        let idx_ty = args[2].layout.ty;
1386        let n: u64 = if idx_ty.is_simd()
1387            && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
1388        {
1389            idx_ty.simd_size_and_type(bx.cx.tcx).0
1390        } else {
1391            return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
1392        };
1393
1394        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1395        require!(
1396            out_len == n,
1397            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1398        );
1399        require!(
1400            in_elem == out_ty,
1401            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1402        );
1403
1404        let total_len = u128::from(in_len) * 2;
1405
1406        // Check that the indices are in-bounds.
1407        let indices = args[2].immediate();
1408        for i in 0..n {
1409            let val = bx.const_get_elt(indices, i as u64);
1410            let idx = bx
1411                .const_to_opt_u128(val, true)
1412                .unwrap_or_else(|| bug!("typeck should have already ensured that these are const"));
1413            if idx >= total_len {
1414                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1415                    span,
1416                    name,
1417                    arg_idx: i,
1418                    total_len,
1419                });
1420            }
1421        }
1422
1423        return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
1424    }
1425
1426    if name == sym::simd_insert {
1427        require!(
1428            in_elem == arg_tys[2],
1429            InvalidMonomorphization::InsertedType {
1430                span,
1431                name,
1432                in_elem,
1433                in_ty,
1434                out_ty: arg_tys[2]
1435            }
1436        );
1437        let idx = bx
1438            .const_to_opt_u128(args[1].immediate(), false)
            .expect("typeck should have ensured that this is a const");
1440        if idx >= in_len.into() {
1441            return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1442                span,
1443                name,
1444                arg_idx: 1,
1445                total_len: in_len.into(),
1446            });
1447        }
1448        return Ok(bx.insert_element(
1449            args[0].immediate(),
1450            args[2].immediate(),
1451            bx.const_i32(idx as i32),
1452        ));
1453    }
1454    if name == sym::simd_extract {
1455        require!(
1456            ret_ty == in_elem,
1457            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1458        );
1459        let idx = bx
1460            .const_to_opt_u128(args[1].immediate(), false)
            .expect("typeck should have ensured that this is a const");
1462        if idx >= in_len.into() {
1463            return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1464                span,
1465                name,
1466                arg_idx: 1,
1467                total_len: in_len.into(),
1468            });
1469        }
1470        return Ok(bx.extract_element(args[0].immediate(), bx.const_i32(idx as i32)));
1471    }
1472
1473    if name == sym::simd_select {
1474        let m_elem_ty = in_elem;
1475        let m_len = in_len;
1476        let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
1477        require!(
1478            m_len == v_len,
1479            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
1480        );
1481        let in_elem_bitwidth = require_int_ty!(
1482            m_elem_ty.kind(),
1483            InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }
1484        );
1485        let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len);
1486        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1487    }
1488
1489    if name == sym::simd_bitmask {
        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a vector mask and
        // returns one bit for each lane (each of which must be `0` or `!0`) in the form of either:
        // * an unsigned integer
        // * an array of `u8`
        // If the vector has fewer than 8 lanes, a `u8` is returned with zeroed trailing bits.
        //
        // The bit order of the result depends on the byte endianness: LSB-first for little
        // endian and MSB-first for big endian.
1498        let expected_int_bits = in_len.max(8).next_power_of_two();
1499        let expected_bytes = in_len.div_ceil(8);
1500
        // Integer vector `<in_len x i{in_bitwidth}>`:
1502        let in_elem_bitwidth = require_int_or_uint_ty!(
1503            in_elem.kind(),
1504            InvalidMonomorphization::VectorArgument { span, name, in_ty, in_elem }
1505        );
1506
1507        let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
        // Bitcast `<N x i1>` to `iN`:
1509        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1510
1511        match ret_ty.kind() {
1512            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
1513                // Zero-extend iN to the bitmask type:
1514                return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1515            }
1516            ty::Array(elem, len)
1517                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1518                    && len
1519                        .try_to_target_usize(bx.tcx)
1520                        .expect("expected monomorphic const in codegen")
1521                        == expected_bytes =>
1522            {
1523                // Zero-extend iN to the array length:
1524                let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
1525
1526                // Convert the integer to a byte array
1527                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
1528                bx.store(ze, ptr, Align::ONE);
1529                let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
1530                return Ok(bx.load(array_ty, ptr, Align::ONE));
1531            }
1532            _ => return_error!(InvalidMonomorphization::CannotReturn {
1533                span,
1534                name,
1535                ret_ty,
1536                expected_int_bits,
1537                expected_bytes
1538            }),
1539        }
1540    }
1541
1542    fn simd_simple_float_intrinsic<'ll, 'tcx>(
1543        name: Symbol,
1544        in_elem: Ty<'_>,
1545        in_ty: Ty<'_>,
1546        in_len: u64,
1547        bx: &mut Builder<'_, 'll, 'tcx>,
1548        span: Span,
1549        args: &[OperandRef<'tcx, &'ll Value>],
1550    ) -> Result<&'ll Value, ()> {
1551        macro_rules! return_error {
1552            ($diag: expr) => {{
1553                bx.sess().dcx().emit_err($diag);
1554                return Err(());
1555            }};
1556        }
1557
1558        let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
1559            let elem_ty = bx.cx.type_float_from_ty(*f);
1560            match f.bit_width() {
1561                16 => ("f16", elem_ty),
1562                32 => ("f32", elem_ty),
1563                64 => ("f64", elem_ty),
1564                128 => ("f128", elem_ty),
1565                _ => return_error!(InvalidMonomorphization::FloatingPointVector {
1566                    span,
1567                    name,
1568                    f_ty: *f,
1569                    in_ty,
1570                }),
1571            }
1572        } else {
1573            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
1574        };
1575
1576        let vec_ty = bx.type_vector(elem_ty, in_len);
1577
1578        let (intr_name, fn_ty) = match name {
1579            sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
1580            sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
1581            sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
1582            sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
1583            sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
1584            sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
1585            sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
1586            sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
1587            sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
1588            sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
1589            sym::simd_relaxed_fma => ("fmuladd", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
1590            sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
1591            sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
1592            sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
1593            sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
1594            sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
1595            sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
1596            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
1597        };
1598        let llvm_name = &format!("llvm.{intr_name}.v{in_len}{elem_ty_str}");
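        // e.g. `simd_fsqrt` on a 4 x f32 vector maps to `llvm.sqrt.v4f32`.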
1599        let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
1600        let c = bx.call(
1601            fn_ty,
1602            None,
1603            None,
1604            f,
1605            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1606            None,
1607            None,
1608        );
1609        Ok(c)
1610    }
1611
1612    if std::matches!(
1613        name,
1614        sym::simd_ceil
1615            | sym::simd_fabs
1616            | sym::simd_fcos
1617            | sym::simd_fexp2
1618            | sym::simd_fexp
1619            | sym::simd_flog10
1620            | sym::simd_flog2
1621            | sym::simd_flog
1622            | sym::simd_floor
1623            | sym::simd_fma
1624            | sym::simd_fpow
1625            | sym::simd_fpowi
1626            | sym::simd_fsin
1627            | sym::simd_fsqrt
1628            | sym::simd_relaxed_fma
1629            | sym::simd_round
1630            | sym::simd_trunc
1631    ) {
1632        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
1633    }
1634
1635    // FIXME: use:
1636    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
1637    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
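    // Builds the type suffix used in overloaded intrinsic names, e.g. "v4i32" for a vector
    // of four `i32`s, "v2f64" for two `f64`s, and "v4p0" for four pointers.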
1638    fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
1639        match *elem_ty.kind() {
1640            ty::Int(v) => format!(
1641                "v{}i{}",
1642                vec_len,
1643                // Normalize to prevent crash if v: IntTy::Isize
1644                v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
1645            ),
1646            ty::Uint(v) => format!(
1647                "v{}i{}",
1648                vec_len,
                // Normalize to prevent crash if v: UintTy::Usize
1650                v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
1651            ),
1652            ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
1653            ty::RawPtr(_, _) => format!("v{}p0", vec_len),
1654            _ => unreachable!(),
1655        }
1656    }
1657
1658    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
1659        let elem_ty = match *elem_ty.kind() {
1660            ty::Int(v) => cx.type_int_from_ty(v),
1661            ty::Uint(v) => cx.type_uint_from_ty(v),
1662            ty::Float(v) => cx.type_float_from_ty(v),
1663            ty::RawPtr(_, _) => cx.type_ptr(),
1664            _ => unreachable!(),
1665        };
1666        cx.type_vector(elem_ty, vec_len)
1667    }
1668
1669    if name == sym::simd_gather {
1670        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1671        //             mask: <N x i{M}>) -> <N x T>
1672        // * N: number of elements in the input vectors
1673        // * T: type of the element to load
1674        // * M: any integer width is supported, will be truncated to i1
1675
1676        // All types must be simd vector types
1677
1678        // The second argument must be a simd vector with an element type that's a pointer
1679        // to the element type of the first argument
1680        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1681        let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
1682        // The element type of the third argument must be a signed integer type of any width:
1683        let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
1684        require_simd!(ret_ty, SimdReturn);
1685
1686        // Of the same length:
1687        require!(
1688            in_len == out_len,
1689            InvalidMonomorphization::SecondArgumentLength {
1690                span,
1691                name,
1692                in_len,
1693                in_ty,
1694                arg_ty: arg_tys[1],
1695                out_len
1696            }
1697        );
1698        require!(
1699            in_len == out_len2,
1700            InvalidMonomorphization::ThirdArgumentLength {
1701                span,
1702                name,
1703                in_len,
1704                in_ty,
1705                arg_ty: arg_tys[2],
1706                out_len: out_len2
1707            }
1708        );
1709
1710        // The return type must match the first argument type
1711        require!(
1712            ret_ty == in_ty,
1713            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
1714        );
1715
1716        require!(
1717            matches!(
1718                *element_ty1.kind(),
1719                ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
1720            ),
1721            InvalidMonomorphization::ExpectedElementType {
1722                span,
1723                name,
1724                expected_element: element_ty1,
1725                second_arg: arg_tys[1],
1726                in_elem,
1727                in_ty,
1728                mutability: ExpectedPointerMutability::Not,
1729            }
1730        );
1731
1732        let mask_elem_bitwidth = require_int_ty!(
1733            element_ty2.kind(),
1734            InvalidMonomorphization::ThirdArgElementType {
1735                span,
1736                name,
1737                expected_element: element_ty2,
1738                third_arg: arg_tys[2]
1739            }
1740        );
1741
1742        // Alignment of T, must be a constant integer value:
1743        let alignment_ty = bx.type_i32();
1744        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1745
1746        // Truncate the mask vector to a vector of i1s:
1747        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1748        let mask_ty = bx.type_vector(bx.type_i1(), in_len);
1749
1750        // Type of the vector of pointers:
1751        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1752        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
1753
1754        // Type of the vector of elements:
1755        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1756        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
1757
1758        let llvm_intrinsic =
1759            format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
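        // e.g. `llvm.masked.gather.v4f32.v4p0` when gathering four `f32`s through a vector
        // of four pointers.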
1760        let fn_ty = bx.type_func(
1761            &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
1762            llvm_elem_vec_ty,
1763        );
1764        let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
1765        let v = bx.call(
1766            fn_ty,
1767            None,
1768            None,
1769            f,
1770            &[args[1].immediate(), alignment, mask, args[0].immediate()],
1771            None,
1772            None,
1773        );
1774        return Ok(v);
1775    }
1776
1777    if name == sym::simd_masked_load {
1778        // simd_masked_load(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T>
1779        // * N: number of elements in the input vectors
1780        // * T: type of the element to load
1781        // * M: any integer width is supported, will be truncated to i1
1782        // Loads contiguous elements from memory behind `pointer`, but only for
1783        // those lanes whose `mask` bit is enabled.
1784        // The memory addresses corresponding to the “off” lanes are not accessed.
1785
1786        // The element type of the "mask" argument must be a signed integer type of any width
1787        let mask_ty = in_ty;
1788        let (mask_len, mask_elem) = (in_len, in_elem);
1789
1790        // The second argument must be a pointer matching the element type
1791        let pointer_ty = arg_tys[1];
1792
1793        // The last argument is a passthrough vector providing values for disabled lanes
1794        let values_ty = arg_tys[2];
1795        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1796
1797        require_simd!(ret_ty, SimdReturn);
1798
1799        // Of the same length:
1800        require!(
1801            values_len == mask_len,
1802            InvalidMonomorphization::ThirdArgumentLength {
1803                span,
1804                name,
1805                in_len: mask_len,
1806                in_ty: mask_ty,
1807                arg_ty: values_ty,
1808                out_len: values_len
1809            }
1810        );
1811
1812        // The return type must match the last argument type
1813        require!(
1814            ret_ty == values_ty,
1815            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
1816        );
1817
1818        require!(
1819            matches!(
1820                *pointer_ty.kind(),
1821                ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
1822            ),
1823            InvalidMonomorphization::ExpectedElementType {
1824                span,
1825                name,
1826                expected_element: values_elem,
1827                second_arg: pointer_ty,
1828                in_elem: values_elem,
1829                in_ty: values_ty,
1830                mutability: ExpectedPointerMutability::Not,
1831            }
1832        );
1833
1834        let m_elem_bitwidth = require_int_ty!(
1835            mask_elem.kind(),
1836            InvalidMonomorphization::ThirdArgElementType {
1837                span,
1838                name,
1839                expected_element: values_elem,
1840                third_arg: mask_ty,
1841            }
1842        );
1843
1844        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1845        let mask_ty = bx.type_vector(bx.type_i1(), mask_len);
1846
1847        // Alignment of T, must be a constant integer value:
1848        let alignment_ty = bx.type_i32();
1849        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1850
1851        let llvm_pointer = bx.type_ptr();
1852
1853        // Type of the vector of elements:
1854        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1855        let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len);
1856
1857        let llvm_intrinsic = format!("llvm.masked.load.{llvm_elem_vec_str}.p0");
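        // e.g. `llvm.masked.load.v4f32.p0` for a masked load of four `f32`s.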
1858        let fn_ty = bx
1859            .type_func(&[llvm_pointer, alignment_ty, mask_ty, llvm_elem_vec_ty], llvm_elem_vec_ty);
1860        let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
1861        let v = bx.call(
1862            fn_ty,
1863            None,
1864            None,
1865            f,
1866            &[args[1].immediate(), alignment, mask, args[2].immediate()],
1867            None,
1868            None,
1869        );
1870        return Ok(v);
1871    }
1872
1873    if name == sym::simd_masked_store {
1874        // simd_masked_store(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> ()
1875        // * N: number of elements in the input vectors
        // * T: type of the element to store
1877        // * M: any integer width is supported, will be truncated to i1
1878        // Stores contiguous elements to memory behind `pointer`, but only for
1879        // those lanes whose `mask` bit is enabled.
1880        // The memory addresses corresponding to the “off” lanes are not accessed.
1881
1882        // The element type of the "mask" argument must be a signed integer type of any width
1883        let mask_ty = in_ty;
1884        let (mask_len, mask_elem) = (in_len, in_elem);
1885
1886        // The second argument must be a pointer matching the element type
1887        let pointer_ty = arg_tys[1];
1888
1889        // The last argument specifies the values to store to memory
1890        let values_ty = arg_tys[2];
1891        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1892
1893        // Of the same length:
1894        require!(
1895            values_len == mask_len,
1896            InvalidMonomorphization::ThirdArgumentLength {
1897                span,
1898                name,
1899                in_len: mask_len,
1900                in_ty: mask_ty,
1901                arg_ty: values_ty,
1902                out_len: values_len
1903            }
1904        );
1905
1906        // The second argument must be a mutable pointer type matching the element type
1907        require!(
1908            matches!(
1909                *pointer_ty.kind(),
1910                ty::RawPtr(p_ty, p_mutbl)
1911                    if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
1912            ),
1913            InvalidMonomorphization::ExpectedElementType {
1914                span,
1915                name,
1916                expected_element: values_elem,
1917                second_arg: pointer_ty,
1918                in_elem: values_elem,
1919                in_ty: values_ty,
1920                mutability: ExpectedPointerMutability::Mut,
1921            }
1922        );
1923
1924        let m_elem_bitwidth = require_int_ty!(
1925            mask_elem.kind(),
1926            InvalidMonomorphization::ThirdArgElementType {
1927                span,
1928                name,
1929                expected_element: values_elem,
1930                third_arg: mask_ty,
1931            }
1932        );
1933
1934        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1935        let mask_ty = bx.type_vector(bx.type_i1(), mask_len);
1936
1937        // Alignment of T, must be a constant integer value:
1938        let alignment_ty = bx.type_i32();
1939        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1940
1941        let ret_t = bx.type_void();
1942
1943        let llvm_pointer = bx.type_ptr();
1944
1945        // Type of the vector of elements:
1946        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1947        let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len);
1948
1949        let llvm_intrinsic = format!("llvm.masked.store.{llvm_elem_vec_str}.p0");
1950        let fn_ty = bx.type_func(&[llvm_elem_vec_ty, llvm_pointer, alignment_ty, mask_ty], ret_t);
1951        let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
1952        let v = bx.call(
1953            fn_ty,
1954            None,
1955            None,
1956            f,
1957            &[args[2].immediate(), args[1].immediate(), alignment, mask],
1958            None,
1959            None,
1960        );
1961        return Ok(v);
1962    }
1963
1964    if name == sym::simd_scatter {
1965        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1966        //             mask: <N x i{M}>) -> ()
1967        // * N: number of elements in the input vectors
        // * T: type of the element to store
1969        // * M: any integer width is supported, will be truncated to i1
1970
1971        // All types must be simd vector types
1972        // The second argument must be a simd vector with an element type that's a pointer
1973        // to the element type of the first argument
1974        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1975        let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
1976        let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
1977
1978        // Of the same length:
1979        require!(
1980            in_len == element_len1,
1981            InvalidMonomorphization::SecondArgumentLength {
1982                span,
1983                name,
1984                in_len,
1985                in_ty,
1986                arg_ty: arg_tys[1],
1987                out_len: element_len1
1988            }
1989        );
1990        require!(
1991            in_len == element_len2,
1992            InvalidMonomorphization::ThirdArgumentLength {
1993                span,
1994                name,
1995                in_len,
1996                in_ty,
1997                arg_ty: arg_tys[2],
1998                out_len: element_len2
1999            }
2000        );
2001
2002        require!(
2003            matches!(
2004                *element_ty1.kind(),
2005                ty::RawPtr(p_ty, p_mutbl)
2006                    if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
2007            ),
2008            InvalidMonomorphization::ExpectedElementType {
2009                span,
2010                name,
2011                expected_element: element_ty1,
2012                second_arg: arg_tys[1],
2013                in_elem,
2014                in_ty,
2015                mutability: ExpectedPointerMutability::Mut,
2016            }
2017        );
2018
2019        // The element type of the third argument must be a signed integer type of any width:
2020        let mask_elem_bitwidth = require_int_ty!(
2021            element_ty2.kind(),
2022            InvalidMonomorphization::ThirdArgElementType {
2023                span,
2024                name,
2025                expected_element: element_ty2,
2026                third_arg: arg_tys[2]
2027            }
2028        );
2029
2030        // Alignment of T, must be a constant integer value:
2031        let alignment_ty = bx.type_i32();
2032        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
2033
2034        // Truncate the mask vector to a vector of i1s:
2035        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2036        let mask_ty = bx.type_vector(bx.type_i1(), in_len);
2037
2038        let ret_t = bx.type_void();
2039
2040        // Type of the vector of pointers:
2041        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2042        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
2043
2044        // Type of the vector of elements:
2045        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2046        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
2047
2048        let llvm_intrinsic =
2049            format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
2050        let fn_ty =
2051            bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
2052        let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
2053        let v = bx.call(
2054            fn_ty,
2055            None,
2056            None,
2057            f,
2058            &[args[0].immediate(), args[1].immediate(), alignment, mask],
2059            None,
2060            None,
2061        );
2062        return Ok(v);
2063    }
2064
2065    macro_rules! arith_red {
2066        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
2067         $identity:expr) => {
2068            if name == sym::$name {
2069                require!(
2070                    ret_ty == in_elem,
2071                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2072                );
2073                return match in_elem.kind() {
2074                    ty::Int(_) | ty::Uint(_) => {
2075                        let r = bx.$integer_reduce(args[0].immediate());
2076                        if $ordered {
2077                            // if overflow occurs, the result is the
2078                            // mathematical result modulo 2^n:
2079                            Ok(bx.$op(args[1].immediate(), r))
2080                        } else {
                            // unordered integer reductions ignore the accumulator
                            Ok(r)
2082                        }
2083                    }
2084                    ty::Float(f) => {
2085                        let acc = if $ordered {
2086                            // ordered arithmetic reductions take an accumulator
2087                            args[1].immediate()
2088                        } else {
2089                            // unordered arithmetic reductions use the identity accumulator
2090                            match f.bit_width() {
2091                                32 => bx.const_real(bx.type_f32(), $identity),
2092                                64 => bx.const_real(bx.type_f64(), $identity),
2093                                v => return_error!(
2094                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
2095                                        span,
2096                                        name,
2097                                        symbol: sym::$name,
2098                                        in_ty,
2099                                        in_elem,
2100                                        size: v,
2101                                        ret_ty
2102                                    }
2103                                ),
2104                            }
2105                        };
2106                        Ok(bx.$float_reduce(acc, args[0].immediate()))
2107                    }
2108                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2109                        span,
2110                        name,
2111                        symbol: sym::$name,
2112                        in_ty,
2113                        in_elem,
2114                        ret_ty
2115                    }),
2116                };
2117            }
2118        };
2119    }
2120
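    // For the unordered float reductions, `-0.0` is the additive identity (`-0.0 + x == x`
    // for every `x`, including `x == +0.0`) and `1.0` the multiplicative one.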
2121    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
2122    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
2123    arith_red!(
2124        simd_reduce_add_unordered: vector_reduce_add,
2125        vector_reduce_fadd_reassoc,
2126        false,
2127        add,
2128        -0.0
2129    );
2130    arith_red!(
2131        simd_reduce_mul_unordered: vector_reduce_mul,
2132        vector_reduce_fmul_reassoc,
2133        false,
2134        mul,
2135        1.0
2136    );
2137
2138    macro_rules! minmax_red {
2139        ($name:ident: $int_red:ident, $float_red:ident) => {
2140            if name == sym::$name {
2141                require!(
2142                    ret_ty == in_elem,
2143                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2144                );
2145                return match in_elem.kind() {
2146                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
2147                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
2148                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
2149                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2150                        span,
2151                        name,
2152                        symbol: sym::$name,
2153                        in_ty,
2154                        in_elem,
2155                        ret_ty
2156                    }),
2157                };
2158            }
2159        };
2160    }
2161
2162    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2163    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2164
2165    macro_rules! bitwise_red {
2166        ($name:ident : $red:ident, $boolean:expr) => {
2167            if name == sym::$name {
2168                let input = if !$boolean {
2169                    require!(
2170                        ret_ty == in_elem,
2171                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2172                    );
2173                    args[0].immediate()
2174                } else {
2175                    let bitwidth = match in_elem.kind() {
2176                        ty::Int(i) => {
2177                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
2178                        }
2179                        ty::Uint(i) => {
2180                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
2181                        }
2182                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2183                            span,
2184                            name,
2185                            symbol: sym::$name,
2186                            in_ty,
2187                            in_elem,
2188                            ret_ty
2189                        }),
2190                    };
2191
2192                    vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
2193                };
2194                return match in_elem.kind() {
2195                    ty::Int(_) | ty::Uint(_) => {
2196                        let r = bx.$red(input);
2197                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
2198                    }
2199                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2200                        span,
2201                        name,
2202                        symbol: sym::$name,
2203                        in_ty,
2204                        in_elem,
2205                        ret_ty
2206                    }),
2207                };
2208            }
2209        };
2210    }
2211
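    // `simd_reduce_and`/`or`/`xor` reduce the integer lanes directly; `simd_reduce_all`/`any`
    // first turn the mask into `<N x i1>` via the sign bit, then zero-extend the reduced
    // bit to `bool`.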
2212    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2213    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2214    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2215    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2216    bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2217
2218    if name == sym::simd_cast_ptr {
2219        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2220        require!(
2221            in_len == out_len,
2222            InvalidMonomorphization::ReturnLengthInputType {
2223                span,
2224                name,
2225                in_len,
2226                in_ty,
2227                ret_ty,
2228                out_len
2229            }
2230        );
2231
2232        match in_elem.kind() {
2233            ty::RawPtr(p_ty, _) => {
2234                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2235                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2236                });
2237                require!(
2238                    metadata.is_unit(),
2239                    InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
2240                );
2241            }
2242            _ => {
2243                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2244            }
2245        }
2246        match out_elem.kind() {
2247            ty::RawPtr(p_ty, _) => {
2248                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2249                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2250                });
2251                require!(
2252                    metadata.is_unit(),
2253                    InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
2254                );
2255            }
2256            _ => {
2257                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2258            }
2259        }
2260
2261        return Ok(args[0].immediate());
2262    }
2263
2264    if name == sym::simd_expose_provenance {
2265        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2266        require!(
2267            in_len == out_len,
2268            InvalidMonomorphization::ReturnLengthInputType {
2269                span,
2270                name,
2271                in_len,
2272                in_ty,
2273                ret_ty,
2274                out_len
2275            }
2276        );
2277
2278        match in_elem.kind() {
2279            ty::RawPtr(_, _) => {}
2280            _ => {
2281                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2282            }
2283        }
2284        match out_elem.kind() {
2285            ty::Uint(ty::UintTy::Usize) => {}
2286            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
2287        }
2288
2289        return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
2290    }
2291
2292    if name == sym::simd_with_exposed_provenance {
2293        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2294        require!(
2295            in_len == out_len,
2296            InvalidMonomorphization::ReturnLengthInputType {
2297                span,
2298                name,
2299                in_len,
2300                in_ty,
2301                ret_ty,
2302                out_len
2303            }
2304        );
2305
2306        match in_elem.kind() {
2307            ty::Uint(ty::UintTy::Usize) => {}
2308            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
2309        }
2310        match out_elem.kind() {
2311            ty::RawPtr(_, _) => {}
2312            _ => {
2313                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2314            }
2315        }
2316
2317        return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
2318    }
2319
2320    if name == sym::simd_cast || name == sym::simd_as {
2321        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2322        require!(
2323            in_len == out_len,
2324            InvalidMonomorphization::ReturnLengthInputType {
2325                span,
2326                name,
2327                in_len,
2328                in_ty,
2329                ret_ty,
2330                out_len
2331            }
2332        );
2333        // casting cares about nominal type, not just structural type
2334        if in_elem == out_elem {
2335            return Ok(args[0].immediate());
2336        }
2337
2338        #[derive(Copy, Clone)]
2339        enum Sign {
2340            Unsigned,
2341            Signed,
2342        }
2343        use Sign::*;
2344
2345        enum Style {
2346            Float,
2347            Int(Sign),
2348            Unsupported,
2349        }
2350
2351        let (in_style, in_width) = match in_elem.kind() {
2352            // vectors of pointer-sized integers should've been
2353            // disallowed before here, so this unwrap is safe.
2354            ty::Int(i) => (
2355                Style::Int(Signed),
2356                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2357            ),
2358            ty::Uint(u) => (
2359                Style::Int(Unsigned),
2360                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2361            ),
2362            ty::Float(f) => (Style::Float, f.bit_width()),
2363            _ => (Style::Unsupported, 0),
2364        };
2365        let (out_style, out_width) = match out_elem.kind() {
2366            ty::Int(i) => (
2367                Style::Int(Signed),
2368                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2369            ),
2370            ty::Uint(u) => (
2371                Style::Int(Unsigned),
2372                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2373            ),
2374            ty::Float(f) => (Style::Float, f.bit_width()),
2375            _ => (Style::Unsupported, 0),
2376        };
2377
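        // Narrowing integer casts truncate; widening casts sign- or zero-extend according to
        // the signedness of the *source* type, matching scalar `as` casts.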
2378        match (in_style, out_style) {
2379            (Style::Int(sign), Style::Int(_)) => {
2380                return Ok(match in_width.cmp(&out_width) {
2381                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2382                    Ordering::Equal => args[0].immediate(),
2383                    Ordering::Less => match sign {
2384                        Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2385                        Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2386                    },
2387                });
2388            }
2389            (Style::Int(Sign::Signed), Style::Float) => {
2390                return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2391            }
2392            (Style::Int(Sign::Unsigned), Style::Float) => {
2393                return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2394            }
2395            (Style::Float, Style::Int(sign)) => {
2396                return Ok(match (sign, name == sym::simd_as) {
2397                    (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2398                    (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2399                    (_, true) => bx.cast_float_to_int(
2400                        matches!(sign, Sign::Signed),
2401                        args[0].immediate(),
2402                        llret_ty,
2403                    ),
2404                });
2405            }
2406            (Style::Float, Style::Float) => {
2407                return Ok(match in_width.cmp(&out_width) {
2408                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2409                    Ordering::Equal => args[0].immediate(),
2410                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2411                });
2412            }
2413            _ => { /* Unsupported. Fallthrough. */ }
2414        }
2415        return_error!(InvalidMonomorphization::UnsupportedCast {
2416            span,
2417            name,
2418            in_ty,
2419            in_elem,
2420            ret_ty,
2421            out_elem
2422        });
2423    }
2424    macro_rules! arith_binary {
2425        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2426            $(if name == sym::$name {
2427                match in_elem.kind() {
2428                    $($(ty::$p(_))|* => {
2429                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
2430                    })*
2431                    _ => {},
2432                }
2433                return_error!(
2434                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2435                );
2436            })*
2437        }
2438    }
2439    arith_binary! {
2440        simd_add: Uint, Int => add, Float => fadd;
2441        simd_sub: Uint, Int => sub, Float => fsub;
2442        simd_mul: Uint, Int => mul, Float => fmul;
2443        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
2444        simd_rem: Uint => urem, Int => srem, Float => frem;
2445        simd_shl: Uint, Int => shl;
2446        simd_shr: Uint => lshr, Int => ashr;
2447        simd_and: Uint, Int => and;
2448        simd_or: Uint, Int => or;
2449        simd_xor: Uint, Int => xor;
2450        simd_fmax: Float => maxnum;
2451        simd_fmin: Float => minnum;
    }
2454    macro_rules! arith_unary {
2455        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2456            $(if name == sym::$name {
2457                match in_elem.kind() {
2458                    $($(ty::$p(_))|* => {
2459                        return Ok(bx.$call(args[0].immediate()))
2460                    })*
2461                    _ => {},
2462                }
2463                return_error!(
2464                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2465                );
2466            })*
2467        }
2468    }
2469    arith_unary! {
2470        simd_neg: Int => neg, Float => fneg;
2471    }
2472
2473    // Unary integer intrinsics
2474    if matches!(
2475        name,
2476        sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_ctpop | sym::simd_cttz
2477    ) {
2478        let vec_ty = bx.cx.type_vector(
2479            match *in_elem.kind() {
2480                ty::Int(i) => bx.cx.type_int_from_ty(i),
2481                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
2482                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
2483                    span,
2484                    name,
2485                    in_ty,
2486                    in_elem
2487                }),
2488            },
2489            in_len as u64,
2490        );
2491        let intrinsic_name = match name {
2492            sym::simd_bswap => "bswap",
2493            sym::simd_bitreverse => "bitreverse",
2494            sym::simd_ctlz => "ctlz",
2495            sym::simd_ctpop => "ctpop",
2496            sym::simd_cttz => "cttz",
2497            _ => unreachable!(),
2498        };
2499        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
        let llvm_intrinsic = &format!("llvm.{}.v{}i{}", intrinsic_name, in_len, int_size);
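        // e.g. `llvm.ctpop.v8i16` for `simd_ctpop` on an 8 x i16 vector.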
2501
2502        return match name {
            // byte swap is a no-op for i8/u8
2504            sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
2505            sym::simd_ctlz | sym::simd_cttz => {
                // These intrinsics take an `(int, i1 immediate)` argument pair; when the
                // second argument is `true`, a zero input yields poison, so we pass `false`.
2507                let fn_ty = bx.type_func(&[vec_ty, bx.type_i1()], vec_ty);
2508                let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
2509                let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
2510                Ok(bx.call(
2511                    fn_ty,
2512                    None,
2513                    None,
2514                    f,
2515                    &[args[0].immediate(), dont_poison_on_zero],
2516                    None,
2517                    None,
2518                ))
2519            }
2520            sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
2521                // simple unary argument cases
2522                let fn_ty = bx.type_func(&[vec_ty], vec_ty);
2523                let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
2524                Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None, None))
2525            }
2526            _ => unreachable!(),
2527        };
2528    }
2529
2530    if name == sym::simd_arith_offset {
2531        // This also checks that the first operand is a ptr type.
2532        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
2533            span_bug!(span, "must be called with a vector of pointer types as first argument")
2534        });
2535        let layout = bx.layout_of(pointee);
2536        let ptrs = args[0].immediate();
2537        // The second argument must be a ptr-sized integer.
2538        // (We don't care about the signedness, this is wrapping anyway.)
2539        let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
2540        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
2541            span_bug!(
2542                span,
2543                "must be called with a vector of pointer-sized integers as second argument"
2544            );
2545        }
2546        let offsets = args[1].immediate();
2547
2548        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
2549    }
2550
2551    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
2552        let lhs = args[0].immediate();
2553        let rhs = args[1].immediate();
2554        let is_add = name == sym::simd_saturating_add;
2555        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
2556        let (signed, elem_width, elem_ty) = match *in_elem.kind() {
2557            ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
2558            ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
2559            _ => {
2560                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
2561                    span,
2562                    name,
2563                    expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
2564                    vector_type: arg_tys[0]
2565                });
2566            }
2567        };
2568        let llvm_intrinsic = &format!(
2569            "llvm.{}{}.sat.v{}i{}",
2570            if signed { 's' } else { 'u' },
2571            if is_add { "add" } else { "sub" },
2572            in_len,
2573            elem_width
2574        );
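        // e.g. `llvm.sadd.sat.v4i32` for a signed saturating add of 4 x i32 lanes.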
2575        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
2576
2577        let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
2578        let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
2579        let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None, None);
2580        return Ok(v);
2581    }
2582
2583    span_bug!(span, "unknown SIMD intrinsic");
2584}