rustc_codegen_llvm/intrinsic.rs

use std::assert_matches::assert_matches;
use std::cmp::Ordering;

use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size};
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::codegen_attrs::autodiff_attrs;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_hir::{self as hir};
use rustc_middle::mir::BinOp;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
use rustc_middle::ty::{self, GenericArgsRef, Instance, SimdAlign, Ty, TyCtxt, TypingEnv};
use rustc_middle::{bug, span_bug};
use rustc_span::{Span, Symbol, sym};
use rustc_symbol_mangling::{mangle_internal_symbol, symbol_name_for_instance_in_crate};
use rustc_target::callconv::PassMode;
use tracing::debug;

use crate::abi::FnAbiLlvmExt;
use crate::builder::Builder;
use crate::builder::autodiff::{adjust_activity_to_abi, generate_enzyme_call};
use crate::context::CodegenCx;
use crate::errors::AutoDiffWithoutEnable;
use crate::llvm::{self, Metadata, Type, Value};
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;

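/// Maps a "simple" Rust intrinsic directly onto the corresponding overloaded
/// LLVM intrinsic, instantiated with the given type parameters (e.g. `sqrtf32`
/// becomes a call to `llvm.sqrt` with an `f32` type parameter, i.e.
/// `llvm.sqrt.f32`). Returns `None` for intrinsics that need more involved
/// lowering.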
fn call_simple_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    args: &[OperandRef<'tcx, &'ll Value>],
) -> Option<&'ll Value> {
    let (base_name, type_params): (&'static str, &[&'ll Type]) = match name {
        sym::sqrtf16 => ("llvm.sqrt", &[bx.type_f16()]),
        sym::sqrtf32 => ("llvm.sqrt", &[bx.type_f32()]),
        sym::sqrtf64 => ("llvm.sqrt", &[bx.type_f64()]),
        sym::sqrtf128 => ("llvm.sqrt", &[bx.type_f128()]),

        sym::powif16 => ("llvm.powi", &[bx.type_f16(), bx.type_i32()]),
        sym::powif32 => ("llvm.powi", &[bx.type_f32(), bx.type_i32()]),
        sym::powif64 => ("llvm.powi", &[bx.type_f64(), bx.type_i32()]),
        sym::powif128 => ("llvm.powi", &[bx.type_f128(), bx.type_i32()]),

        sym::sinf16 => ("llvm.sin", &[bx.type_f16()]),
        sym::sinf32 => ("llvm.sin", &[bx.type_f32()]),
        sym::sinf64 => ("llvm.sin", &[bx.type_f64()]),
        sym::sinf128 => ("llvm.sin", &[bx.type_f128()]),

        sym::cosf16 => ("llvm.cos", &[bx.type_f16()]),
        sym::cosf32 => ("llvm.cos", &[bx.type_f32()]),
        sym::cosf64 => ("llvm.cos", &[bx.type_f64()]),
        sym::cosf128 => ("llvm.cos", &[bx.type_f128()]),

        sym::powf16 => ("llvm.pow", &[bx.type_f16()]),
        sym::powf32 => ("llvm.pow", &[bx.type_f32()]),
        sym::powf64 => ("llvm.pow", &[bx.type_f64()]),
        sym::powf128 => ("llvm.pow", &[bx.type_f128()]),

        sym::expf16 => ("llvm.exp", &[bx.type_f16()]),
        sym::expf32 => ("llvm.exp", &[bx.type_f32()]),
        sym::expf64 => ("llvm.exp", &[bx.type_f64()]),
        sym::expf128 => ("llvm.exp", &[bx.type_f128()]),

        sym::exp2f16 => ("llvm.exp2", &[bx.type_f16()]),
        sym::exp2f32 => ("llvm.exp2", &[bx.type_f32()]),
        sym::exp2f64 => ("llvm.exp2", &[bx.type_f64()]),
        sym::exp2f128 => ("llvm.exp2", &[bx.type_f128()]),

        sym::logf16 => ("llvm.log", &[bx.type_f16()]),
        sym::logf32 => ("llvm.log", &[bx.type_f32()]),
        sym::logf64 => ("llvm.log", &[bx.type_f64()]),
        sym::logf128 => ("llvm.log", &[bx.type_f128()]),

        sym::log10f16 => ("llvm.log10", &[bx.type_f16()]),
        sym::log10f32 => ("llvm.log10", &[bx.type_f32()]),
        sym::log10f64 => ("llvm.log10", &[bx.type_f64()]),
        sym::log10f128 => ("llvm.log10", &[bx.type_f128()]),

        sym::log2f16 => ("llvm.log2", &[bx.type_f16()]),
        sym::log2f32 => ("llvm.log2", &[bx.type_f32()]),
        sym::log2f64 => ("llvm.log2", &[bx.type_f64()]),
        sym::log2f128 => ("llvm.log2", &[bx.type_f128()]),

        sym::fmaf16 => ("llvm.fma", &[bx.type_f16()]),
        sym::fmaf32 => ("llvm.fma", &[bx.type_f32()]),
        sym::fmaf64 => ("llvm.fma", &[bx.type_f64()]),
        sym::fmaf128 => ("llvm.fma", &[bx.type_f128()]),

        sym::fmuladdf16 => ("llvm.fmuladd", &[bx.type_f16()]),
        sym::fmuladdf32 => ("llvm.fmuladd", &[bx.type_f32()]),
        sym::fmuladdf64 => ("llvm.fmuladd", &[bx.type_f64()]),
        sym::fmuladdf128 => ("llvm.fmuladd", &[bx.type_f128()]),

        sym::fabsf16 => ("llvm.fabs", &[bx.type_f16()]),
        sym::fabsf32 => ("llvm.fabs", &[bx.type_f32()]),
        sym::fabsf64 => ("llvm.fabs", &[bx.type_f64()]),
        sym::fabsf128 => ("llvm.fabs", &[bx.type_f128()]),

        sym::minnumf16 => ("llvm.minnum", &[bx.type_f16()]),
        sym::minnumf32 => ("llvm.minnum", &[bx.type_f32()]),
        sym::minnumf64 => ("llvm.minnum", &[bx.type_f64()]),
        sym::minnumf128 => ("llvm.minnum", &[bx.type_f128()]),

        // FIXME: LLVM currently miscompiles these intrinsics; re-enable them
        // when llvm/llvm-project#{139380,139381,140445} are fixed.
        //sym::minimumf16 => ("llvm.minimum", &[bx.type_f16()]),
        //sym::minimumf32 => ("llvm.minimum", &[bx.type_f32()]),
        //sym::minimumf64 => ("llvm.minimum", &[bx.type_f64()]),
        //sym::minimumf128 => ("llvm.minimum", &[bx.type_f128()]),
        //
        sym::maxnumf16 => ("llvm.maxnum", &[bx.type_f16()]),
        sym::maxnumf32 => ("llvm.maxnum", &[bx.type_f32()]),
        sym::maxnumf64 => ("llvm.maxnum", &[bx.type_f64()]),
        sym::maxnumf128 => ("llvm.maxnum", &[bx.type_f128()]),

        // FIXME: LLVM currently miscompiles these intrinsics; re-enable them
        // when llvm/llvm-project#{139380,139381,140445} are fixed.
        //sym::maximumf16 => ("llvm.maximum", &[bx.type_f16()]),
        //sym::maximumf32 => ("llvm.maximum", &[bx.type_f32()]),
        //sym::maximumf64 => ("llvm.maximum", &[bx.type_f64()]),
        //sym::maximumf128 => ("llvm.maximum", &[bx.type_f128()]),
        //
        sym::copysignf16 => ("llvm.copysign", &[bx.type_f16()]),
        sym::copysignf32 => ("llvm.copysign", &[bx.type_f32()]),
        sym::copysignf64 => ("llvm.copysign", &[bx.type_f64()]),
        sym::copysignf128 => ("llvm.copysign", &[bx.type_f128()]),

        sym::floorf16 => ("llvm.floor", &[bx.type_f16()]),
        sym::floorf32 => ("llvm.floor", &[bx.type_f32()]),
        sym::floorf64 => ("llvm.floor", &[bx.type_f64()]),
        sym::floorf128 => ("llvm.floor", &[bx.type_f128()]),

        sym::ceilf16 => ("llvm.ceil", &[bx.type_f16()]),
        sym::ceilf32 => ("llvm.ceil", &[bx.type_f32()]),
        sym::ceilf64 => ("llvm.ceil", &[bx.type_f64()]),
        sym::ceilf128 => ("llvm.ceil", &[bx.type_f128()]),

        sym::truncf16 => ("llvm.trunc", &[bx.type_f16()]),
        sym::truncf32 => ("llvm.trunc", &[bx.type_f32()]),
        sym::truncf64 => ("llvm.trunc", &[bx.type_f64()]),
        sym::truncf128 => ("llvm.trunc", &[bx.type_f128()]),

        // We could use any of `rint`, `nearbyint`, or `roundeven`
        // for this -- they are all identical in semantics when
        // assuming the default FP environment.
        // `rint` is what we used for $forever.
        sym::round_ties_even_f16 => ("llvm.rint", &[bx.type_f16()]),
        sym::round_ties_even_f32 => ("llvm.rint", &[bx.type_f32()]),
        sym::round_ties_even_f64 => ("llvm.rint", &[bx.type_f64()]),
        sym::round_ties_even_f128 => ("llvm.rint", &[bx.type_f128()]),

        sym::roundf16 => ("llvm.round", &[bx.type_f16()]),
        sym::roundf32 => ("llvm.round", &[bx.type_f32()]),
        sym::roundf64 => ("llvm.round", &[bx.type_f64()]),
        sym::roundf128 => ("llvm.round", &[bx.type_f128()]),

        _ => return None,
    };
    Some(bx.call_intrinsic(
        base_name,
        type_params,
        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
    ))
}

impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, &'ll Value>],
        result: PlaceRef<'tcx, &'ll Value>,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let tcx = self.tcx;

        let name = tcx.item_name(instance.def_id());
        let fn_args = instance.args;

        let simple = call_simple_intrinsic(self, name, args);
        let llval = match name {
            _ if simple.is_some() => simple.unwrap(),
            sym::ptr_mask => {
                let ptr = args[0].immediate();
                self.call_intrinsic(
                    "llvm.ptrmask",
                    &[self.val_ty(ptr), self.type_isize()],
                    &[ptr, args[1].immediate()],
                )
            }
            sym::autodiff => {
                codegen_autodiff(self, tcx, instance, args, result);
                return Ok(());
            }
            sym::is_val_statically_known => {
                if let OperandValue::Immediate(imm) = args[0].val {
                    self.call_intrinsic(
                        "llvm.is.constant",
                        &[args[0].layout.immediate_llvm_type(self.cx)],
                        &[imm],
                    )
                } else {
                    self.const_bool(false)
                }
            }
            sym::select_unpredictable => {
                let cond = args[0].immediate();
                assert_eq!(args[1].layout, args[2].layout);
                let select = |bx: &mut Self, true_val, false_val| {
                    let result = bx.select(cond, true_val, false_val);
                    bx.set_unpredictable(&result);
                    result
                };
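                // For by-ref operands, select between the two backing pointers and
                // copy out of whichever one was chosen, rather than eagerly loading
                // both values just to select between them.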
                match (args[1].val, args[2].val) {
                    (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
                        assert!(true_val.llextra.is_none());
                        assert!(false_val.llextra.is_none());
                        assert_eq!(true_val.align, false_val.align);
                        let ptr = select(self, true_val.llval, false_val.llval);
                        let selected =
                            OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
                        selected.store(self, result);
                        return Ok(());
                    }
                    (OperandValue::Immediate(_), OperandValue::Immediate(_))
                    | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
                        let true_val = args[1].immediate_or_packed_pair(self);
                        let false_val = args[2].immediate_or_packed_pair(self);
                        select(self, true_val, false_val)
                    }
                    (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
                    _ => span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
                }
            }
            sym::catch_unwind => {
                catch_unwind_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    result,
                );
                return Ok(());
            }
            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
            sym::va_copy => {
                let dest = args[0].immediate();
                self.call_intrinsic(
                    "llvm.va_copy",
                    &[self.val_ty(dest)],
                    &[dest, args[1].immediate()],
                )
            }
            sym::va_arg => {
                match result.layout.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        match scalar.primitive() {
                            Primitive::Int(..) => {
                                if self.cx().size_of(result.layout.ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, result.layout.llvm_type(self))
                                } else {
                                    emit_va_arg(self, args[0], result.layout.ty)
                                }
                            }
                            Primitive::Float(Float::F16) => {
                                bug!("the va_arg intrinsic does not work with `f16`")
                            }
                            Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
                                emit_va_arg(self, args[0], result.layout.ty)
                            }
                            // `va_arg` should never be used with the return type f32.
                            Primitive::Float(Float::F32) => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                            Primitive::Float(Float::F128) => {
                                bug!("the va_arg intrinsic does not work with `f128`")
                            }
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }

            sym::volatile_load | sym::unaligned_volatile_load => {
                let ptr = args[0].immediate();
                let load = self.volatile_load(result.layout.llvm_type(self), ptr);
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    result.layout.align.bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                if !result.layout.is_zst() {
                    self.store_to_place(load, result.val);
                }
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                let ptr = args[0].immediate();
                let locality = fn_args.const_at(1).to_value().valtree.unwrap_leaf().to_i32();
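                // `llvm.prefetch(ptr, rw, locality, cache_type)`: `rw` is 0 for a read
                // or 1 for a write prefetch, `locality` ranges from 0 (no temporal
                // locality) to 3 (extremely local), and `cache_type` is 0 for the
                // instruction cache or 1 for the data cache.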
                self.call_intrinsic(
                    "llvm.prefetch",
                    &[self.val_ty(ptr)],
                    &[
                        ptr,
                        self.const_i32(rw),
                        self.const_i32(locality),
                        self.const_i32(cache_type),
                    ],
                )
            }
            sym::carrying_mul_add => {
                let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);

                let wide_llty = self.type_ix(size.bits() * 2);
                let args = args.as_array().unwrap();
                let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));

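                // The double-width accumulator can never overflow: for `N`-bit
                // unsigned operands, the maximum of `a * b + c + d` is
                // `(2^N - 1)^2 + 2 * (2^N - 1) = 2^(2N) - 1`, exactly the largest
                // value of the wide type; the signed bounds are narrower still.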
                let wide = if signed {
                    let prod = self.unchecked_smul(a, b);
                    let acc = self.unchecked_sadd(prod, c);
                    self.unchecked_sadd(acc, d)
                } else {
                    let prod = self.unchecked_umul(a, b);
                    let acc = self.unchecked_uadd(prod, c);
                    self.unchecked_uadd(acc, d)
                };

                let narrow_llty = self.type_ix(size.bits());
                let low = self.trunc(wide, narrow_llty);
                let bits_const = self.const_uint(wide_llty, size.bits());
                // No need for ashr when signed; LLVM changes it to lshr anyway.
                let high = self.lshr(wide, bits_const);
                // FIXME: could be `trunc nuw`, even for signed.
                let high = self.trunc(high, narrow_llty);

                let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
                let pair = self.const_poison(pair_llty);
                let pair = self.insert_value(pair, low, 0);
                let pair = self.insert_value(pair, high, 1);
                pair
            }
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::rotate_left
            | sym::rotate_right
            | sym::saturating_add
            | sym::saturating_sub
            | sym::unchecked_funnel_shl
            | sym::unchecked_funnel_shr => {
                let ty = args[0].layout.ty;
                if !ty.is_integral() {
                    tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                    return Ok(());
                }
                let (size, signed) = ty.int_size_and_signed(self.tcx);
                let width = size.bits();
                let llty = self.type_ix(width);
                match name {
                    sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => {
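                        // The second argument of `llvm.ctlz`/`llvm.cttz` is an `i1`
                        // saying whether a zero input is poison, which is precisely
                        // the extra precondition of the `_nonzero` variants.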
                        let y =
                            self.const_bool(name == sym::ctlz_nonzero || name == sym::cttz_nonzero);
                        let llvm_name = if name == sym::ctlz || name == sym::ctlz_nonzero {
                            "llvm.ctlz"
                        } else {
                            "llvm.cttz"
                        };
                        let ret =
                            self.call_intrinsic(llvm_name, &[llty], &[args[0].immediate(), y]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::ctpop => {
                        let ret =
                            self.call_intrinsic("llvm.ctpop", &[llty], &[args[0].immediate()]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::bswap => {
                        if width == 8 {
                            args[0].immediate() // byte swap a u8/i8 is just a no-op
                        } else {
                            self.call_intrinsic("llvm.bswap", &[llty], &[args[0].immediate()])
                        }
                    }
                    sym::bitreverse => {
                        self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()])
                    }
                    sym::rotate_left
                    | sym::rotate_right
                    | sym::unchecked_funnel_shl
                    | sym::unchecked_funnel_shr => {
                        let is_left = name == sym::rotate_left || name == sym::unchecked_funnel_shl;
                        let lhs = args[0].immediate();
                        let (rhs, raw_shift) =
                            if name == sym::rotate_left || name == sym::rotate_right {
                                // rotate = funnel shift with first two args the same
                                (lhs, args[1].immediate())
                            } else {
                                (args[1].immediate(), args[2].immediate())
                            };
                        let llvm_name = format!("llvm.fsh{}", if is_left { 'l' } else { 'r' });

                        // llvm expects shift to be the same type as the values, but rust
                        // always uses `u32`.
                        let raw_shift = self.intcast(raw_shift, self.val_ty(lhs), false);

                        self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs, raw_shift])
                    }
                    sym::saturating_add | sym::saturating_sub => {
                        let is_add = name == sym::saturating_add;
                        let lhs = args[0].immediate();
                        let rhs = args[1].immediate();
                        let llvm_name = format!(
                            "llvm.{}{}.sat",
                            if signed { 's' } else { 'u' },
                            if is_add { "add" } else { "sub" },
                        );
                        self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs])
                    }
                    _ => bug!(),
                }
            }

            sym::raw_eq => {
                use BackendRepr::*;
                let tp_ty = fn_args.type_at(0);
                let layout = self.layout_of(tp_ty).layout;
                let use_integer_compare = match layout.backend_repr() {
                    Scalar(_) | ScalarPair(_, _) => true,
                    SimdVector { .. } => false,
                    Memory { .. } => {
                        // For rusty ABIs, small aggregates are actually passed
                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                        // so we re-use that same threshold here.
                        layout.size() <= self.data_layout().pointer_size() * 2
                    }
                };

                let a = args[0].immediate();
                let b = args[1].immediate();
                if layout.size().bytes() == 0 {
                    self.const_bool(true)
                } else if use_integer_compare {
                    let integer_ty = self.type_ix(layout.size().bits());
                    let a_val = self.load(integer_ty, a, layout.align().abi);
                    let b_val = self.load(integer_ty, b, layout.align().abi);
                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
                } else {
                    let n = self.const_usize(layout.size().bytes());
                    let cmp = self.call_intrinsic("memcmp", &[], &[a, b, n]);
                    self.icmp(IntPredicate::IntEQ, cmp, self.const_int(self.type_int(), 0))
                }
            }

            sym::compare_bytes => {
                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
                let cmp = self.call_intrinsic(
                    "memcmp",
                    &[],
                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
                );
                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
                self.sext(cmp, self.type_ix(32))
            }

            sym::black_box => {
                args[0].val.store(self, result);
                let result_val_span = [result.val.llval];
                // We need to "use" the argument in some way LLVM can't introspect, and on
                // targets that support it we can typically leverage inline assembly to do
                // this. LLVM's interpretation of inline assembly is that it's, well, a black
                // box. This isn't the greatest implementation since it probably deoptimizes
                // more than we want, but it's so far good enough.
                //
                // For zero-sized types, the location pointed to by the result may be
                // uninitialized. Do not "use" the result in this case; instead just clobber
                // the memory.
                let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
                    ("~{memory}", &[])
                } else {
                    ("r,~{memory}", &result_val_span)
                };
                crate::asm::inline_asm_call(
                    self,
                    "",
                    constraint,
                    inputs,
                    self.type_void(),
                    &[],
                    true,
                    false,
                    llvm::AsmDialect::Att,
                    &[span],
                    false,
                    None,
                    None,
                )
                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

                // We have copied the value to `result` already.
                return Ok(());
            }

            _ if name.as_str().starts_with("simd_") => {
                // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                // This gives them the expected layout of a regular #[repr(simd)] vector.
                let mut loaded_args = Vec::new();
                for arg in args {
                    loaded_args.push(
                        // #[repr(packed, simd)] vectors are passed like arrays (as references,
                        // with reduced alignment and no padding) rather than as immediates.
                        // We can use a vector load to fix the layout and turn the argument
                        // into an immediate.
                        if arg.layout.ty.is_simd()
                            && let OperandValue::Ref(place) = arg.val
                        {
                            let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                            let elem_ll_ty = match elem_ty.kind() {
                                ty::Float(f) => self.type_float_from_ty(*f),
                                ty::Int(i) => self.type_int_from_ty(*i),
                                ty::Uint(u) => self.type_uint_from_ty(*u),
                                ty::RawPtr(_, _) => self.type_ptr(),
                                _ => unreachable!(),
                            };
                            let loaded =
                                self.load_from_place(self.type_vector(elem_ll_ty, size), place);
                            OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
                        } else {
                            *arg
                        },
                    );
                }

                let llret_ty = if result.layout.ty.is_simd()
                    && let BackendRepr::Memory { .. } = result.layout.backend_repr
                {
                    let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                    let elem_ll_ty = match elem_ty.kind() {
                        ty::Float(f) => self.type_float_from_ty(*f),
                        ty::Int(i) => self.type_int_from_ty(*i),
                        ty::Uint(u) => self.type_uint_from_ty(*u),
                        ty::RawPtr(_, _) => self.type_ptr(),
                        _ => unreachable!(),
                    };
                    self.type_vector(elem_ll_ty, size)
                } else {
                    result.layout.llvm_type(self)
                };

                match generic_simd_intrinsic(
                    self,
                    name,
                    fn_args,
                    &loaded_args,
                    result.layout.ty,
                    llret_ty,
                    span,
                ) {
                    Ok(llval) => llval,
                    // If there was an error, just skip this invocation... we'll abort compilation
                    // anyway, but we can keep codegen'ing to find more errors.
                    Err(()) => return Ok(()),
                }
            }

            _ => {
                debug!("unknown intrinsic '{}' -- falling back to default body", name);
                // Call the fallback body instead of generating the intrinsic code
                return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
            }
        };

        if result.layout.ty.is_bool() {
            let val = self.from_immediate(llval);
            self.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            self.store_to_place(llval, result.val);
        }
        Ok(())
    }

    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[], &[]);
    }

    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[], &[val]);
        }
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic(
                "llvm.expect",
                &[self.type_i1()],
                &[cond, self.const_bool(expected)],
            )
        } else {
            cond
        }
    }

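    // Loads a function pointer from a vtable slot via `llvm.type.checked.load`,
    // which pairs the load with a CFI type test. The intrinsic returns a
    // `{ptr, i1}` pair; only the loaded pointer is needed here.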
    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &'ll Metadata,
    ) -> Self::Value {
        let typeid = self.get_metadata_value(typeid);
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load = self.call_intrinsic(
            "llvm.type.checked.load",
            &[],
            &[llvtable, vtable_byte_offset, typeid],
        );
        self.extract_value(type_checked_load, 0)
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[self.val_ty(va_list)], &[va_list])
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[self.val_ty(va_list)], &[va_list])
    }
}

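// Lowers the `catch_unwind` intrinsic. When the panic strategy cannot unwind,
// `try_func` is called directly and 0 is returned unconditionally; otherwise
// this dispatches to the SEH, wasm, emscripten, or GNU-style implementation
// below.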
fn catch_unwind_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    if !bx.sess().panic_strategy().unwinds() {
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if wants_wasm_eh(bx.sess()) {
        codegen_wasm_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.os == "emscripten" {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that support SEH on MSVC targets. Although these instructions are meant to
// work for all targets, as of the time of this writing LLVM does not recommend
// using them elsewhere, as the old ones are still better optimized.
fn codegen_msvc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
        //
        //   catchpad_rust:
        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   catchpad_foreign:
        //      %tok = catchpad within %cs [null, 64, null]
        //      call %catch_func(%data, null)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          void* x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void *data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          } catch(...) {
        //              catch_func(data, NULL);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let slot = bx.alloca(ptr_size, ptr_align);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // module.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in library/panic_unwind/src/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
        let tydesc = bx.declare_global(
            &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"),
            bx.val_ty(type_info),
        );

        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        llvm::set_initializer(tydesc, type_info);

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        bx.switch_to_block(catchpad_rust);
        let flags = bx.const_i32(8);
        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // The flag value of 64 indicates a "catch-all".
        bx.switch_to_block(catchpad_foreign);
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null, flags, null]);
        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// WASM's definition of the `rust_try` function.
fn codegen_wasm_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad = bx.append_sibling_block("catchpad");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [null]
        //      %ptr = call @llvm.wasm.get.exception(token %tok)
        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad]);

        bx.switch_to_block(catchpad);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null]);

        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[], &[funclet.cleanuppad()]);
        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[], &[funclet.cleanuppad()]);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        bx.switch_to_block(catch);
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_ptr());
        bx.add_clause(vals, tydesc);
        let ptr = bx.extract_value(vals, 0);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Variant of codegen_gnu_try used for emscripten where Rust panics are
// implemented using C++ exceptions. Here we use exceptions of a specific type
// (`struct rust_panic`) to represent Rust panics.
fn codegen_emcc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, %selector) = landingpad
        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
        //      %is_rust_panic = %selector == %rust_typeid
        //      %catch_data = alloca { i8*, i8 }
        //      %catch_data[0] = %ptr
        //      %catch_data[1] = %is_rust_panic
        //      call %catch_func(%data, %catch_data)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        bx.switch_to_block(catch);
        let tydesc = bx.eh_catch_typeinfo();
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
        bx.add_clause(vals, tydesc);
        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
        let ptr = bx.extract_value(vals, 0);
        let selector = bx.extract_value(vals, 1);

        // Check if the typeid we got is the one for a Rust panic.
        let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[bx.val_ty(tydesc)], &[tydesc]);
        let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
        let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());

        // We need to pass two values to catch_func (ptr and is_rust_panic), so
        // create an alloca and pass a pointer to that.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let i8_align = bx.tcx().data_layout.i8_align;
        // Required in order for there to be no padding between the fields.
        assert!(i8_align <= ptr_align);
        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
        bx.store(ptr, catch_data, ptr_align);
        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
        bx.store(is_rust_panic, catch_data_1, i8_align);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    name: &str,
    rust_fn_sig: ty::PolyFnSig<'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
    let llty = fn_abi.llvm_type(cx);
    let llfn = cx.declare_fn(name, fn_abi, None);
    cx.set_frame_pointer_type(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
    let llbb = Builder::append_block(cx, llfn, "entry-block");
    let bx = Builder::build(cx, llbb);
    codegen(bx);
    (llty, llfn)
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
    // `unsafe fn(*mut i8) -> ()`
    let try_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(*mut i8, *mut i8) -> ()`
    let catch_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p, i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
        [try_fn_ty, i8p, catch_fn_ty],
        tcx.types.i32,
        false,
        hir::Safety::Unsafe,
        ExternAbi::Rust,
    ));
    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

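// Lowers the `autodiff` intrinsic: resolves the source function and its
// to-be-differentiated counterpart from the intrinsic's generic arguments,
// then emits the corresponding Enzyme call to generate the derivative body.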
fn codegen_autodiff<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    tcx: TyCtxt<'tcx>,
    instance: ty::Instance<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    result: PlaceRef<'tcx, &'ll Value>,
) {
    if !tcx.sess.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::Enable) {
        let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutEnable);
    }

    let fn_args = instance.args;
    let callee_ty = instance.ty(tcx, bx.typing_env());

    let sig = callee_ty.fn_sig(tcx).skip_binder();

    let ret_ty = sig.output();
    let llret_ty = bx.layout_of(ret_ty).llvm_type(bx);

    // Get source, diff, and attrs
    let (source_id, source_args) = match fn_args.into_type_list(tcx)[0].kind() {
        ty::FnDef(def_id, source_params) => (def_id, source_params),
        _ => bug!("invalid autodiff intrinsic args"),
    };

    let fn_source = match Instance::try_resolve(tcx, bx.cx.typing_env(), *source_id, source_args) {
        Ok(Some(instance)) => instance,
        Ok(None) => bug!(
            "could not resolve ({:?}, {:?}) to a specific autodiff instance",
            source_id,
            source_args
        ),
        Err(_) => {
            // An error has already been emitted
            return;
        }
    };

    let source_symbol = symbol_name_for_instance_in_crate(tcx, fn_source, LOCAL_CRATE);
    let Some(fn_to_diff) = bx.cx.get_function(&source_symbol) else {
        bug!("could not find source function")
    };

    let (diff_id, diff_args) = match fn_args.into_type_list(tcx)[1].kind() {
        ty::FnDef(def_id, diff_args) => (def_id, diff_args),
        _ => bug!("invalid autodiff intrinsic args"),
    };

    let fn_diff = match Instance::try_resolve(tcx, bx.cx.typing_env(), *diff_id, diff_args) {
        Ok(Some(instance)) => instance,
        Ok(None) => bug!(
            "could not resolve ({:?}, {:?}) to a specific autodiff instance",
            diff_id,
            diff_args
        ),
        Err(_) => {
            // An error has already been emitted
            return;
        }
    };

    let val_arr = get_args_from_tuple(bx, args[2], fn_diff);
    let diff_symbol = symbol_name_for_instance_in_crate(tcx, fn_diff, LOCAL_CRATE);

    let Some(mut diff_attrs) = autodiff_attrs(tcx, fn_diff.def_id()) else {
        bug!("could not find autodiff attrs")
    };

    adjust_activity_to_abi(
        tcx,
        fn_source,
        TypingEnv::fully_monomorphized(),
        &mut diff_attrs.input_activity,
    );

    let fnc_tree =
        rustc_middle::ty::fnc_typetrees(tcx, fn_source.ty(tcx, TypingEnv::fully_monomorphized()));

    // Build body
    generate_enzyme_call(
        bx,
        bx.cx,
        fn_to_diff,
        &diff_symbol,
        llret_ty,
        &val_arr,
        diff_attrs.clone(),
        result,
        fnc_tree,
    );
}

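// Flattens the tuple operand carrying the callee's arguments into a flat list
// of LLVM values, following the callee's ABI: ignored arguments are skipped,
// direct/cast arguments are loaded, scalar pairs are split into their two
// halves, and indirect arguments are passed as pointers to their fields.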
1230fn get_args_from_tuple<'ll, 'tcx>(
1231    bx: &mut Builder<'_, 'll, 'tcx>,
1232    tuple_op: OperandRef<'tcx, &'ll Value>,
1233    fn_instance: Instance<'tcx>,
1234) -> Vec<&'ll Value> {
1235    let cx = bx.cx;
1236    let fn_abi = cx.fn_abi_of_instance(fn_instance, ty::List::empty());
1237
1238    match tuple_op.val {
1239        OperandValue::Immediate(val) => vec![val],
1240        OperandValue::Pair(v1, v2) => vec![v1, v2],
1241        OperandValue::Ref(ptr) => {
1242            let tuple_place = PlaceRef { val: ptr, layout: tuple_op.layout };
1243
1244            let mut result = Vec::with_capacity(fn_abi.args.len());
1245            let mut tuple_index = 0;
1246
1247            for arg in &fn_abi.args {
1248                match arg.mode {
1249                    PassMode::Ignore => {}
1250                    PassMode::Direct(_) | PassMode::Cast { .. } => {
1251                        let field = tuple_place.project_field(bx, tuple_index);
1252                        let llvm_ty = field.layout.llvm_type(bx.cx);
1253                        let val = bx.load(llvm_ty, field.val.llval, field.val.align);
1254                        result.push(val);
1255                        tuple_index += 1;
1256                    }
1257                    PassMode::Pair(_, _) => {
1258                        let field = tuple_place.project_field(bx, tuple_index);
1259                        let llvm_ty = field.layout.llvm_type(bx.cx);
1260                        let pair_val = bx.load(llvm_ty, field.val.llval, field.val.align);
1261                        result.push(bx.extract_value(pair_val, 0));
1262                        result.push(bx.extract_value(pair_val, 1));
1263                        tuple_index += 1;
1264                    }
1265                    PassMode::Indirect { .. } => {
1266                        let field = tuple_place.project_field(bx, tuple_index);
1267                        result.push(field.val.llval);
1268                        tuple_index += 1;
1269                    }
1270                }
1271            }
1272
1273            result
1274        }
1275
1276        OperandValue::ZeroSized => vec![],
1277    }
1278}
1279
1280fn generic_simd_intrinsic<'ll, 'tcx>(
1281    bx: &mut Builder<'_, 'll, 'tcx>,
1282    name: Symbol,
1283    fn_args: GenericArgsRef<'tcx>,
1284    args: &[OperandRef<'tcx, &'ll Value>],
1285    ret_ty: Ty<'tcx>,
1286    llret_ty: &'ll Type,
1287    span: Span,
1288) -> Result<&'ll Value, ()> {
1289    macro_rules! return_error {
1290        ($diag: expr) => {{
1291            bx.sess().dcx().emit_err($diag);
1292            return Err(());
1293        }};
1294    }
1295
1296    macro_rules! require {
1297        ($cond: expr, $diag: expr) => {
1298            if !$cond {
1299                return_error!($diag);
1300            }
1301        };
1302    }
1303
1304    macro_rules! require_simd {
1305        ($ty: expr, $variant:ident) => {{
1306            require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
1307            $ty.simd_size_and_type(bx.tcx())
1308        }};
1309    }
1310
1311    /// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type (the pointer width for `isize`/`usize`).
1312    macro_rules! require_int_or_uint_ty {
1313        ($ty: expr, $diag: expr) => {
1314            match $ty {
1315                ty::Int(i) => {
1316                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1317                }
1318                ty::Uint(i) => {
1319                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1320                }
1321                _ => {
1322                    return_error!($diag);
1323                }
1324            }
1325        };
1326    }
1327
1328    let llvm_version = crate::llvm_util::get_version();
1329
1330    /// Converts a vector mask, where each element has a bit width equal to the data elements it is used with,
1331    /// down to an i1-based mask that can be used by LLVM intrinsics.
1332    ///
1333    /// The Rust SIMD semantics are that each element should consist of either all ones or all zeroes,
1334    /// but this information is not available to LLVM. Truncating the vector effectively uses the lowest bit,
1335    /// but codegen for several targets is better if we consider the highest bit by shifting.
1336    ///
1337    /// For x86 SSE/AVX targets this is beneficial since most instructions with mask parameters only consider the highest bit.
1338    /// So even though at the LLVM IR level we have an additional shift, in the final assembly there is no shift or truncate and
1339    /// instead the mask can be used as-is.
1340    ///
1341    /// For aarch64 and other targets there is a benefit because a mask from the sign bit can be more
1342    /// efficiently converted to an all ones / all zeroes mask by comparing whether each element is negative.
1343    fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
1344        bx: &mut Builder<'a, 'll, 'tcx>,
1345        i_xn: &'ll Value,
1346        in_elem_bitwidth: u64,
1347        in_len: u64,
1348    ) -> &'ll Value {
1349        // Shift the MSB right by `in_elem_bitwidth - 1` so it ends up in the lowest bit position.
1350        let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1351        let shift_indices = vec![shift_idx; in_len as _];
1352        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1353        // Truncate vector to an <i1 x N>
1354        bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
1355    }
1356
1357    // Sanity-check: all vector arguments must be immediates.
1358    if cfg!(debug_assertions) {
1359        for arg in args {
1360            if arg.layout.ty.is_simd() {
1361                assert_matches!(arg.val, OperandValue::Immediate(_));
1362            }
1363        }
1364    }
1365
1366    if name == sym::simd_select_bitmask {
1367        let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);
1368
1369        let expected_int_bits = len.max(8).next_power_of_two();
1370        let expected_bytes = len.div_ceil(8);
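        // The bitmask may be passed either as an unsigned integer of the next power-of-two width
        // (at least 8 bits) or as a byte array of `len.div_ceil(8)` bytes.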
1371
1372        let mask_ty = args[0].layout.ty;
1373        let mask = match mask_ty.kind() {
1374            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1375            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1376            ty::Array(elem, len)
1377                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1378                    && len
1379                        .try_to_target_usize(bx.tcx)
1380                        .expect("expected monomorphic const in codegen")
1381                        == expected_bytes =>
1382            {
1383                let place = PlaceRef::alloca(bx, args[0].layout);
1384                args[0].val.store(bx, place);
1385                let int_ty = bx.type_ix(expected_bytes * 8);
1386                bx.load(int_ty, place.val.llval, Align::ONE)
1387            }
1388            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
1389                span,
1390                name,
1391                mask_ty,
1392                expected_int_bits,
1393                expected_bytes
1394            }),
1395        };
1396
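        // Truncate the mask to its low `len` bits and reinterpret them as a vector of i1 lane
        // selectors.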
1397        let i1 = bx.type_i1();
1398        let im = bx.type_ix(len);
1399        let i1xn = bx.type_vector(i1, len);
1400        let m_im = bx.trunc(mask, im);
1401        let m_i1s = bx.bitcast(m_im, i1xn);
1402        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1403    }
1404
1405    // every intrinsic below takes a SIMD vector as its first argument
1406    let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
1407    let in_ty = args[0].layout.ty;
1408
1409    let comparison = match name {
1410        sym::simd_eq => Some(BinOp::Eq),
1411        sym::simd_ne => Some(BinOp::Ne),
1412        sym::simd_lt => Some(BinOp::Lt),
1413        sym::simd_le => Some(BinOp::Le),
1414        sym::simd_gt => Some(BinOp::Gt),
1415        sym::simd_ge => Some(BinOp::Ge),
1416        _ => None,
1417    };
1418
1419    if let Some(cmp_op) = comparison {
1420        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1421
1422        require!(
1423            in_len == out_len,
1424            InvalidMonomorphization::ReturnLengthInputType {
1425                span,
1426                name,
1427                in_len,
1428                in_ty,
1429                ret_ty,
1430                out_len
1431            }
1432        );
1433        require!(
1434            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
1435            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
1436        );
1437
1438        return Ok(compare_simd_types(
1439            bx,
1440            args[0].immediate(),
1441            args[1].immediate(),
1442            in_elem,
1443            llret_ty,
1444            cmp_op,
1445        ));
1446    }
1447
1448    if name == sym::simd_shuffle_const_generic {
1449        let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
1450        let n = idx.len() as u64;
1451
1452        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1453        require!(
1454            out_len == n,
1455            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1456        );
1457        require!(
1458            in_elem == out_ty,
1459            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1460        );
1461
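        // A shuffle selects lanes from the concatenation of both input vectors, so `2 * in_len`
        // lanes are addressable.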
1462        let total_len = in_len * 2;
1463
1464        let indices: Option<Vec<_>> = idx
1465            .iter()
1466            .enumerate()
1467            .map(|(arg_idx, val)| {
1468                let idx = val.unwrap_leaf().to_i32();
1469                if idx >= i32::try_from(total_len).unwrap() {
1470                    bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
1471                        span,
1472                        name,
1473                        arg_idx: arg_idx as u64,
1474                        total_len: total_len.into(),
1475                    });
1476                    None
1477                } else {
1478                    Some(bx.const_i32(idx))
1479                }
1480            })
1481            .collect();
1482        let Some(indices) = indices else {
1483            return Ok(bx.const_null(llret_ty));
1484        };
1485
1486        return Ok(bx.shuffle_vector(
1487            args[0].immediate(),
1488            args[1].immediate(),
1489            bx.const_vector(&indices),
1490        ));
1491    }
1492
1493    if name == sym::simd_shuffle {
1494        // Make sure this is actually a SIMD vector.
1495        let idx_ty = args[2].layout.ty;
1496        let n: u64 = if idx_ty.is_simd()
1497            && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
1498        {
1499            idx_ty.simd_size_and_type(bx.cx.tcx).0
1500        } else {
1501            return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
1502        };
1503
1504        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1505        require!(
1506            out_len == n,
1507            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1508        );
1509        require!(
1510            in_elem == out_ty,
1511            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1512        );
1513
1514        let total_len = u128::from(in_len) * 2;
1515
1516        // Check that the indices are in-bounds.
1517        let indices = args[2].immediate();
1518        for i in 0..n {
1519            let val = bx.const_get_elt(indices, i as u64);
1520            let idx = bx
1521                .const_to_opt_u128(val, true)
1522                .unwrap_or_else(|| bug!("typeck should have already ensured that these are const"));
1523            if idx >= total_len {
1524                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1525                    span,
1526                    name,
1527                    arg_idx: i,
1528                    total_len,
1529                });
1530            }
1531        }
1532
1533        return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
1534    }
1535
1536    if name == sym::simd_insert || name == sym::simd_insert_dyn {
1537        require!(
1538            in_elem == args[2].layout.ty,
1539            InvalidMonomorphization::InsertedType {
1540                span,
1541                name,
1542                in_elem,
1543                in_ty,
1544                out_ty: args[2].layout.ty
1545            }
1546        );
1547
1548        let index_imm = if name == sym::simd_insert {
1549            let idx = bx
1550                .const_to_opt_u128(args[1].immediate(), false)
1551                .expect("typeck should have ensured that this is a const");
1552            if idx >= in_len.into() {
1553                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1554                    span,
1555                    name,
1556                    arg_idx: 1,
1557                    total_len: in_len.into(),
1558                });
1559            }
1560            bx.const_i32(idx as i32)
1561        } else {
1562            args[1].immediate()
1563        };
1564
1565        return Ok(bx.insert_element(args[0].immediate(), args[2].immediate(), index_imm));
1566    }
1567    if name == sym::simd_extract || name == sym::simd_extract_dyn {
1568        require!(
1569            ret_ty == in_elem,
1570            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1571        );
1572        let index_imm = if name == sym::simd_extract {
1573            let idx = bx
1574                .const_to_opt_u128(args[1].immediate(), false)
1575                .expect("typeck should have ensured that this is a const");
1576            if idx >= in_len.into() {
1577                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1578                    span,
1579                    name,
1580                    arg_idx: 1,
1581                    total_len: in_len.into(),
1582                });
1583            }
1584            bx.const_i32(idx as i32)
1585        } else {
1586            args[1].immediate()
1587        };
1588
1589        return Ok(bx.extract_element(args[0].immediate(), index_imm));
1590    }
1591
1592    if name == sym::simd_select {
1593        let m_elem_ty = in_elem;
1594        let m_len = in_len;
1595        let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
1596        require!(
1597            m_len == v_len,
1598            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
1599        );
1600        let in_elem_bitwidth = require_int_or_uint_ty!(
1601            m_elem_ty.kind(),
1602            InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
1603        );
1604        let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len);
1605        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1606    }
1607
1608    if name == sym::simd_bitmask {
1609        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a vector mask and
1610        // returns one bit for each lane (which must all be `0` or `!0`) in the form of either:
1611        // * an unsigned integer
1612        // * an array of `u8`
1613        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed trailing bits.
1614        //
1615        // The bit order of the result depends on the byte endianness, LSB-first for little
1616        // endian and MSB-first for big endian.
1617        let expected_int_bits = in_len.max(8).next_power_of_two();
1618        let expected_bytes = in_len.div_ceil(8);
1619
1620        // Integer vector <i{in_bitwidth} x in_len>:
1621        let in_elem_bitwidth = require_int_or_uint_ty!(
1622            in_elem.kind(),
1623            InvalidMonomorphization::MaskWrongElementType { span, name, ty: in_elem }
1624        );
1625
1626        let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
1627        // Bitcast <i1 x N> to iN:
1628        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1629
1630        match ret_ty.kind() {
1631            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
1632                // Zero-extend iN to the bitmask type:
1633                return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1634            }
1635            ty::Array(elem, len)
1636                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1637                    && len
1638                        .try_to_target_usize(bx.tcx)
1639                        .expect("expected monomorphic const in codegen")
1640                        == expected_bytes =>
1641            {
1642                // Zero-extend iN to the array length:
1643                let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
1644
1645                // Convert the integer to a byte array
1646                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
1647                bx.store(ze, ptr, Align::ONE);
1648                let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
1649                return Ok(bx.load(array_ty, ptr, Align::ONE));
1650            }
1651            _ => return_error!(InvalidMonomorphization::CannotReturn {
1652                span,
1653                name,
1654                ret_ty,
1655                expected_int_bits,
1656                expected_bytes
1657            }),
1658        }
1659    }
1660
1661    fn simd_simple_float_intrinsic<'ll, 'tcx>(
1662        name: Symbol,
1663        in_elem: Ty<'_>,
1664        in_ty: Ty<'_>,
1665        in_len: u64,
1666        bx: &mut Builder<'_, 'll, 'tcx>,
1667        span: Span,
1668        args: &[OperandRef<'tcx, &'ll Value>],
1669    ) -> Result<&'ll Value, ()> {
1670        macro_rules! return_error {
1671            ($diag: expr) => {{
1672                bx.sess().dcx().emit_err($diag);
1673                return Err(());
1674            }};
1675        }
1676
1677        let elem_ty = if let ty::Float(f) = in_elem.kind() {
1678            bx.cx.type_float_from_ty(*f)
1679        } else {
1680            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
1681        };
1682
1683        let vec_ty = bx.type_vector(elem_ty, in_len);
1684
1685        let intr_name = match name {
1686            sym::simd_ceil => "llvm.ceil",
1687            sym::simd_fabs => "llvm.fabs",
1688            sym::simd_fcos => "llvm.cos",
1689            sym::simd_fexp2 => "llvm.exp2",
1690            sym::simd_fexp => "llvm.exp",
1691            sym::simd_flog10 => "llvm.log10",
1692            sym::simd_flog2 => "llvm.log2",
1693            sym::simd_flog => "llvm.log",
1694            sym::simd_floor => "llvm.floor",
1695            sym::simd_fma => "llvm.fma",
1696            sym::simd_relaxed_fma => "llvm.fmuladd",
1697            sym::simd_fsin => "llvm.sin",
1698            sym::simd_fsqrt => "llvm.sqrt",
1699            sym::simd_round => "llvm.round",
1700            sym::simd_round_ties_even => "llvm.rint",
1701            sym::simd_trunc => "llvm.trunc",
1702            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
1703        };
1704        Ok(bx.call_intrinsic(
1705            intr_name,
1706            &[vec_ty],
1707            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1708        ))
1709    }
1710
1711    if matches!(
1712        name,
1713        sym::simd_ceil
1714            | sym::simd_fabs
1715            | sym::simd_fcos
1716            | sym::simd_fexp2
1717            | sym::simd_fexp
1718            | sym::simd_flog10
1719            | sym::simd_flog2
1720            | sym::simd_flog
1721            | sym::simd_floor
1722            | sym::simd_fma
1723            | sym::simd_fsin
1724            | sym::simd_fsqrt
1725            | sym::simd_relaxed_fma
1726            | sym::simd_round
1727            | sym::simd_round_ties_even
1728            | sym::simd_trunc
1729    ) {
1730        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
1731    }
1732
1733    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
1734        let elem_ty = match *elem_ty.kind() {
1735            ty::Int(v) => cx.type_int_from_ty(v),
1736            ty::Uint(v) => cx.type_uint_from_ty(v),
1737            ty::Float(v) => cx.type_float_from_ty(v),
1738            ty::RawPtr(_, _) => cx.type_ptr(),
1739            _ => unreachable!(),
1740        };
1741        cx.type_vector(elem_ty, vec_len)
1742    }
1743
1744    if name == sym::simd_gather {
1745        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1746        //             mask: <N x i{M}>) -> <N x T>
1747        // * N: number of elements in the input vectors
1748        // * T: type of the element to load
1749        // * M: any integer width is supported, will be truncated to i1
1750
1751        // All types must be simd vector types
1752
1753        // The second argument must be a simd vector with an element type that's a pointer
1754        // to the element type of the first argument
1755        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1756        let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
1757        // The element type of the third argument must be an integer type of any width:
1758        let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
1759        require_simd!(ret_ty, SimdReturn);
1760
1761        // Of the same length:
1762        require!(
1763            in_len == out_len,
1764            InvalidMonomorphization::SecondArgumentLength {
1765                span,
1766                name,
1767                in_len,
1768                in_ty,
1769                arg_ty: args[1].layout.ty,
1770                out_len
1771            }
1772        );
1773        require!(
1774            in_len == out_len2,
1775            InvalidMonomorphization::ThirdArgumentLength {
1776                span,
1777                name,
1778                in_len,
1779                in_ty,
1780                arg_ty: args[2].layout.ty,
1781                out_len: out_len2
1782            }
1783        );
1784
1785        // The return type must match the first argument type
1786        require!(
1787            ret_ty == in_ty,
1788            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
1789        );
1790
1791        require!(
1792            matches!(
1793                *element_ty1.kind(),
1794                ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
1795            ),
1796            InvalidMonomorphization::ExpectedElementType {
1797                span,
1798                name,
1799                expected_element: element_ty1,
1800                second_arg: args[1].layout.ty,
1801                in_elem,
1802                in_ty,
1803                mutability: ExpectedPointerMutability::Not,
1804            }
1805        );
1806
1807        let mask_elem_bitwidth = require_int_or_uint_ty!(
1808            element_ty2.kind(),
1809            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
1810        );
1811
1812        // Alignment of T must be a constant integer value:
1813        let alignment = bx.align_of(in_elem).bytes();
1814
1815        // Truncate the mask vector to a vector of i1s:
1816        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1817
1818        // Type of the vector of pointers:
1819        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1820
1821        // Type of the vector of elements:
1822        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1823
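        // From LLVM 22 onwards the masked gather/scatter/load/store intrinsics take no explicit
        // alignment operand; the alignment is attached as an `align` attribute on the pointer
        // argument instead (see the callsite attribute below).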
1824        let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
1825            let alignment = bx.const_i32(alignment as i32);
1826            &[args[1].immediate(), alignment, mask, args[0].immediate()]
1827        } else {
1828            &[args[1].immediate(), mask, args[0].immediate()]
1829        };
1830
1831        let call =
1832            bx.call_intrinsic("llvm.masked.gather", &[llvm_elem_vec_ty, llvm_pointer_vec_ty], args);
1833        if llvm_version >= (22, 0, 0) {
1834            crate::attributes::apply_to_callsite(
1835                call,
1836                crate::llvm::AttributePlace::Argument(0),
1837                &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
1838            )
1839        }
1840        return Ok(call);
1841    }
1842
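    /// Resolves a `SimdAlign` const argument to the byte alignment used by the masked
    /// load/store intrinsics: 1 for unaligned, or the element's or the vector's ABI alignment.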
1843    fn llvm_alignment<'ll, 'tcx>(
1844        bx: &mut Builder<'_, 'll, 'tcx>,
1845        alignment: SimdAlign,
1846        vector_ty: Ty<'tcx>,
1847        element_ty: Ty<'tcx>,
1848    ) -> u64 {
1849        match alignment {
1850            SimdAlign::Unaligned => 1,
1851            SimdAlign::Element => bx.align_of(element_ty).bytes(),
1852            SimdAlign::Vector => bx.align_of(vector_ty).bytes(),
1853        }
1854    }
1855
1856    if name == sym::simd_masked_load {
1857        // simd_masked_load<_, _, _, const ALIGN: SimdAlign>(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T>
1858        // * N: number of elements in the input vectors
1859        // * T: type of the element to load
1860        // * M: any integer width is supported, will be truncated to i1
1861        // Loads contiguous elements from memory behind `pointer`, but only for
1862        // those lanes whose `mask` bit is enabled.
1863        // The memory addresses corresponding to the “off” lanes are not accessed.
1864
1865        let alignment = fn_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
1866            .unwrap_leaf()
1867            .to_simd_alignment();
1868
1869        // The element type of the "mask" argument must be an integer type of any width
1870        let mask_ty = in_ty;
1871        let (mask_len, mask_elem) = (in_len, in_elem);
1872
1873        // The second argument must be a pointer matching the element type
1874        let pointer_ty = args[1].layout.ty;
1875
1876        // The last argument is a passthrough vector providing values for disabled lanes
1877        let values_ty = args[2].layout.ty;
1878        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1879
1880        require_simd!(ret_ty, SimdReturn);
1881
1882        // Of the same length:
1883        require!(
1884            values_len == mask_len,
1885            InvalidMonomorphization::ThirdArgumentLength {
1886                span,
1887                name,
1888                in_len: mask_len,
1889                in_ty: mask_ty,
1890                arg_ty: values_ty,
1891                out_len: values_len
1892            }
1893        );
1894
1895        // The return type must match the last argument type
1896        require!(
1897            ret_ty == values_ty,
1898            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
1899        );
1900
1901        require!(
1902            matches!(
1903                *pointer_ty.kind(),
1904                ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
1905            ),
1906            InvalidMonomorphization::ExpectedElementType {
1907                span,
1908                name,
1909                expected_element: values_elem,
1910                second_arg: pointer_ty,
1911                in_elem: values_elem,
1912                in_ty: values_ty,
1913                mutability: ExpectedPointerMutability::Not,
1914            }
1915        );
1916
1917        let m_elem_bitwidth = require_int_or_uint_ty!(
1918            mask_elem.kind(),
1919            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1920        );
1921
1922        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1923
1924        // Resolve the requested `SimdAlign` to a constant byte alignment:
1925        let alignment = llvm_alignment(bx, alignment, values_ty, values_elem);
1926
1927        let llvm_pointer = bx.type_ptr();
1928
1929        // Type of the vector of elements:
1930        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1931
1932        let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
1933            let alignment = bx.const_i32(alignment as i32);
1934
1935            &[args[1].immediate(), alignment, mask, args[2].immediate()]
1936        } else {
1937            &[args[1].immediate(), mask, args[2].immediate()]
1938        };
1939
1940        let call = bx.call_intrinsic("llvm.masked.load", &[llvm_elem_vec_ty, llvm_pointer], args);
1941        if llvm_version >= (22, 0, 0) {
1942            crate::attributes::apply_to_callsite(
1943                call,
1944                crate::llvm::AttributePlace::Argument(0),
1945                &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
1946            )
1947        }
1948        return Ok(call);
1949    }
1950
1951    if name == sym::simd_masked_store {
1952        // simd_masked_store<_, _, _, const ALIGN: SimdAlign>(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> ()
1953        // * N: number of elements in the input vectors
1954        // * T: type of the element to store
1955        // * M: any integer width is supported, will be truncated to i1
1956        // Stores contiguous elements to memory behind `pointer`, but only for
1957        // those lanes whose `mask` bit is enabled.
1958        // The memory addresses corresponding to the “off” lanes are not accessed.
1959
1960        let alignment = fn_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
1961            .unwrap_leaf()
1962            .to_simd_alignment();
1963
1964        // The element type of the "mask" argument must be an integer type of any width
1965        let mask_ty = in_ty;
1966        let (mask_len, mask_elem) = (in_len, in_elem);
1967
1968        // The second argument must be a pointer matching the element type
1969        let pointer_ty = args[1].layout.ty;
1970
1971        // The last argument specifies the values to store to memory
1972        let values_ty = args[2].layout.ty;
1973        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1974
1975        // Of the same length:
1976        require!(
1977            values_len == mask_len,
1978            InvalidMonomorphization::ThirdArgumentLength {
1979                span,
1980                name,
1981                in_len: mask_len,
1982                in_ty: mask_ty,
1983                arg_ty: values_ty,
1984                out_len: values_len
1985            }
1986        );
1987
1988        // The second argument must be a mutable pointer type matching the element type
1989        require!(
1990            matches!(
1991                *pointer_ty.kind(),
1992                ty::RawPtr(p_ty, p_mutbl)
1993                    if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
1994            ),
1995            InvalidMonomorphization::ExpectedElementType {
1996                span,
1997                name,
1998                expected_element: values_elem,
1999                second_arg: pointer_ty,
2000                in_elem: values_elem,
2001                in_ty: values_ty,
2002                mutability: ExpectedPointerMutability::Mut,
2003            }
2004        );
2005
2006        let m_elem_bitwidth = require_int_or_uint_ty!(
2007            mask_elem.kind(),
2008            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
2009        );
2010
2011        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
2012
2013        // Resolve the requested `SimdAlign` to a constant byte alignment:
2014        let alignment = llvm_alignment(bx, alignment, values_ty, values_elem);
2015
2016        let llvm_pointer = bx.type_ptr();
2017
2018        // Type of the vector of elements:
2019        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
2020
2021        let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2022            let alignment = bx.const_i32(alignment as i32);
2023            &[args[2].immediate(), args[1].immediate(), alignment, mask]
2024        } else {
2025            &[args[2].immediate(), args[1].immediate(), mask]
2026        };
2027
2028        let call = bx.call_intrinsic("llvm.masked.store", &[llvm_elem_vec_ty, llvm_pointer], args);
2029        if llvm_version >= (22, 0, 0) {
2030            crate::attributes::apply_to_callsite(
2031                call,
2032                crate::llvm::AttributePlace::Argument(1),
2033                &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2034            )
2035        }
2036        return Ok(call);
2037    }
2038
2039    if name == sym::simd_scatter {
2040        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
2041        //              mask: <N x i{M}>) -> ()
2042        // * N: number of elements in the input vectors
2043        // * T: type of the element to store
2044        // * M: any integer width is supported, will be truncated to i1
2045
2046        // All types must be simd vector types
2047        // The second argument must be a simd vector with an element type that's a pointer
2048        // to the element type of the first argument
2049        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
2050        let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
2051        let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
2052
2053        // Of the same length:
2054        require!(
2055            in_len == element_len1,
2056            InvalidMonomorphization::SecondArgumentLength {
2057                span,
2058                name,
2059                in_len,
2060                in_ty,
2061                arg_ty: args[1].layout.ty,
2062                out_len: element_len1
2063            }
2064        );
2065        require!(
2066            in_len == element_len2,
2067            InvalidMonomorphization::ThirdArgumentLength {
2068                span,
2069                name,
2070                in_len,
2071                in_ty,
2072                arg_ty: args[2].layout.ty,
2073                out_len: element_len2
2074            }
2075        );
2076
2077        require!(
2078            matches!(
2079                *element_ty1.kind(),
2080                ty::RawPtr(p_ty, p_mutbl)
2081                    if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
2082            ),
2083            InvalidMonomorphization::ExpectedElementType {
2084                span,
2085                name,
2086                expected_element: element_ty1,
2087                second_arg: args[1].layout.ty,
2088                in_elem,
2089                in_ty,
2090                mutability: ExpectedPointerMutability::Mut,
2091            }
2092        );
2093
2094        // The element type of the third argument must be an integer type of any width:
2095        let mask_elem_bitwidth = require_int_or_uint_ty!(
2096            element_ty2.kind(),
2097            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
2098        );
2099
2100        // Alignment of T must be a constant integer value:
2101        let alignment = bx.align_of(in_elem).bytes();
2102
2103        // Truncate the mask vector to a vector of i1s:
2104        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2105
2106        // Type of the vector of pointers:
2107        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2108
2109        // Type of the vector of elements:
2110        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2111        let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2112            let alignment = bx.const_i32(alignment as i32);
2113            &[args[0].immediate(), args[1].immediate(), alignment, mask]
2114        } else {
2115            &[args[0].immediate(), args[1].immediate(), mask]
2116        };
2117        let call = bx.call_intrinsic(
2118            "llvm.masked.scatter",
2119            &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
2120            args,
2121        );
2122        if llvm_version >= (22, 0, 0) {
2123            crate::attributes::apply_to_callsite(
2124                call,
2125                crate::llvm::AttributePlace::Argument(1),
2126                &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2127            )
2128        }
2129        return Ok(call);
2130    }
2131
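    // Arithmetic reductions. The ordered forms fold the explicit accumulator in args[1] into the
    // result; the unordered float forms start from the identity element and may reassociate.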
2132    macro_rules! arith_red {
2133        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
2134         $identity:expr) => {
2135            if name == sym::$name {
2136                require!(
2137                    ret_ty == in_elem,
2138                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2139                );
2140                return match in_elem.kind() {
2141                    ty::Int(_) | ty::Uint(_) => {
2142                        let r = bx.$integer_reduce(args[0].immediate());
2143                        if $ordered {
2144                            // if overflow occurs, the result is the
2145                            // mathematical result modulo 2^n:
2146                            Ok(bx.$op(args[1].immediate(), r))
2147                        } else {
2148                            Ok(r)
2149                        }
2150                    }
2151                    ty::Float(f) => {
2152                        let acc = if $ordered {
2153                            // ordered arithmetic reductions take an accumulator
2154                            args[1].immediate()
2155                        } else {
2156                            // unordered arithmetic reductions use the identity accumulator
2157                            match f.bit_width() {
2158                                32 => bx.const_real(bx.type_f32(), $identity),
2159                                64 => bx.const_real(bx.type_f64(), $identity),
2160                                v => return_error!(
2161                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
2162                                        span,
2163                                        name,
2164                                        symbol: sym::$name,
2165                                        in_ty,
2166                                        in_elem,
2167                                        size: v,
2168                                        ret_ty
2169                                    }
2170                                ),
2171                            }
2172                        };
2173                        Ok(bx.$float_reduce(acc, args[0].immediate()))
2174                    }
2175                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2176                        span,
2177                        name,
2178                        symbol: sym::$name,
2179                        in_ty,
2180                        in_elem,
2181                        ret_ty
2182                    }),
2183                };
2184            }
2185        };
2186    }
2187
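    // -0.0 is the identity of fadd (x + -0.0 == x for every x, including +0.0), and 1.0 of fmul.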
2188    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
2189    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
2190    arith_red!(
2191        simd_reduce_add_unordered: vector_reduce_add,
2192        vector_reduce_fadd_reassoc,
2193        false,
2194        add,
2195        -0.0
2196    );
2197    arith_red!(
2198        simd_reduce_mul_unordered: vector_reduce_mul,
2199        vector_reduce_fmul_reassoc,
2200        false,
2201        mul,
2202        1.0
2203    );
2204
2205    macro_rules! minmax_red {
2206        ($name:ident: $int_red:ident, $float_red:ident) => {
2207            if name == sym::$name {
2208                require!(
2209                    ret_ty == in_elem,
2210                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2211                );
2212                return match in_elem.kind() {
2213                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
2214                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
2215                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
2216                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2217                        span,
2218                        name,
2219                        symbol: sym::$name,
2220                        in_ty,
2221                        in_elem,
2222                        ret_ty
2223                    }),
2224                };
2225            }
2226        };
2227    }
2228
2229    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2230    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2231
2232    macro_rules! bitwise_red {
2233        ($name:ident : $red:ident, $boolean:expr) => {
2234            if name == sym::$name {
2235                let input = if !$boolean {
2236                    require!(
2237                        ret_ty == in_elem,
2238                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2239                    );
2240                    args[0].immediate()
2241                } else {
2242                    let bitwidth = match in_elem.kind() {
2243                        ty::Int(i) => {
2244                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2245                        }
2246                        ty::Uint(i) => {
2247                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2248                        }
2249                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2250                            span,
2251                            name,
2252                            symbol: sym::$name,
2253                            in_ty,
2254                            in_elem,
2255                            ret_ty
2256                        }),
2257                    };
2258
2259                    vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
2260                };
2261                return match in_elem.kind() {
2262                    ty::Int(_) | ty::Uint(_) => {
2263                        let r = bx.$red(input);
2264                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
2265                    }
2266                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2267                        span,
2268                        name,
2269                        symbol: sym::$name,
2270                        in_ty,
2271                        in_elem,
2272                        ret_ty
2273                    }),
2274                };
2275            }
2276        };
2277    }
2278
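    // For the boolean reductions (`all`/`any`), the mask is first normalized to <N x i1> and the
    // reduced i1 is zero-extended back to a `bool`.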
2279    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2280    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2281    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2282    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2283    bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2284
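    // Lane-wise pointer casts need no instruction with opaque pointers; after checking that both
    // element types are thin pointers, the input vector is returned unchanged.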
2285    if name == sym::simd_cast_ptr {
2286        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2287        require!(
2288            in_len == out_len,
2289            InvalidMonomorphization::ReturnLengthInputType {
2290                span,
2291                name,
2292                in_len,
2293                in_ty,
2294                ret_ty,
2295                out_len
2296            }
2297        );
2298
2299        match in_elem.kind() {
2300            ty::RawPtr(p_ty, _) => {
2301                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2302                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2303                });
2304                require!(
2305                    metadata.is_unit(),
2306                    InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
2307                );
2308            }
2309            _ => {
2310                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2311            }
2312        }
2313        match out_elem.kind() {
2314            ty::RawPtr(p_ty, _) => {
2315                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2316                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2317                });
2318                require!(
2319                    metadata.is_unit(),
2320                    InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
2321                );
2322            }
2323            _ => {
2324                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2325            }
2326        }
2327
2328        return Ok(args[0].immediate());
2329    }
2330
2331    if name == sym::simd_expose_provenance {
2332        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2333        require!(
2334            in_len == out_len,
2335            InvalidMonomorphization::ReturnLengthInputType {
2336                span,
2337                name,
2338                in_len,
2339                in_ty,
2340                ret_ty,
2341                out_len
2342            }
2343        );
2344
2345        match in_elem.kind() {
2346            ty::RawPtr(_, _) => {}
2347            _ => {
2348                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2349            }
2350        }
2351        match out_elem.kind() {
2352            ty::Uint(ty::UintTy::Usize) => {}
2353            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
2354        }
2355
2356        return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
2357    }
2358
2359    if name == sym::simd_with_exposed_provenance {
2360        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2361        require!(
2362            in_len == out_len,
2363            InvalidMonomorphization::ReturnLengthInputType {
2364                span,
2365                name,
2366                in_len,
2367                in_ty,
2368                ret_ty,
2369                out_len
2370            }
2371        );
2372
2373        match in_elem.kind() {
2374            ty::Uint(ty::UintTy::Usize) => {}
2375            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
2376        }
2377        match out_elem.kind() {
2378            ty::RawPtr(_, _) => {}
2379            _ => {
2380                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2381            }
2382        }
2383
2384        return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
2385    }
2386
2387    if name == sym::simd_cast || name == sym::simd_as {
2388        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2389        require!(
2390            in_len == out_len,
2391            InvalidMonomorphization::ReturnLengthInputType {
2392                span,
2393                name,
2394                in_len,
2395                in_ty,
2396                ret_ty,
2397                out_len
2398            }
2399        );
2400        // casting cares about nominal type, not just structural type
2401        if in_elem == out_elem {
2402            return Ok(args[0].immediate());
2403        }
2404
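        // Classify both element types so that each (input, output) pair below maps onto a single
        // LLVM cast: trunc/zext/sext between integers, fptrunc/fpext between floats, and the
        // int<->float conversions.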
2405        #[derive(Copy, Clone)]
2406        enum Sign {
2407            Unsigned,
2408            Signed,
2409        }
2410        use Sign::*;
2411
2412        enum Style {
2413            Float,
2414            Int(Sign),
2415            Unsupported,
2416        }
2417
2418        let (in_style, in_width) = match in_elem.kind() {
2419            // vectors of pointer-sized integers should've been
2420            // disallowed before here, so this unwrap is safe.
2421            ty::Int(i) => (
2422                Style::Int(Signed),
2423                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2424            ),
2425            ty::Uint(u) => (
2426                Style::Int(Unsigned),
2427                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2428            ),
2429            ty::Float(f) => (Style::Float, f.bit_width()),
2430            _ => (Style::Unsupported, 0),
2431        };
2432        let (out_style, out_width) = match out_elem.kind() {
2433            ty::Int(i) => (
2434                Style::Int(Signed),
2435                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2436            ),
2437            ty::Uint(u) => (
2438                Style::Int(Unsigned),
2439                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2440            ),
2441            ty::Float(f) => (Style::Float, f.bit_width()),
2442            _ => (Style::Unsupported, 0),
2443        };
2444
2445        match (in_style, out_style) {
2446            (Style::Int(sign), Style::Int(_)) => {
2447                return Ok(match in_width.cmp(&out_width) {
2448                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2449                    Ordering::Equal => args[0].immediate(),
2450                    Ordering::Less => match sign {
2451                        Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2452                        Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2453                    },
2454                });
2455            }
2456            (Style::Int(Sign::Signed), Style::Float) => {
2457                return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2458            }
2459            (Style::Int(Sign::Unsigned), Style::Float) => {
2460                return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2461            }
2462            (Style::Float, Style::Int(sign)) => {
2463                return Ok(match (sign, name == sym::simd_as) {
2464                    (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2465                    (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2466                    (_, true) => bx.cast_float_to_int(
2467                        matches!(sign, Sign::Signed),
2468                        args[0].immediate(),
2469                        llret_ty,
2470                    ),
2471                });
2472            }
2473            (Style::Float, Style::Float) => {
2474                return Ok(match in_width.cmp(&out_width) {
2475                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2476                    Ordering::Equal => args[0].immediate(),
2477                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2478                });
2479            }
2480            _ => { /* Unsupported. Fallthrough. */ }
2481        }
2482        return_error!(InvalidMonomorphization::UnsupportedCast {
2483            span,
2484            name,
2485            in_ty,
2486            in_elem,
2487            ret_ty,
2488            out_elem
2489        });
2490    }
2491    macro_rules! arith_binary {
2492        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2493            $(if name == sym::$name {
2494                match in_elem.kind() {
2495                    $($(ty::$p(_))|* => {
2496                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
2497                    })*
2498                    _ => {},
2499                }
2500                return_error!(
2501                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2502                );
2503            })*
2504        }
2505    }
2506    arith_binary! {
2507        simd_add: Uint, Int => add, Float => fadd;
2508        simd_sub: Uint, Int => sub, Float => fsub;
2509        simd_mul: Uint, Int => mul, Float => fmul;
2510        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
2511        simd_rem: Uint => urem, Int => srem, Float => frem;
2512        simd_shl: Uint, Int => shl;
2513        simd_shr: Uint => lshr, Int => ashr;
2514        simd_and: Uint, Int => and;
2515        simd_or: Uint, Int => or;
2516        simd_xor: Uint, Int => xor;
2517        simd_fmax: Float => maxnum;
2518        simd_fmin: Float => minnum;
2520    }
2521    macro_rules! arith_unary {
2522        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2523            $(if name == sym::$name {
2524                match in_elem.kind() {
2525                    $($(ty::$p(_))|* => {
2526                        return Ok(bx.$call(args[0].immediate()))
2527                    })*
2528                    _ => {},
2529                }
2530                return_error!(
2531                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2532                );
2533            })*
2534        }
2535    }
2536    arith_unary! {
2537        simd_neg: Int => neg, Float => fneg;
2538    }
2539
2540    // Unary integer intrinsics
2541    if matches!(
2542        name,
2543        sym::simd_bswap
2544            | sym::simd_bitreverse
2545            | sym::simd_ctlz
2546            | sym::simd_ctpop
2547            | sym::simd_cttz
2548            | sym::simd_funnel_shl
2549            | sym::simd_funnel_shr
2550    ) {
2551        let vec_ty = bx.cx.type_vector(
2552            match *in_elem.kind() {
2553                ty::Int(i) => bx.cx.type_int_from_ty(i),
2554                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
2555                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
2556                    span,
2557                    name,
2558                    in_ty,
2559                    in_elem
2560                }),
2561            },
2562            in_len as u64,
2563        );
2564        let llvm_intrinsic = match name {
2565            sym::simd_bswap => "llvm.bswap",
2566            sym::simd_bitreverse => "llvm.bitreverse",
2567            sym::simd_ctlz => "llvm.ctlz",
2568            sym::simd_ctpop => "llvm.ctpop",
2569            sym::simd_cttz => "llvm.cttz",
2570            sym::simd_funnel_shl => "llvm.fshl",
2571            sym::simd_funnel_shr => "llvm.fshr",
2572            _ => unreachable!(),
2573        };
2574        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
2575
2576        return match name {
2577            // byte swap is a no-op for i8/u8
2578            sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
2579            sym::simd_ctlz | sym::simd_cttz => {
2580                // the second arg is an i1 immediate: passing `true` would make a zero input produce poison, so pass `false`
2581                let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
2582                Ok(bx.call_intrinsic(
2583                    llvm_intrinsic,
2584                    &[vec_ty],
2585                    &[args[0].immediate(), dont_poison_on_zero],
2586                ))
2587            }
2588            sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
2589                // simple unary argument cases
2590                Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[args[0].immediate()]))
2591            }
2592            sym::simd_funnel_shl | sym::simd_funnel_shr => Ok(bx.call_intrinsic(
2593                llvm_intrinsic,
2594                &[vec_ty],
2595                &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
2596            )),
2597            _ => unreachable!(),
2598        };
2599    }
2600
2601    if name == sym::simd_arith_offset {
2602        // This also checks that the first operand is a ptr type.
2603        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
2604            span_bug!(span, "must be called with a vector of pointer types as first argument")
2605        });
2606        let layout = bx.layout_of(pointee);
2607        let ptrs = args[0].immediate();
2608        // The second argument must be a ptr-sized integer.
2609        // (We don't care about the signedness, this is wrapping anyway.)
2610        let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
2611        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
2612            span_bug!(
2613                span,
2614                "must be called with a vector of pointer-sized integers as second argument"
2615            );
2616        }
2617        let offsets = args[1].immediate();
2618
2619        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
2620    }
2621
2622    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
2623        let lhs = args[0].immediate();
2624        let rhs = args[1].immediate();
2625        let is_add = name == sym::simd_saturating_add;
2626        let (signed, elem_ty) = match *in_elem.kind() {
2627            ty::Int(i) => (true, bx.cx.type_int_from_ty(i)),
2628            ty::Uint(i) => (false, bx.cx.type_uint_from_ty(i)),
2629            _ => {
2630                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
2631                    span,
2632                    name,
2633                    expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
2634                    vector_type: args[0].layout.ty
2635                });
2636            }
2637        };
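        // Selects e.g. `llvm.sadd.sat` for signed add or `llvm.usub.sat` for unsigned sub.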
2638        let llvm_intrinsic = format!(
2639            "llvm.{}{}.sat",
2640            if signed { 's' } else { 'u' },
2641            if is_add { "add" } else { "sub" },
2642        );
2643        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
2644
2645        return Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[lhs, rhs]));
2646    }
2647
2648    span_bug!(span, "unknown SIMD intrinsic");
2649}