rustc_codegen_llvm/intrinsic.rs

use std::assert_matches::assert_matches;
use std::cmp::Ordering;

use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size};
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::codegen_attrs::autodiff_attrs;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_hir::{self as hir};
use rustc_middle::mir::BinOp;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
use rustc_middle::ty::{self, GenericArgsRef, Instance, Ty, TyCtxt, TypingEnv};
use rustc_middle::{bug, span_bug};
use rustc_span::{Span, Symbol, sym};
use rustc_symbol_mangling::{mangle_internal_symbol, symbol_name_for_instance_in_crate};
use rustc_target::callconv::PassMode;
use rustc_target::spec::PanicStrategy;
use tracing::debug;

use crate::abi::FnAbiLlvmExt;
use crate::builder::Builder;
use crate::builder::autodiff::{adjust_activity_to_abi, generate_enzyme_call};
use crate::context::CodegenCx;
use crate::errors::AutoDiffWithoutEnable;
use crate::llvm::{self, Metadata};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;

fn call_simple_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    args: &[OperandRef<'tcx, &'ll Value>],
) -> Option<&'ll Value> {
    let (base_name, type_params): (&'static str, &[&'ll Type]) = match name {
        sym::sqrtf16 => ("llvm.sqrt", &[bx.type_f16()]),
        sym::sqrtf32 => ("llvm.sqrt", &[bx.type_f32()]),
        sym::sqrtf64 => ("llvm.sqrt", &[bx.type_f64()]),
        sym::sqrtf128 => ("llvm.sqrt", &[bx.type_f128()]),

        sym::powif16 => ("llvm.powi", &[bx.type_f16(), bx.type_i32()]),
        sym::powif32 => ("llvm.powi", &[bx.type_f32(), bx.type_i32()]),
        sym::powif64 => ("llvm.powi", &[bx.type_f64(), bx.type_i32()]),
        sym::powif128 => ("llvm.powi", &[bx.type_f128(), bx.type_i32()]),

        sym::sinf16 => ("llvm.sin", &[bx.type_f16()]),
        sym::sinf32 => ("llvm.sin", &[bx.type_f32()]),
        sym::sinf64 => ("llvm.sin", &[bx.type_f64()]),
        sym::sinf128 => ("llvm.sin", &[bx.type_f128()]),

        sym::cosf16 => ("llvm.cos", &[bx.type_f16()]),
        sym::cosf32 => ("llvm.cos", &[bx.type_f32()]),
        sym::cosf64 => ("llvm.cos", &[bx.type_f64()]),
        sym::cosf128 => ("llvm.cos", &[bx.type_f128()]),

        sym::powf16 => ("llvm.pow", &[bx.type_f16()]),
        sym::powf32 => ("llvm.pow", &[bx.type_f32()]),
        sym::powf64 => ("llvm.pow", &[bx.type_f64()]),
        sym::powf128 => ("llvm.pow", &[bx.type_f128()]),

        sym::expf16 => ("llvm.exp", &[bx.type_f16()]),
        sym::expf32 => ("llvm.exp", &[bx.type_f32()]),
        sym::expf64 => ("llvm.exp", &[bx.type_f64()]),
        sym::expf128 => ("llvm.exp", &[bx.type_f128()]),

        sym::exp2f16 => ("llvm.exp2", &[bx.type_f16()]),
        sym::exp2f32 => ("llvm.exp2", &[bx.type_f32()]),
        sym::exp2f64 => ("llvm.exp2", &[bx.type_f64()]),
        sym::exp2f128 => ("llvm.exp2", &[bx.type_f128()]),

        sym::logf16 => ("llvm.log", &[bx.type_f16()]),
        sym::logf32 => ("llvm.log", &[bx.type_f32()]),
        sym::logf64 => ("llvm.log", &[bx.type_f64()]),
        sym::logf128 => ("llvm.log", &[bx.type_f128()]),

        sym::log10f16 => ("llvm.log10", &[bx.type_f16()]),
        sym::log10f32 => ("llvm.log10", &[bx.type_f32()]),
        sym::log10f64 => ("llvm.log10", &[bx.type_f64()]),
        sym::log10f128 => ("llvm.log10", &[bx.type_f128()]),

        sym::log2f16 => ("llvm.log2", &[bx.type_f16()]),
        sym::log2f32 => ("llvm.log2", &[bx.type_f32()]),
        sym::log2f64 => ("llvm.log2", &[bx.type_f64()]),
        sym::log2f128 => ("llvm.log2", &[bx.type_f128()]),

        sym::fmaf16 => ("llvm.fma", &[bx.type_f16()]),
        sym::fmaf32 => ("llvm.fma", &[bx.type_f32()]),
        sym::fmaf64 => ("llvm.fma", &[bx.type_f64()]),
        sym::fmaf128 => ("llvm.fma", &[bx.type_f128()]),

        sym::fmuladdf16 => ("llvm.fmuladd", &[bx.type_f16()]),
        sym::fmuladdf32 => ("llvm.fmuladd", &[bx.type_f32()]),
        sym::fmuladdf64 => ("llvm.fmuladd", &[bx.type_f64()]),
        sym::fmuladdf128 => ("llvm.fmuladd", &[bx.type_f128()]),

        sym::fabsf16 => ("llvm.fabs", &[bx.type_f16()]),
        sym::fabsf32 => ("llvm.fabs", &[bx.type_f32()]),
        sym::fabsf64 => ("llvm.fabs", &[bx.type_f64()]),
        sym::fabsf128 => ("llvm.fabs", &[bx.type_f128()]),

        sym::minnumf16 => ("llvm.minnum", &[bx.type_f16()]),
        sym::minnumf32 => ("llvm.minnum", &[bx.type_f32()]),
        sym::minnumf64 => ("llvm.minnum", &[bx.type_f64()]),
        sym::minnumf128 => ("llvm.minnum", &[bx.type_f128()]),

        // FIXME: LLVM currently mis-compiles these intrinsics; re-enable them
        // once llvm/llvm-project#{139380,139381,140445} are fixed.
        //sym::minimumf16 => ("llvm.minimum", &[bx.type_f16()]),
        //sym::minimumf32 => ("llvm.minimum", &[bx.type_f32()]),
        //sym::minimumf64 => ("llvm.minimum", &[bx.type_f64()]),
        //sym::minimumf128 => ("llvm.minimum", &[bx.type_f128()]),
        //
        sym::maxnumf16 => ("llvm.maxnum", &[bx.type_f16()]),
        sym::maxnumf32 => ("llvm.maxnum", &[bx.type_f32()]),
        sym::maxnumf64 => ("llvm.maxnum", &[bx.type_f64()]),
        sym::maxnumf128 => ("llvm.maxnum", &[bx.type_f128()]),

        // FIXME: LLVM currently mis-compiles these intrinsics; re-enable them
        // once llvm/llvm-project#{139380,139381,140445} are fixed.
        //sym::maximumf16 => ("llvm.maximum", &[bx.type_f16()]),
        //sym::maximumf32 => ("llvm.maximum", &[bx.type_f32()]),
        //sym::maximumf64 => ("llvm.maximum", &[bx.type_f64()]),
        //sym::maximumf128 => ("llvm.maximum", &[bx.type_f128()]),
        //
        sym::copysignf16 => ("llvm.copysign", &[bx.type_f16()]),
        sym::copysignf32 => ("llvm.copysign", &[bx.type_f32()]),
        sym::copysignf64 => ("llvm.copysign", &[bx.type_f64()]),
        sym::copysignf128 => ("llvm.copysign", &[bx.type_f128()]),

        sym::floorf16 => ("llvm.floor", &[bx.type_f16()]),
        sym::floorf32 => ("llvm.floor", &[bx.type_f32()]),
        sym::floorf64 => ("llvm.floor", &[bx.type_f64()]),
        sym::floorf128 => ("llvm.floor", &[bx.type_f128()]),

        sym::ceilf16 => ("llvm.ceil", &[bx.type_f16()]),
        sym::ceilf32 => ("llvm.ceil", &[bx.type_f32()]),
        sym::ceilf64 => ("llvm.ceil", &[bx.type_f64()]),
        sym::ceilf128 => ("llvm.ceil", &[bx.type_f128()]),

        sym::truncf16 => ("llvm.trunc", &[bx.type_f16()]),
        sym::truncf32 => ("llvm.trunc", &[bx.type_f32()]),
        sym::truncf64 => ("llvm.trunc", &[bx.type_f64()]),
        sym::truncf128 => ("llvm.trunc", &[bx.type_f128()]),

        // We could use any of `rint`, `nearbyint`, or `roundeven` for this --
        // they are all identical in semantics when assuming the default FP
        // environment. `rint` is what has historically been used here.
        sym::round_ties_even_f16 => ("llvm.rint", &[bx.type_f16()]),
        sym::round_ties_even_f32 => ("llvm.rint", &[bx.type_f32()]),
        sym::round_ties_even_f64 => ("llvm.rint", &[bx.type_f64()]),
        sym::round_ties_even_f128 => ("llvm.rint", &[bx.type_f128()]),

        sym::roundf16 => ("llvm.round", &[bx.type_f16()]),
        sym::roundf32 => ("llvm.round", &[bx.type_f32()]),
        sym::roundf64 => ("llvm.round", &[bx.type_f64()]),
        sym::roundf128 => ("llvm.round", &[bx.type_f128()]),

        _ => return None,
    };
    Some(bx.call_intrinsic(
        base_name,
        type_params,
        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
    ))
}

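// Illustrative sketch of the mapping above (assuming LLVM's usual suffix-based
// mangling of overloaded intrinsics): lowering `sym::sqrtf32` picks the
// overloaded `llvm.sqrt` intrinsic instantiated at `f32`, producing IR like
//
//     %r = call float @llvm.sqrt.f32(float %x)
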
impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, &'ll Value>],
        result: PlaceRef<'tcx, &'ll Value>,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let tcx = self.tcx;

        let name = tcx.item_name(instance.def_id());
        let fn_args = instance.args;

        let simple = call_simple_intrinsic(self, name, args);
        let llval = match name {
            _ if simple.is_some() => simple.unwrap(),
            sym::ptr_mask => {
                let ptr = args[0].immediate();
                self.call_intrinsic(
                    "llvm.ptrmask",
                    &[self.val_ty(ptr), self.type_isize()],
                    &[ptr, args[1].immediate()],
                )
            }
            sym::autodiff => {
                codegen_autodiff(self, tcx, instance, args, result);
                return Ok(());
            }
            sym::is_val_statically_known => {
                if let OperandValue::Immediate(imm) = args[0].val {
                    self.call_intrinsic(
                        "llvm.is.constant",
                        &[args[0].layout.immediate_llvm_type(self.cx)],
                        &[imm],
                    )
                } else {
                    self.const_bool(false)
                }
            }
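            // A sketch of the semantics (as documented for LLVM's
            // `llvm.is.constant`): the call folds to `true` only if the
            // optimizer can prove the value is a constant, and to `false`
            // otherwise, which lets callers guard const-only fast paths like
            // `if is_val_statically_known(n) && n.is_power_of_two() { .. }`.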
            sym::select_unpredictable => {
                let cond = args[0].immediate();
                assert_eq!(args[1].layout, args[2].layout);
                let select = |bx: &mut Self, true_val, false_val| {
                    let result = bx.select(cond, true_val, false_val);
                    bx.set_unpredictable(&result);
                    result
                };
                match (args[1].val, args[2].val) {
                    (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
                        assert!(true_val.llextra.is_none());
                        assert!(false_val.llextra.is_none());
                        assert_eq!(true_val.align, false_val.align);
                        let ptr = select(self, true_val.llval, false_val.llval);
                        let selected =
                            OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
                        selected.store(self, result);
                        return Ok(());
                    }
                    (OperandValue::Immediate(_), OperandValue::Immediate(_))
                    | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
                        let true_val = args[1].immediate_or_packed_pair(self);
                        let false_val = args[2].immediate_or_packed_pair(self);
                        select(self, true_val, false_val)
                    }
                    (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
                    _ => span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
                }
            }
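            // Note on the lowering (hedged): `set_unpredictable` attaches
            // `!unpredictable` metadata to the emitted `select`, which asks
            // LLVM to prefer a branchless (e.g. `cmov`-style) lowering rather
            // than turning the select back into a conditional branch.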
            sym::catch_unwind => {
                catch_unwind_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    result,
                );
                return Ok(());
            }
            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
            sym::va_copy => {
                let dest = args[0].immediate();
                self.call_intrinsic(
                    "llvm.va_copy",
                    &[self.val_ty(dest)],
                    &[dest, args[1].immediate()],
                )
            }
            sym::va_arg => {
                match result.layout.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        match scalar.primitive() {
                            Primitive::Int(..) => {
                                if self.cx().size_of(result.layout.ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, result.layout.llvm_type(self))
                                } else {
                                    emit_va_arg(self, args[0], result.layout.ty)
                                }
                            }
                            Primitive::Float(Float::F16) => {
                                bug!("the va_arg intrinsic does not work with `f16`")
                            }
                            Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
                                emit_va_arg(self, args[0], result.layout.ty)
                            }
                            // `va_arg` should never be used with the return type `f32`,
                            // since C variadic arguments are always promoted to `double`.
                            Primitive::Float(Float::F32) => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                            Primitive::Float(Float::F128) => {
                                bug!("the va_arg intrinsic does not work with `f128`")
                            }
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }

            sym::volatile_load | sym::unaligned_volatile_load => {
                let ptr = args[0].immediate();
                let load = self.volatile_load(result.layout.llvm_type(self), ptr);
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    result.layout.align.abi.bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                if !result.layout.is_zst() {
                    self.store_to_place(load, result.val);
                }
                return Ok(());
            }
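            // Worked detail (hedged): the unaligned variant pins the load's
            // alignment to 1 byte so LLVM cannot assume any pointer alignment,
            // while the plain `volatile_load` keeps the type's ABI alignment
            // (e.g. 4 bytes for a `u32` load on most targets).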
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                let ptr = args[0].immediate();
                let locality = fn_args.const_at(1).to_value().valtree.unwrap_leaf().to_i32();
                self.call_intrinsic(
                    "llvm.prefetch",
                    &[self.val_ty(ptr)],
                    &[
                        ptr,
                        self.const_i32(rw),
                        self.const_i32(locality),
                        self.const_i32(cache_type),
                    ],
                )
            }
            sym::carrying_mul_add => {
                let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);

                let wide_llty = self.type_ix(size.bits() * 2);
                let args = args.as_array().unwrap();
                let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));

                let wide = if signed {
                    let prod = self.unchecked_smul(a, b);
                    let acc = self.unchecked_sadd(prod, c);
                    self.unchecked_sadd(acc, d)
                } else {
                    let prod = self.unchecked_umul(a, b);
                    let acc = self.unchecked_uadd(prod, c);
                    self.unchecked_uadd(acc, d)
                };

                let narrow_llty = self.type_ix(size.bits());
                let low = self.trunc(wide, narrow_llty);
                let bits_const = self.const_uint(wide_llty, size.bits());
                // No need for ashr when signed; LLVM changes it to lshr anyway.
                let high = self.lshr(wide, bits_const);
                // FIXME: could be `trunc nuw`, even for signed.
                let high = self.trunc(high, narrow_llty);

                let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
                let pair = self.const_poison(pair_llty);
                let pair = self.insert_value(pair, low, 0);
                let pair = self.insert_value(pair, high, 1);
                pair
            }
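            // Worked example of why the 2N-bit widening above cannot overflow
            // (unsigned case, N = 8): the largest possible value is
            // 255 * 255 + 255 + 255 = 65535 = 2^16 - 1, which exactly fits in
            // the wide 16-bit integer; the analogous bound holds for every
            // width and for the signed variant.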
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::rotate_left
            | sym::rotate_right
            | sym::saturating_add
            | sym::saturating_sub
            | sym::unchecked_funnel_shl
            | sym::unchecked_funnel_shr => {
                let ty = args[0].layout.ty;
                if !ty.is_integral() {
                    tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                    return Ok(());
                }
                let (size, signed) = ty.int_size_and_signed(self.tcx);
                let width = size.bits();
                let llty = self.type_ix(width);
                match name {
                    sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => {
                        let y =
                            self.const_bool(name == sym::ctlz_nonzero || name == sym::cttz_nonzero);
                        let llvm_name = if name == sym::ctlz || name == sym::ctlz_nonzero {
                            "llvm.ctlz"
                        } else {
                            "llvm.cttz"
                        };
                        let ret =
                            self.call_intrinsic(llvm_name, &[llty], &[args[0].immediate(), y]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::ctpop => {
                        let ret =
                            self.call_intrinsic("llvm.ctpop", &[llty], &[args[0].immediate()]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::bswap => {
                        if width == 8 {
                            args[0].immediate() // byte-swapping a u8/i8 is a no-op
                        } else {
                            self.call_intrinsic("llvm.bswap", &[llty], &[args[0].immediate()])
                        }
                    }
                    sym::bitreverse => {
                        self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()])
                    }
                    sym::rotate_left
                    | sym::rotate_right
                    | sym::unchecked_funnel_shl
                    | sym::unchecked_funnel_shr => {
                        let is_left = name == sym::rotate_left || name == sym::unchecked_funnel_shl;
                        let lhs = args[0].immediate();
                        let (rhs, raw_shift) =
                            if name == sym::rotate_left || name == sym::rotate_right {
                                // rotate = funnel shift with the first two args the same
                                (lhs, args[1].immediate())
                            } else {
                                (args[1].immediate(), args[2].immediate())
                            };
                        let llvm_name = format!("llvm.fsh{}", if is_left { 'l' } else { 'r' });

                        // LLVM expects the shift amount to have the same type as the
                        // values, but Rust always uses `u32`.
                        let raw_shift = self.intcast(raw_shift, self.val_ty(lhs), false);

                        self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs, raw_shift])
                    }
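                    // Concrete example: `rotate_left(x, n)` becomes
                    // `llvm.fshl(x, x, n)`, so for `x = 0b1000_0001u8` and
                    // `n = 1` the result is `0b0000_0011` -- the bit shifted
                    // out on the left re-enters on the right.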
                    sym::saturating_add | sym::saturating_sub => {
                        let is_add = name == sym::saturating_add;
                        let lhs = args[0].immediate();
                        let rhs = args[1].immediate();
                        let llvm_name = format!(
                            "llvm.{}{}.sat",
                            if signed { 's' } else { 'u' },
                            if is_add { "add" } else { "sub" },
                        );
                        self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs])
                    }
                    _ => bug!(),
                }
            }

            sym::raw_eq => {
                use BackendRepr::*;
                let tp_ty = fn_args.type_at(0);
                let layout = self.layout_of(tp_ty).layout;
                let use_integer_compare = match layout.backend_repr() {
                    Scalar(_) | ScalarPair(_, _) => true,
                    SimdVector { .. } => false,
                    Memory { .. } => {
                        // For rusty ABIs, small aggregates are actually passed
                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                        // so we re-use that same threshold here.
                        layout.size() <= self.data_layout().pointer_size() * 2
                    }
                };

                let a = args[0].immediate();
                let b = args[1].immediate();
                if layout.size().bytes() == 0 {
                    self.const_bool(true)
                } else if use_integer_compare {
                    let integer_ty = self.type_ix(layout.size().bits());
                    let a_val = self.load(integer_ty, a, layout.align().abi);
                    let b_val = self.load(integer_ty, b, layout.align().abi);
                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
                } else {
                    let n = self.const_usize(layout.size().bytes());
                    let cmp = self.call_intrinsic("memcmp", &[], &[a, b, n]);
                    self.icmp(IntPredicate::IntEQ, cmp, self.const_int(self.type_int(), 0))
                }
            }
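            // For scale (hedged): with 8-byte pointers, the `Memory` branch
            // above compares aggregates of up to 16 bytes as a single wide
            // integer load plus `icmp eq`, and only larger aggregates fall
            // back to the `memcmp` call.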

            sym::compare_bytes => {
                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
                let cmp = self.call_intrinsic(
                    "memcmp",
                    &[],
                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
                );
                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
                self.sext(cmp, self.type_ix(32))
            }

            sym::black_box => {
                args[0].val.store(self, result);
                let result_val_span = [result.val.llval];
                // We need to "use" the argument in some way LLVM can't introspect, and on
                // targets that support it we can typically leverage inline assembly to do
                // this. LLVM's interpretation of inline assembly is that it's, well, a black
                // box. This isn't the greatest implementation since it probably deoptimizes
                // more than we want, but it's so far good enough.
                //
                // For zero-sized types, the location pointed to by the result may be
                // uninitialized. Do not "use" the result in this case; instead just clobber
                // the memory.
                let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
                    ("~{memory}", &[])
                } else {
                    ("r,~{memory}", &result_val_span)
                };
                crate::asm::inline_asm_call(
                    self,
                    "",
                    constraint,
                    inputs,
                    self.type_void(),
                    &[],
                    true,
                    false,
                    llvm::AsmDialect::Att,
                    &[span],
                    false,
                    None,
                    None,
                )
                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

                // We have copied the value to `result` already.
                return Ok(());
            }
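            // Rough shape of what this emits (an assumption based on LLVM's
            // textual IR rendering, not verified output):
            //
            //     call void asm sideeffect "", "r,~{memory}"(ptr %result)
            //
            // i.e. no actual instructions, just an opaque use of the pointer
            // plus a memory clobber the optimizer must respect.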

            _ if name.as_str().starts_with("simd_") => {
                // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                // This gives them the expected layout of a regular #[repr(simd)] vector.
                let mut loaded_args = Vec::new();
                for arg in args {
                    loaded_args.push(
                        // #[repr(packed, simd)] vectors are passed like arrays (as references,
                        // with reduced alignment and no padding) rather than as immediates.
                        // We can use a vector load to fix the layout and turn the argument
                        // into an immediate.
                        if arg.layout.ty.is_simd()
                            && let OperandValue::Ref(place) = arg.val
                        {
                            let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                            let elem_ll_ty = match elem_ty.kind() {
                                ty::Float(f) => self.type_float_from_ty(*f),
                                ty::Int(i) => self.type_int_from_ty(*i),
                                ty::Uint(u) => self.type_uint_from_ty(*u),
                                ty::RawPtr(_, _) => self.type_ptr(),
                                _ => unreachable!(),
                            };
                            let loaded =
                                self.load_from_place(self.type_vector(elem_ll_ty, size), place);
                            OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
                        } else {
                            *arg
                        },
                    );
                }

                let llret_ty = if result.layout.ty.is_simd()
                    && let BackendRepr::Memory { .. } = result.layout.backend_repr
                {
                    let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                    let elem_ll_ty = match elem_ty.kind() {
                        ty::Float(f) => self.type_float_from_ty(*f),
                        ty::Int(i) => self.type_int_from_ty(*i),
                        ty::Uint(u) => self.type_uint_from_ty(*u),
                        ty::RawPtr(_, _) => self.type_ptr(),
                        _ => unreachable!(),
                    };
                    self.type_vector(elem_ll_ty, size)
                } else {
                    result.layout.llvm_type(self)
                };

                match generic_simd_intrinsic(
                    self,
                    name,
                    fn_args,
                    &loaded_args,
                    result.layout.ty,
                    llret_ty,
                    span,
                ) {
                    Ok(llval) => llval,
                    // If there was an error, just skip this invocation... we'll abort compilation
                    // anyway, but we can keep codegen'ing to find more errors.
                    Err(()) => return Ok(()),
                }
            }

            _ => {
                debug!("unknown intrinsic '{}' -- falling back to default body", name);
                // Call the fallback body instead of generating the intrinsic code
                return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
            }
        };

        if result.layout.ty.is_bool() {
            let val = self.from_immediate(llval);
            self.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            self.store_to_place(llval, result.val);
        }
        Ok(())
    }

    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[], &[]);
    }

    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[], &[val]);
        }
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic(
                "llvm.expect",
                &[self.type_i1()],
                &[cond, self.const_bool(expected)],
            )
        } else {
            cond
        }
    }

    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &'ll Metadata,
    ) -> Self::Value {
        let typeid = self.get_metadata_value(typeid);
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load = self.call_intrinsic(
            "llvm.type.checked.load",
            &[],
            &[llvtable, vtable_byte_offset, typeid],
        );
        self.extract_value(type_checked_load, 0)
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[self.val_ty(va_list)], &[va_list])
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[self.val_ty(va_list)], &[va_list])
    }
}

fn catch_unwind_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if wants_wasm_eh(bx.sess()) {
        codegen_wasm_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.os == "emscripten" {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}
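
// Hedged usage note: all of the lowerings below implement the same contract as
// the `catch_unwind` intrinsic itself -- the returned `i32` is 0 when
// `try_func` returned normally and 1 when a panic was caught and handed to
// `catch_func`.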

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM,
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of this writing LLVM
// does not recommend using them, as the old instructions are still better
// optimized.
fn codegen_msvc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
        //
        //   catchpad_rust:
        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   catchpad_foreign:
        //      %tok = catchpad within %cs [null, 64, null]
        //      call %catch_func(%data, null)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          void* x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void *data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          } catch(...) {
        //              catch_func(data, NULL);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let slot = bx.alloca(ptr_size, ptr_align);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // module.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in library/panic_unwind/src/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
        let tydesc = bx.declare_global(
            &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"),
            bx.val_ty(type_info),
        );

        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        llvm::set_initializer(tydesc, type_info);

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        bx.switch_to_block(catchpad_rust);
        let flags = bx.const_i32(8);
        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // The flag value of 64 indicates a "catch-all".
        bx.switch_to_block(catchpad_foreign);
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null, flags, null]);
        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// WASM's definition of the `rust_try` function.
fn codegen_wasm_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad = bx.append_sibling_block("catchpad");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [null]
        //      %ptr = call @llvm.wasm.get.exception(token %tok)
        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad]);

        bx.switch_to_block(catchpad);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null]);

        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[], &[funclet.cleanuppad()]);
        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[], &[funclet.cleanuppad()]);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        bx.switch_to_block(catch);
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_ptr());
        bx.add_clause(vals, tydesc);
        let ptr = bx.extract_value(vals, 0);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Variant of codegen_gnu_try used for emscripten, where Rust panics are
// implemented using C++ exceptions. Here we use exceptions of a specific type
// (`struct rust_panic`) to represent Rust panics.
fn codegen_emcc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, %selector) = landingpad
        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
        //      %is_rust_panic = %selector == %rust_typeid
        //      %catch_data = alloca { i8*, i8 }
        //      %catch_data[0] = %ptr
        //      %catch_data[1] = %is_rust_panic
        //      call %catch_func(%data, %catch_data)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        bx.switch_to_block(catch);
        let tydesc = bx.eh_catch_typeinfo();
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
        bx.add_clause(vals, tydesc);
        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
        let ptr = bx.extract_value(vals, 0);
        let selector = bx.extract_value(vals, 1);

        // Check if the typeid we got is the one for a Rust panic.
        let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[bx.val_ty(tydesc)], &[tydesc]);
        let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
        let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());

        // We need to pass two values to catch_func (ptr and is_rust_panic), so
        // create an alloca and pass a pointer to that.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let i8_align = bx.tcx().data_layout.i8_align.abi;
        // Required in order for there to be no padding between the fields.
        assert!(i8_align <= ptr_align);
        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
        bx.store(ptr, catch_data, ptr_align);
        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
        bx.store(is_rust_panic, catch_data_1, i8_align);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    name: &str,
    rust_fn_sig: ty::PolyFnSig<'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
    let llty = fn_abi.llvm_type(cx);
    let llfn = cx.declare_fn(name, fn_abi, None);
    cx.set_frame_pointer_type(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
    let llbb = Builder::append_block(cx, llfn, "entry-block");
    let bx = Builder::build(cx, llbb);
    codegen(bx);
    (llty, llfn)
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
    // `unsafe fn(*mut i8) -> ()`
    let try_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(*mut i8, *mut i8) -> ()`
    let catch_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p, i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
        [try_fn_ty, i8p, catch_fn_ty],
        tcx.types.i32,
        false,
        hir::Safety::Unsafe,
        ExternAbi::Rust,
    ));
    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

fn codegen_autodiff<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    tcx: TyCtxt<'tcx>,
    instance: ty::Instance<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    result: PlaceRef<'tcx, &'ll Value>,
) {
    if !tcx.sess.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::Enable) {
        let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutEnable);
    }

    let fn_args = instance.args;
    let callee_ty = instance.ty(tcx, bx.typing_env());

    let sig = callee_ty.fn_sig(tcx).skip_binder();

    let ret_ty = sig.output();
    let llret_ty = bx.layout_of(ret_ty).llvm_type(bx);

    // Get the source function, the function to differentiate, and its autodiff attrs.
    let (source_id, source_args) = match fn_args.into_type_list(tcx)[0].kind() {
        ty::FnDef(def_id, source_params) => (def_id, source_params),
        _ => bug!("invalid autodiff intrinsic args"),
    };

    let fn_source = match Instance::try_resolve(tcx, bx.cx.typing_env(), *source_id, source_args) {
        Ok(Some(instance)) => instance,
        Ok(None) => bug!(
            "could not resolve ({:?}, {:?}) to a specific autodiff instance",
            source_id,
            source_args
        ),
        Err(_) => {
            // An error has already been emitted
            return;
        }
    };

    let source_symbol = symbol_name_for_instance_in_crate(tcx, fn_source.clone(), LOCAL_CRATE);
    let Some(fn_to_diff) = bx.cx.get_function(&source_symbol) else {
        bug!("could not find source function")
    };

    let (diff_id, diff_args) = match fn_args.into_type_list(tcx)[1].kind() {
        ty::FnDef(def_id, diff_args) => (def_id, diff_args),
        _ => bug!("invalid autodiff intrinsic args"),
    };

    let fn_diff = match Instance::try_resolve(tcx, bx.cx.typing_env(), *diff_id, diff_args) {
        Ok(Some(instance)) => instance,
        Ok(None) => bug!(
            "could not resolve ({:?}, {:?}) to a specific autodiff instance",
            diff_id,
            diff_args
        ),
        Err(_) => {
            // An error has already been emitted
            return;
        }
    };

    let val_arr = get_args_from_tuple(bx, args[2], fn_diff);
    let diff_symbol = symbol_name_for_instance_in_crate(tcx, fn_diff.clone(), LOCAL_CRATE);

    let Some(mut diff_attrs) = autodiff_attrs(tcx, fn_diff.def_id()) else {
        bug!("could not find autodiff attrs")
    };

    adjust_activity_to_abi(
        tcx,
        fn_source.ty(tcx, TypingEnv::fully_monomorphized()),
        &mut diff_attrs.input_activity,
    );

    // Build the body: emit the actual call into Enzyme.
    generate_enzyme_call(
        bx,
        bx.cx,
        fn_to_diff,
        &diff_symbol,
        llret_ty,
        &val_arr,
        diff_attrs.clone(),
        result,
    );
}

fn get_args_from_tuple<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    tuple_op: OperandRef<'tcx, &'ll Value>,
    fn_instance: Instance<'tcx>,
) -> Vec<&'ll Value> {
    let cx = bx.cx;
    let fn_abi = cx.fn_abi_of_instance(fn_instance, ty::List::empty());

    match tuple_op.val {
        OperandValue::Immediate(val) => vec![val],
        OperandValue::Pair(v1, v2) => vec![v1, v2],
        OperandValue::Ref(ptr) => {
            let tuple_place = PlaceRef { val: ptr, layout: tuple_op.layout };

            let mut result = Vec::with_capacity(fn_abi.args.len());
            let mut tuple_index = 0;

            for arg in &fn_abi.args {
                match arg.mode {
                    PassMode::Ignore => {}
                    PassMode::Direct(_) | PassMode::Cast { .. } => {
                        let field = tuple_place.project_field(bx, tuple_index);
                        let llvm_ty = field.layout.llvm_type(bx.cx);
                        let val = bx.load(llvm_ty, field.val.llval, field.val.align);
                        result.push(val);
                        tuple_index += 1;
                    }
                    PassMode::Pair(_, _) => {
                        let field = tuple_place.project_field(bx, tuple_index);
                        let llvm_ty = field.layout.llvm_type(bx.cx);
                        let pair_val = bx.load(llvm_ty, field.val.llval, field.val.align);
                        result.push(bx.extract_value(pair_val, 0));
                        result.push(bx.extract_value(pair_val, 1));
                        tuple_index += 1;
                    }
                    PassMode::Indirect { .. } => {
                        let field = tuple_place.project_field(bx, tuple_index);
                        result.push(field.val.llval);
                        tuple_index += 1;
                    }
                }
            }

            result
        }

        OperandValue::ZeroSized => vec![],
    }
}
1277
1278fn generic_simd_intrinsic<'ll, 'tcx>(
1279    bx: &mut Builder<'_, 'll, 'tcx>,
1280    name: Symbol,
1281    fn_args: GenericArgsRef<'tcx>,
1282    args: &[OperandRef<'tcx, &'ll Value>],
1283    ret_ty: Ty<'tcx>,
1284    llret_ty: &'ll Type,
1285    span: Span,
1286) -> Result<&'ll Value, ()> {
1287    macro_rules! return_error {
1288        ($diag: expr) => {{
1289            bx.sess().dcx().emit_err($diag);
1290            return Err(());
1291        }};
1292    }
1293
1294    macro_rules! require {
1295        ($cond: expr, $diag: expr) => {
1296            if !$cond {
1297                return_error!($diag);
1298            }
1299        };
1300    }
1301
1302    macro_rules! require_simd {
1303        ($ty: expr, $variant:ident) => {{
1304            require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
1305            $ty.simd_size_and_type(bx.tcx())
1306        }};
1307    }
1308
1309    /// Returns the bit width of the `$ty` argument if it is an `Int` or `Uint` type; otherwise emits `$diag` and bails out.
1310    macro_rules! require_int_or_uint_ty {
1311        ($ty: expr, $diag: expr) => {
1312            match $ty {
1313                ty::Int(i) => {
1314                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1315                }
1316                ty::Uint(i) => {
1317                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1318                }
1319                _ => {
1320                    return_error!($diag);
1321                }
1322            }
1323        };
1324    }
1325
1326    /// Converts a vector mask, where each element's bit width equals that of the data elements it is used with,
1327    /// down to an i1-based mask that can be used by LLVM intrinsics.
1328    ///
1329    /// The Rust SIMD semantics require each mask element to consist of either all ones or all zeroes,
1330    /// but this information is not available to LLVM. Truncating the vector would use the lowest bit,
1331    /// but codegen for several targets is better if we use the highest bit, obtained by shifting.
1332    ///
1333    /// For x86 SSE/AVX targets this is beneficial since most instructions with mask parameters only consider the highest bit.
1334    /// So even though at the LLVM level we have an additional shift, in the final assembly there is no shift or truncate and
1335    /// the mask can instead be used as-is.
1336    ///
1337    /// For aarch64 and other targets there is a benefit because a mask derived from the sign bit can be
1338    /// efficiently converted to an all-ones / all-zeroes mask by comparing whether each element is negative.
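    /// For example (illustrative): an all-ones `i8` lane `0xff` is shifted right by 7 to
    /// `0x01` and truncated to the `i1` value 1, while an all-zeroes lane stays 0.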
1339    fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
1340        bx: &mut Builder<'a, 'll, 'tcx>,
1341        i_xn: &'ll Value,
1342        in_elem_bitwidth: u64,
1343        in_len: u64,
1344    ) -> &'ll Value {
1345        // Shift the MSB right by `in_elem_bitwidth - 1` so it lands in the lowest bit position.
1346        let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1347        let shift_indices = vec![shift_idx; in_len as _];
1348        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1349        // Truncate the vector to <N x i1>.
1350        bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
1351    }
1352
1353    // Sanity-check: all vector arguments must be immediates.
1354    if cfg!(debug_assertions) {
1355        for arg in args {
1356            if arg.layout.ty.is_simd() {
1357                assert_matches!(arg.val, OperandValue::Immediate(_));
1358            }
1359        }
1360    }
1361
1362    if name == sym::simd_select_bitmask {
1363        let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);
1364
1365        let expected_int_bits = len.max(8).next_power_of_two();
1366        let expected_bytes = len.div_ceil(8);
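        // For example (illustrative): len == 4 gives expected_int_bits == 8 and
        // expected_bytes == 1, i.e. the bitmask is a u8 (or a [u8; 1]).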
1367
1368        let mask_ty = args[0].layout.ty;
1369        let mask = match mask_ty.kind() {
1370            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1371            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1372            ty::Array(elem, len)
1373                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1374                    && len
1375                        .try_to_target_usize(bx.tcx)
1376                        .expect("expected monomorphic const in codegen")
1377                        == expected_bytes =>
1378            {
1379                let place = PlaceRef::alloca(bx, args[0].layout);
1380                args[0].val.store(bx, place);
1381                let int_ty = bx.type_ix(expected_bytes * 8);
1382                bx.load(int_ty, place.val.llval, Align::ONE)
1383            }
1384            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
1385                span,
1386                name,
1387                mask_ty,
1388                expected_int_bits,
1389                expected_bytes
1390            }),
1391        };
1392
1393        let i1 = bx.type_i1();
1394        let im = bx.type_ix(len);
1395        let i1xn = bx.type_vector(i1, len);
1396        let m_im = bx.trunc(mask, im);
1397        let m_i1s = bx.bitcast(m_im, i1xn);
1398        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1399    }
1400
1401    // Every intrinsic below takes a SIMD vector as its first argument.
1402    let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
1403    let in_ty = args[0].layout.ty;
1404
1405    let comparison = match name {
1406        sym::simd_eq => Some(BinOp::Eq),
1407        sym::simd_ne => Some(BinOp::Ne),
1408        sym::simd_lt => Some(BinOp::Lt),
1409        sym::simd_le => Some(BinOp::Le),
1410        sym::simd_gt => Some(BinOp::Gt),
1411        sym::simd_ge => Some(BinOp::Ge),
1412        _ => None,
1413    };
1414
1415    if let Some(cmp_op) = comparison {
1416        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1417
1418        require!(
1419            in_len == out_len,
1420            InvalidMonomorphization::ReturnLengthInputType {
1421                span,
1422                name,
1423                in_len,
1424                in_ty,
1425                ret_ty,
1426                out_len
1427            }
1428        );
1429        require!(
1430            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
1431            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
1432        );
1433
1434        return Ok(compare_simd_types(
1435            bx,
1436            args[0].immediate(),
1437            args[1].immediate(),
1438            in_elem,
1439            llret_ty,
1440            cmp_op,
1441        ));
1442    }
1443
1444    if name == sym::simd_shuffle_const_generic {
1445        let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
1446        let n = idx.len() as u64;
1447
1448        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1449        require!(
1450            out_len == n,
1451            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1452        );
1453        require!(
1454            in_elem == out_ty,
1455            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1456        );
1457
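        // Shuffle indices select from the 2 * in_len lanes formed by concatenating
        // the two input vectors, so every index must be below total_len.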
1458        let total_len = in_len * 2;
1459
1460        let indices: Option<Vec<_>> = idx
1461            .iter()
1462            .enumerate()
1463            .map(|(arg_idx, val)| {
1464                let idx = val.unwrap_leaf().to_i32();
1465                if idx >= i32::try_from(total_len).unwrap() {
1466                    bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
1467                        span,
1468                        name,
1469                        arg_idx: arg_idx as u64,
1470                        total_len: total_len.into(),
1471                    });
1472                    None
1473                } else {
1474                    Some(bx.const_i32(idx))
1475                }
1476            })
1477            .collect();
1478        let Some(indices) = indices else {
1479            return Ok(bx.const_null(llret_ty));
1480        };
1481
1482        return Ok(bx.shuffle_vector(
1483            args[0].immediate(),
1484            args[1].immediate(),
1485            bx.const_vector(&indices),
1486        ));
1487    }
1488
1489    if name == sym::simd_shuffle {
1490        // Make sure this is actually a SIMD vector.
1491        let idx_ty = args[2].layout.ty;
1492        let n: u64 = if idx_ty.is_simd()
1493            && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
1494        {
1495            idx_ty.simd_size_and_type(bx.cx.tcx).0
1496        } else {
1497            return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
1498        };
1499
1500        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1501        require!(
1502            out_len == n,
1503            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1504        );
1505        require!(
1506            in_elem == out_ty,
1507            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1508        );
1509
1510        let total_len = u128::from(in_len) * 2;
1511
1512        // Check that the indices are in-bounds.
1513        let indices = args[2].immediate();
1514        for i in 0..n {
1515            let val = bx.const_get_elt(indices, i as u64);
1516            let idx = bx
1517                .const_to_opt_u128(val, true)
1518                .unwrap_or_else(|| bug!("typeck should have already ensured that these are const"));
1519            if idx >= total_len {
1520                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1521                    span,
1522                    name,
1523                    arg_idx: i,
1524                    total_len,
1525                });
1526            }
1527        }
1528
1529        return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
1530    }
1531
1532    if name == sym::simd_insert || name == sym::simd_insert_dyn {
1533        require!(
1534            in_elem == args[2].layout.ty,
1535            InvalidMonomorphization::InsertedType {
1536                span,
1537                name,
1538                in_elem,
1539                in_ty,
1540                out_ty: args[2].layout.ty
1541            }
1542        );
1543
1544        let index_imm = if name == sym::simd_insert {
1545            let idx = bx
1546                .const_to_opt_u128(args[1].immediate(), false)
1547                .expect("typeck should have ensured that this is a const");
1548            if idx >= in_len.into() {
1549                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1550                    span,
1551                    name,
1552                    arg_idx: 1,
1553                    total_len: in_len.into(),
1554                });
1555            }
1556            bx.const_i32(idx as i32)
1557        } else {
1558            args[1].immediate()
1559        };
1560
1561        return Ok(bx.insert_element(args[0].immediate(), args[2].immediate(), index_imm));
1562    }
1563    if name == sym::simd_extract || name == sym::simd_extract_dyn {
1564        require!(
1565            ret_ty == in_elem,
1566            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1567        );
1568        let index_imm = if name == sym::simd_extract {
1569            let idx = bx
1570                .const_to_opt_u128(args[1].immediate(), false)
1571                .expect("typeck should have ensured that this is a const");
1572            if idx >= in_len.into() {
1573                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1574                    span,
1575                    name,
1576                    arg_idx: 1,
1577                    total_len: in_len.into(),
1578                });
1579            }
1580            bx.const_i32(idx as i32)
1581        } else {
1582            args[1].immediate()
1583        };
1584
1585        return Ok(bx.extract_element(args[0].immediate(), index_imm));
1586    }
1587
1588    if name == sym::simd_select {
1589        let m_elem_ty = in_elem;
1590        let m_len = in_len;
1591        let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
1592        require!(
1593            m_len == v_len,
1594            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
1595        );
1596        let in_elem_bitwidth = require_int_or_uint_ty!(
1597            m_elem_ty.kind(),
1598            InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
1599        );
1600        let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len);
1601        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1602    }
1603
1604    if name == sym::simd_bitmask {
1605        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a vector mask and
1606        // returns one bit for each lane (which must all be `0` or `!0`) in the form of either:
1607        // * an unsigned integer
1608        // * an array of `u8`
1609        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed trailing bits.
1610        //
1611        // The bit order of the result depends on the target's endianness: LSB-first on
1612        // little-endian targets and MSB-first on big-endian targets.
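        //
        // For example (illustrative): a 4-lane mask [!0, 0, !0, 0] yields the bits
        // 0b0101 on a little-endian target, i.e. the u8 value 5.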
1613        let expected_int_bits = in_len.max(8).next_power_of_two();
1614        let expected_bytes = in_len.div_ceil(8);
1615
1616        // Integer vector <in_len x i{in_elem_bitwidth}>:
1617        let in_elem_bitwidth = require_int_or_uint_ty!(
1618            in_elem.kind(),
1619            InvalidMonomorphization::MaskWrongElementType { span, name, ty: in_elem }
1620        );
1621
1622        let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
1623        // Bitcast <N x i1> to iN:
1624        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1625
1626        match ret_ty.kind() {
1627            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
1628                // Zero-extend iN to the bitmask type:
1629                return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1630            }
1631            ty::Array(elem, len)
1632                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1633                    && len
1634                        .try_to_target_usize(bx.tcx)
1635                        .expect("expected monomorphic const in codegen")
1636                        == expected_bytes =>
1637            {
1638                // Zero-extend iN to the array length:
1639                let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
1640
1641                // Convert the integer to a byte array
1642                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
1643                bx.store(ze, ptr, Align::ONE);
1644                let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
1645                return Ok(bx.load(array_ty, ptr, Align::ONE));
1646            }
1647            _ => return_error!(InvalidMonomorphization::CannotReturn {
1648                span,
1649                name,
1650                ret_ty,
1651                expected_int_bits,
1652                expected_bytes
1653            }),
1654        }
1655    }
1656
1657    fn simd_simple_float_intrinsic<'ll, 'tcx>(
1658        name: Symbol,
1659        in_elem: Ty<'_>,
1660        in_ty: Ty<'_>,
1661        in_len: u64,
1662        bx: &mut Builder<'_, 'll, 'tcx>,
1663        span: Span,
1664        args: &[OperandRef<'tcx, &'ll Value>],
1665    ) -> Result<&'ll Value, ()> {
1666        macro_rules! return_error {
1667            ($diag: expr) => {{
1668                bx.sess().dcx().emit_err($diag);
1669                return Err(());
1670            }};
1671        }
1672
1673        let elem_ty = if let ty::Float(f) = in_elem.kind() {
1674            bx.cx.type_float_from_ty(*f)
1675        } else {
1676            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
1677        };
1678
1679        let vec_ty = bx.type_vector(elem_ty, in_len);
1680
1681        let intr_name = match name {
1682            sym::simd_ceil => "llvm.ceil",
1683            sym::simd_fabs => "llvm.fabs",
1684            sym::simd_fcos => "llvm.cos",
1685            sym::simd_fexp2 => "llvm.exp2",
1686            sym::simd_fexp => "llvm.exp",
1687            sym::simd_flog10 => "llvm.log10",
1688            sym::simd_flog2 => "llvm.log2",
1689            sym::simd_flog => "llvm.log",
1690            sym::simd_floor => "llvm.floor",
1691            sym::simd_fma => "llvm.fma",
1692            sym::simd_relaxed_fma => "llvm.fmuladd",
1693            sym::simd_fsin => "llvm.sin",
1694            sym::simd_fsqrt => "llvm.sqrt",
1695            sym::simd_round => "llvm.round",
1696            sym::simd_round_ties_even => "llvm.rint",
1697            sym::simd_trunc => "llvm.trunc",
1698            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
1699        };
1700        Ok(bx.call_intrinsic(
1701            intr_name,
1702            &[vec_ty],
1703            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1704        ))
1705    }
1706
1707    if std::matches!(
1708        name,
1709        sym::simd_ceil
1710            | sym::simd_fabs
1711            | sym::simd_fcos
1712            | sym::simd_fexp2
1713            | sym::simd_fexp
1714            | sym::simd_flog10
1715            | sym::simd_flog2
1716            | sym::simd_flog
1717            | sym::simd_floor
1718            | sym::simd_fma
1719            | sym::simd_fsin
1720            | sym::simd_fsqrt
1721            | sym::simd_relaxed_fma
1722            | sym::simd_round
1723            | sym::simd_round_ties_even
1724            | sym::simd_trunc
1725    ) {
1726        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
1727    }
1728
1729    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
1730        let elem_ty = match *elem_ty.kind() {
1731            ty::Int(v) => cx.type_int_from_ty(v),
1732            ty::Uint(v) => cx.type_uint_from_ty(v),
1733            ty::Float(v) => cx.type_float_from_ty(v),
1734            ty::RawPtr(_, _) => cx.type_ptr(),
1735            _ => unreachable!(),
1736        };
1737        cx.type_vector(elem_ty, vec_len)
1738    }
1739
1740    if name == sym::simd_gather {
1741        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1742        //             mask: <N x i{M}>) -> <N x T>
1743        // * N: number of elements in the input vectors
1744        // * T: type of the element to load
1745        // * M: any integer width is supported, will be truncated to i1
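        //
        // Lanes whose mask bit is unset are not loaded from memory; they take the
        // value of the corresponding lane of the `values` operand (the passthrough).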
1746
1747        // All types must be simd vector types
1748
1749        // The second argument must be a simd vector with an element type that's a pointer
1750        // to the element type of the first argument
1751        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1752        let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
1753        // The element type of the third argument (the mask) must be an integer type of any width:
1754        let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
1755        require_simd!(ret_ty, SimdReturn);
1756
1757        // Of the same length:
1758        require!(
1759            in_len == out_len,
1760            InvalidMonomorphization::SecondArgumentLength {
1761                span,
1762                name,
1763                in_len,
1764                in_ty,
1765                arg_ty: args[1].layout.ty,
1766                out_len
1767            }
1768        );
1769        require!(
1770            in_len == out_len2,
1771            InvalidMonomorphization::ThirdArgumentLength {
1772                span,
1773                name,
1774                in_len,
1775                in_ty,
1776                arg_ty: args[2].layout.ty,
1777                out_len: out_len2
1778            }
1779        );
1780
1781        // The return type must match the first argument type
1782        require!(
1783            ret_ty == in_ty,
1784            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
1785        );
1786
1787        require!(
1788            matches!(
1789                *element_ty1.kind(),
1790                ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
1791            ),
1792            InvalidMonomorphization::ExpectedElementType {
1793                span,
1794                name,
1795                expected_element: element_ty1,
1796                second_arg: args[1].layout.ty,
1797                in_elem,
1798                in_ty,
1799                mutability: ExpectedPointerMutability::Not,
1800            }
1801        );
1802
1803        let mask_elem_bitwidth = require_int_or_uint_ty!(
1804            element_ty2.kind(),
1805            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
1806        );
1807
1808        // Alignment of T, must be a constant integer value:
1809        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1810
1811        // Truncate the mask vector to a vector of i1s:
1812        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1813
1814        // Type of the vector of pointers:
1815        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1816
1817        // Type of the vector of elements:
1818        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1819
1820        return Ok(bx.call_intrinsic(
1821            "llvm.masked.gather",
1822            &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
1823            &[args[1].immediate(), alignment, mask, args[0].immediate()],
1824        ));
1825    }
1826
1827    if name == sym::simd_masked_load {
1828        // simd_masked_load(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T>
1829        // * N: number of elements in the input vectors
1830        // * T: type of the element to load
1831        // * M: any integer width is supported, will be truncated to i1
1832        // Loads contiguous elements from memory behind `pointer`, but only for
1833        // those lanes whose `mask` bit is enabled.
1834        // The memory addresses corresponding to the “off” lanes are not accessed.
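        // Disabled lanes instead take the value of the corresponding lane of the
        // passthrough `values` operand.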
1835
1836        // The element type of the "mask" argument must be an integer type of any width
1837        let mask_ty = in_ty;
1838        let (mask_len, mask_elem) = (in_len, in_elem);
1839
1840        // The second argument must be a pointer matching the element type
1841        let pointer_ty = args[1].layout.ty;
1842
1843        // The last argument is a passthrough vector providing values for disabled lanes
1844        let values_ty = args[2].layout.ty;
1845        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1846
1847        require_simd!(ret_ty, SimdReturn);
1848
1849        // Of the same length:
1850        require!(
1851            values_len == mask_len,
1852            InvalidMonomorphization::ThirdArgumentLength {
1853                span,
1854                name,
1855                in_len: mask_len,
1856                in_ty: mask_ty,
1857                arg_ty: values_ty,
1858                out_len: values_len
1859            }
1860        );
1861
1862        // The return type must match the last argument type
1863        require!(
1864            ret_ty == values_ty,
1865            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
1866        );
1867
1868        require!(
1869            matches!(
1870                *pointer_ty.kind(),
1871                ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
1872            ),
1873            InvalidMonomorphization::ExpectedElementType {
1874                span,
1875                name,
1876                expected_element: values_elem,
1877                second_arg: pointer_ty,
1878                in_elem: values_elem,
1879                in_ty: values_ty,
1880                mutability: ExpectedPointerMutability::Not,
1881            }
1882        );
1883
1884        let m_elem_bitwidth = require_int_or_uint_ty!(
1885            mask_elem.kind(),
1886            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1887        );
1888
1889        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1890
1891        // Alignment of T, must be a constant integer value:
1892        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1893
1894        let llvm_pointer = bx.type_ptr();
1895
1896        // Type of the vector of elements:
1897        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1898
1899        return Ok(bx.call_intrinsic(
1900            "llvm.masked.load",
1901            &[llvm_elem_vec_ty, llvm_pointer],
1902            &[args[1].immediate(), alignment, mask, args[2].immediate()],
1903        ));
1904    }
1905
1906    if name == sym::simd_masked_store {
1907        // simd_masked_store(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> ()
1908        // * N: number of elements in the input vectors
1909        // * T: type of the element to store
1910        // * M: any integer width is supported, will be truncated to i1
1911        // Stores contiguous elements to memory behind `pointer`, but only for
1912        // those lanes whose `mask` bit is enabled.
1913        // The memory addresses corresponding to the “off” lanes are not accessed.
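        //
        // For example (illustrative): storing a <4 x i32> with only the first two
        // mask lanes enabled writes lanes 0 and 1 and leaves the remaining memory
        // untouched.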
1914
1915        // The element type of the "mask" argument must be an integer type of any width
1916        let mask_ty = in_ty;
1917        let (mask_len, mask_elem) = (in_len, in_elem);
1918
1919        // The second argument must be a pointer matching the element type
1920        let pointer_ty = args[1].layout.ty;
1921
1922        // The last argument specifies the values to store to memory
1923        let values_ty = args[2].layout.ty;
1924        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1925
1926        // Of the same length:
1927        require!(
1928            values_len == mask_len,
1929            InvalidMonomorphization::ThirdArgumentLength {
1930                span,
1931                name,
1932                in_len: mask_len,
1933                in_ty: mask_ty,
1934                arg_ty: values_ty,
1935                out_len: values_len
1936            }
1937        );
1938
1939        // The second argument must be a mutable pointer type matching the element type
1940        require!(
1941            matches!(
1942                *pointer_ty.kind(),
1943                ty::RawPtr(p_ty, p_mutbl)
1944                    if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
1945            ),
1946            InvalidMonomorphization::ExpectedElementType {
1947                span,
1948                name,
1949                expected_element: values_elem,
1950                second_arg: pointer_ty,
1951                in_elem: values_elem,
1952                in_ty: values_ty,
1953                mutability: ExpectedPointerMutability::Mut,
1954            }
1955        );
1956
1957        let m_elem_bitwidth = require_int_or_uint_ty!(
1958            mask_elem.kind(),
1959            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1960        );
1961
1962        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1963
1964        // Alignment of T, must be a constant integer value:
1965        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1966
1967        let llvm_pointer = bx.type_ptr();
1968
1969        // Type of the vector of elements:
1970        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1971
1972        return Ok(bx.call_intrinsic(
1973            "llvm.masked.store",
1974            &[llvm_elem_vec_ty, llvm_pointer],
1975            &[args[2].immediate(), args[1].immediate(), alignment, mask],
1976        ));
1977    }
1978
1979    if name == sym::simd_scatter {
1980        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1981        //             mask: <N x i{M}>) -> ()
1982        // * N: number of elements in the input vectors
1983        // * T: type of the element to store
1984        // * M: any integer width is supported, will be truncated to i1
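        //
        // Lanes whose mask bit is unset perform no store at all; their pointers
        // are never dereferenced.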
1985
1986        // All types must be simd vector types
1987        // The second argument must be a simd vector with an element type that's a pointer
1988        // to the element type of the first argument
1989        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1990        let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
1991        let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
1992
1993        // Of the same length:
1994        require!(
1995            in_len == element_len1,
1996            InvalidMonomorphization::SecondArgumentLength {
1997                span,
1998                name,
1999                in_len,
2000                in_ty,
2001                arg_ty: args[1].layout.ty,
2002                out_len: element_len1
2003            }
2004        );
2005        require!(
2006            in_len == element_len2,
2007            InvalidMonomorphization::ThirdArgumentLength {
2008                span,
2009                name,
2010                in_len,
2011                in_ty,
2012                arg_ty: args[2].layout.ty,
2013                out_len: element_len2
2014            }
2015        );
2016
2017        require!(
2018            matches!(
2019                *element_ty1.kind(),
2020                ty::RawPtr(p_ty, p_mutbl)
2021                    if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
2022            ),
2023            InvalidMonomorphization::ExpectedElementType {
2024                span,
2025                name,
2026                expected_element: element_ty1,
2027                second_arg: args[1].layout.ty,
2028                in_elem,
2029                in_ty,
2030                mutability: ExpectedPointerMutability::Mut,
2031            }
2032        );
2033
2034        // The element type of the third argument must be an integer type of any width:
2035        let mask_elem_bitwidth = require_int_or_uint_ty!(
2036            element_ty2.kind(),
2037            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
2038        );
2039
2040        // Alignment of T, must be a constant integer value:
2041        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
2042
2043        // Truncate the mask vector to a vector of i1s:
2044        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2045
2046        // Type of the vector of pointers:
2047        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2048
2049        // Type of the vector of elements:
2050        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2051
2052        return Ok(bx.call_intrinsic(
2053            "llvm.masked.scatter",
2054            &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
2055            &[args[0].immediate(), args[1].immediate(), alignment, mask],
2056        ));
2057    }
2058
2059    macro_rules! arith_red {
2060        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
2061         $identity:expr) => {
2062            if name == sym::$name {
2063                require!(
2064                    ret_ty == in_elem,
2065                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2066                );
2067                return match in_elem.kind() {
2068                    ty::Int(_) | ty::Uint(_) => {
2069                        let r = bx.$integer_reduce(args[0].immediate());
2070                        if $ordered {
2071                            // if overflow occurs, the result is the
2072                            // mathematical result modulo 2^n:
2073                            Ok(bx.$op(args[1].immediate(), r))
2074                        } else {
2075                            Ok(bx.$integer_reduce(args[0].immediate()))
2076                        }
2077                    }
2078                    ty::Float(f) => {
2079                        let acc = if $ordered {
2080                            // ordered arithmetic reductions take an accumulator
2081                            args[1].immediate()
2082                        } else {
2083                            // unordered arithmetic reductions use the identity accumulator
2084                            match f.bit_width() {
2085                                32 => bx.const_real(bx.type_f32(), $identity),
2086                                64 => bx.const_real(bx.type_f64(), $identity),
2087                                v => return_error!(
2088                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
2089                                        span,
2090                                        name,
2091                                        symbol: sym::$name,
2092                                        in_ty,
2093                                        in_elem,
2094                                        size: v,
2095                                        ret_ty
2096                                    }
2097                                ),
2098                            }
2099                        };
2100                        Ok(bx.$float_reduce(acc, args[0].immediate()))
2101                    }
2102                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2103                        span,
2104                        name,
2105                        symbol: sym::$name,
2106                        in_ty,
2107                        in_elem,
2108                        ret_ty
2109                    }),
2110                };
2111            }
2112        };
2113    }
2114
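    // Note: -0.0 is the identity for fadd (-0.0 + x == x for every x, including
    // +0.0) and 1.0 is the identity for fmul; these values seed the unordered
    // float reductions below.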
2115    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
2116    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
2117    arith_red!(
2118        simd_reduce_add_unordered: vector_reduce_add,
2119        vector_reduce_fadd_reassoc,
2120        false,
2121        add,
2122        -0.0
2123    );
2124    arith_red!(
2125        simd_reduce_mul_unordered: vector_reduce_mul,
2126        vector_reduce_fmul_reassoc,
2127        false,
2128        mul,
2129        1.0
2130    );
2131
2132    macro_rules! minmax_red {
2133        ($name:ident: $int_red:ident, $float_red:ident) => {
2134            if name == sym::$name {
2135                require!(
2136                    ret_ty == in_elem,
2137                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2138                );
2139                return match in_elem.kind() {
2140                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
2141                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
2142                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
2143                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2144                        span,
2145                        name,
2146                        symbol: sym::$name,
2147                        in_ty,
2148                        in_elem,
2149                        ret_ty
2150                    }),
2151                };
2152            }
2153        };
2154    }
2155
2156    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2157    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2158
2159    macro_rules! bitwise_red {
2160        ($name:ident : $red:ident, $boolean:expr) => {
2161            if name == sym::$name {
2162                let input = if !$boolean {
2163                    require!(
2164                        ret_ty == in_elem,
2165                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2166                    );
2167                    args[0].immediate()
2168                } else {
2169                    let bitwidth = match in_elem.kind() {
2170                        ty::Int(i) => {
2171                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2172                        }
2173                        ty::Uint(i) => {
2174                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2175                        }
2176                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2177                            span,
2178                            name,
2179                            symbol: sym::$name,
2180                            in_ty,
2181                            in_elem,
2182                            ret_ty
2183                        }),
2184                    };
2185
2186                    vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
2187                };
2188                return match in_elem.kind() {
2189                    ty::Int(_) | ty::Uint(_) => {
2190                        let r = bx.$red(input);
2191                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
2192                    }
2193                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2194                        span,
2195                        name,
2196                        symbol: sym::$name,
2197                        in_ty,
2198                        in_elem,
2199                        ret_ty
2200                    }),
2201                };
2202            }
2203        };
2204    }
2205
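    // simd_reduce_all / simd_reduce_any pass `$boolean == true`: the input mask is
    // normalized via vector_mask_to_bitmask first, and the reduced i1 is
    // zero-extended back to a bool.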
2206    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2207    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2208    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2209    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2210    bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2211
2212    if name == sym::simd_cast_ptr {
2213        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2214        require!(
2215            in_len == out_len,
2216            InvalidMonomorphization::ReturnLengthInputType {
2217                span,
2218                name,
2219                in_len,
2220                in_ty,
2221                ret_ty,
2222                out_len
2223            }
2224        );
2225
2226        match in_elem.kind() {
2227            ty::RawPtr(p_ty, _) => {
2228                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2229                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2230                });
2231                require!(
2232                    metadata.is_unit(),
2233                    InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
2234                );
2235            }
2236            _ => {
2237                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2238            }
2239        }
2240        match out_elem.kind() {
2241            ty::RawPtr(p_ty, _) => {
2242                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2243                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2244                });
2245                require!(
2246                    metadata.is_unit(),
2247                    InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
2248                );
2249            }
2250            _ => {
2251                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2252            }
2253        }
2254
2255        return Ok(args[0].immediate());
2256    }
2257
2258    if name == sym::simd_expose_provenance {
2259        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2260        require!(
2261            in_len == out_len,
2262            InvalidMonomorphization::ReturnLengthInputType {
2263                span,
2264                name,
2265                in_len,
2266                in_ty,
2267                ret_ty,
2268                out_len
2269            }
2270        );
2271
2272        match in_elem.kind() {
2273            ty::RawPtr(_, _) => {}
2274            _ => {
2275                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2276            }
2277        }
2278        match out_elem.kind() {
2279            ty::Uint(ty::UintTy::Usize) => {}
2280            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
2281        }
2282
2283        return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
2284    }
2285
2286    if name == sym::simd_with_exposed_provenance {
2287        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2288        require!(
2289            in_len == out_len,
2290            InvalidMonomorphization::ReturnLengthInputType {
2291                span,
2292                name,
2293                in_len,
2294                in_ty,
2295                ret_ty,
2296                out_len
2297            }
2298        );
2299
2300        match in_elem.kind() {
2301            ty::Uint(ty::UintTy::Usize) => {}
2302            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
2303        }
2304        match out_elem.kind() {
2305            ty::RawPtr(_, _) => {}
2306            _ => {
2307                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2308            }
2309        }
2310
2311        return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
2312    }
2313
2314    if name == sym::simd_cast || name == sym::simd_as {
2315        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2316        require!(
2317            in_len == out_len,
2318            InvalidMonomorphization::ReturnLengthInputType {
2319                span,
2320                name,
2321                in_len,
2322                in_ty,
2323                ret_ty,
2324                out_len
2325            }
2326        );
2327        // casting cares about nominal type, not just structural type
2328        if in_elem == out_elem {
2329            return Ok(args[0].immediate());
2330        }
2331
2332        #[derive(Copy, Clone)]
2333        enum Sign {
2334            Unsigned,
2335            Signed,
2336        }
2337        use Sign::*;
2338
2339        enum Style {
2340            Float,
2341            Int(Sign),
2342            Unsupported,
2343        }
2344
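        // Cast lowering summary (matching the dispatch below): int -> int uses
        // trunc/sext/zext depending on widths and signedness; int -> float uses
        // sitofp/uitofp; float -> int uses fptosi/fptoui (or the saturating
        // cast_float_to_int for simd_as); float -> float uses fptrunc/fpext.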
2345        let (in_style, in_width) = match in_elem.kind() {
2346            // vectors of pointer-sized integers should've been
2347            // disallowed before here, so this unwrap is safe.
2348            ty::Int(i) => (
2349                Style::Int(Signed),
2350                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2351            ),
2352            ty::Uint(u) => (
2353                Style::Int(Unsigned),
2354                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2355            ),
2356            ty::Float(f) => (Style::Float, f.bit_width()),
2357            _ => (Style::Unsupported, 0),
2358        };
2359        let (out_style, out_width) = match out_elem.kind() {
2360            ty::Int(i) => (
2361                Style::Int(Signed),
2362                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2363            ),
2364            ty::Uint(u) => (
2365                Style::Int(Unsigned),
2366                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2367            ),
2368            ty::Float(f) => (Style::Float, f.bit_width()),
2369            _ => (Style::Unsupported, 0),
2370        };
2371
2372        match (in_style, out_style) {
2373            (Style::Int(sign), Style::Int(_)) => {
2374                return Ok(match in_width.cmp(&out_width) {
2375                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2376                    Ordering::Equal => args[0].immediate(),
2377                    Ordering::Less => match sign {
2378                        Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2379                        Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2380                    },
2381                });
2382            }
2383            (Style::Int(Sign::Signed), Style::Float) => {
2384                return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2385            }
2386            (Style::Int(Sign::Unsigned), Style::Float) => {
2387                return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2388            }
2389            (Style::Float, Style::Int(sign)) => {
2390                return Ok(match (sign, name == sym::simd_as) {
2391                    (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2392                    (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2393                    (_, true) => bx.cast_float_to_int(
2394                        matches!(sign, Sign::Signed),
2395                        args[0].immediate(),
2396                        llret_ty,
2397                    ),
2398                });
2399            }
2400            (Style::Float, Style::Float) => {
2401                return Ok(match in_width.cmp(&out_width) {
2402                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2403                    Ordering::Equal => args[0].immediate(),
2404                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2405                });
2406            }
2407            _ => { /* Unsupported. Fallthrough. */ }
2408        }
2409        return_error!(InvalidMonomorphization::UnsupportedCast {
2410            span,
2411            name,
2412            in_ty,
2413            in_elem,
2414            ret_ty,
2415            out_elem
2416        });
2417    }
2418    macro_rules! arith_binary {
2419        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2420            $(if name == sym::$name {
2421                match in_elem.kind() {
2422                    $($(ty::$p(_))|* => {
2423                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
2424                    })*
2425                    _ => {},
2426                }
2427                return_error!(
2428                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2429                );
2430            })*
2431        }
2432    }
2433    arith_binary! {
2434        simd_add: Uint, Int => add, Float => fadd;
2435        simd_sub: Uint, Int => sub, Float => fsub;
2436        simd_mul: Uint, Int => mul, Float => fmul;
2437        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
2438        simd_rem: Uint => urem, Int => srem, Float => frem;
2439        simd_shl: Uint, Int => shl;
2440        simd_shr: Uint => lshr, Int => ashr;
2441        simd_and: Uint, Int => and;
2442        simd_or: Uint, Int => or;
2443        simd_xor: Uint, Int => xor;
2444        simd_fmax: Float => maxnum;
2445        simd_fmin: Float => minnum;
2447    }
2448    macro_rules! arith_unary {
2449        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2450            $(if name == sym::$name {
2451                match in_elem.kind() {
2452                    $($(ty::$p(_))|* => {
2453                        return Ok(bx.$call(args[0].immediate()))
2454                    })*
2455                    _ => {},
2456                }
2457                return_error!(
2458                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2459                );
2460            })*
2461        }
2462    }
2463    arith_unary! {
2464        simd_neg: Int => neg, Float => fneg;
2465    }
2466
2467    // Unary integer intrinsics
2468    if matches!(
2469        name,
2470        sym::simd_bswap
2471            | sym::simd_bitreverse
2472            | sym::simd_ctlz
2473            | sym::simd_ctpop
2474            | sym::simd_cttz
2475            | sym::simd_funnel_shl
2476            | sym::simd_funnel_shr
2477    ) {
2478        let vec_ty = bx.cx.type_vector(
2479            match *in_elem.kind() {
2480                ty::Int(i) => bx.cx.type_int_from_ty(i),
2481                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
2482                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
2483                    span,
2484                    name,
2485                    in_ty,
2486                    in_elem
2487                }),
2488            },
2489            in_len as u64,
2490        );
2491        let llvm_intrinsic = match name {
2492            sym::simd_bswap => "llvm.bswap",
2493            sym::simd_bitreverse => "llvm.bitreverse",
2494            sym::simd_ctlz => "llvm.ctlz",
2495            sym::simd_ctpop => "llvm.ctpop",
2496            sym::simd_cttz => "llvm.cttz",
2497            sym::simd_funnel_shl => "llvm.fshl",
2498            sym::simd_funnel_shr => "llvm.fshr",
2499            _ => unreachable!(),
2500        };
2501        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
2502
2503        return match name {
2504            // byte swap is a no-op for i8/u8
2505            sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
2506            sym::simd_ctlz | sym::simd_cttz => {
2507                // ctlz/cttz take an extra i1 immediate: passing true would make a zero input produce poison, so pass false
2508                let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
2509                Ok(bx.call_intrinsic(
2510                    llvm_intrinsic,
2511                    &[vec_ty],
2512                    &[args[0].immediate(), dont_poison_on_zero],
2513                ))
2514            }
2515            sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
2516                // simple unary argument cases
2517                Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[args[0].immediate()]))
2518            }
2519            sym::simd_funnel_shl | sym::simd_funnel_shr => Ok(bx.call_intrinsic(
2520                llvm_intrinsic,
2521                &[vec_ty],
2522                &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
2523            )),
2524            _ => unreachable!(),
2525        };
2526    }
2527
2528    if name == sym::simd_arith_offset {
2529        // This also checks that the first operand is a ptr type.
2530        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
2531            span_bug!(span, "must be called with a vector of pointer types as first argument")
2532        });
2533        let layout = bx.layout_of(pointee);
2534        let ptrs = args[0].immediate();
2535        // The second argument must be a vector of pointer-sized integers.
2536        // (We don't care about the signedness, this is wrapping anyway.)
2537        let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
2538        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
2539            span_bug!(
2540                span,
2541                "must be called with a vector of pointer-sized integers as second argument"
2542            );
2543        }
2544        let offsets = args[1].immediate();
2545
2546        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
2547    }
2548
2549    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
2550        let lhs = args[0].immediate();
2551        let rhs = args[1].immediate();
2552        let is_add = name == sym::simd_saturating_add;
2553        let (signed, elem_ty) = match *in_elem.kind() {
2554            ty::Int(i) => (true, bx.cx.type_int_from_ty(i)),
2555            ty::Uint(i) => (false, bx.cx.type_uint_from_ty(i)),
2556            _ => {
2557                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
2558                    span,
2559                    name,
2560                    expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
2561                    vector_type: args[0].layout.ty
2562                });
2563            }
2564        };
2565        let llvm_intrinsic = format!(
2566            "llvm.{}{}.sat",
2567            if signed { 's' } else { 'u' },
2568            if is_add { "add" } else { "sub" },
2569        );
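        // For example (illustrative): signed saturating add becomes "llvm.sadd.sat"
        // and unsigned saturating sub becomes "llvm.usub.sat".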
2570        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
2571
2572        return Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[lhs, rhs]));
2573    }
2574
2575    span_bug!(span, "unknown SIMD intrinsic");
2576}