rustc_codegen_llvm/intrinsic.rs

use std::assert_matches::assert_matches;
use std::cmp::Ordering;

use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size};
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::codegen_attrs::autodiff_attrs;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_hir::{self as hir};
use rustc_middle::mir::BinOp;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
use rustc_middle::ty::offload_meta::OffloadMetadata;
use rustc_middle::ty::{self, GenericArgsRef, Instance, SimdAlign, Ty, TyCtxt, TypingEnv};
use rustc_middle::{bug, span_bug};
use rustc_session::config::CrateType;
use rustc_span::{Span, Symbol, sym};
use rustc_symbol_mangling::{mangle_internal_symbol, symbol_name_for_instance_in_crate};
use rustc_target::callconv::PassMode;
use rustc_target::spec::Os;
use tracing::debug;

use crate::abi::FnAbiLlvmExt;
use crate::builder::Builder;
use crate::builder::autodiff::{adjust_activity_to_abi, generate_enzyme_call};
use crate::builder::gpu_offload::{gen_call_handling, gen_define_handling};
use crate::context::CodegenCx;
use crate::errors::{
    AutoDiffWithoutEnable, AutoDiffWithoutLto, OffloadWithoutEnable, OffloadWithoutFatLTO,
};
use crate::llvm::{self, Metadata, Type, Value};
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;

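// Lowers the "simple" float intrinsics to their overloaded LLVM counterparts,
// with the overload selected by the type parameter list. For example
// (illustrative): `sqrtf32(x)` lowers to `call float @llvm.sqrt.f32(float %x)`,
// and `powif64(x, n)` to `call double @llvm.powi.f64.i32(double %x, i32 %n)`.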
fn call_simple_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    args: &[OperandRef<'tcx, &'ll Value>],
) -> Option<&'ll Value> {
    let (base_name, type_params): (&'static str, &[&'ll Type]) = match name {
        sym::sqrtf16 => ("llvm.sqrt", &[bx.type_f16()]),
        sym::sqrtf32 => ("llvm.sqrt", &[bx.type_f32()]),
        sym::sqrtf64 => ("llvm.sqrt", &[bx.type_f64()]),
        sym::sqrtf128 => ("llvm.sqrt", &[bx.type_f128()]),

        sym::powif16 => ("llvm.powi", &[bx.type_f16(), bx.type_i32()]),
        sym::powif32 => ("llvm.powi", &[bx.type_f32(), bx.type_i32()]),
        sym::powif64 => ("llvm.powi", &[bx.type_f64(), bx.type_i32()]),
        sym::powif128 => ("llvm.powi", &[bx.type_f128(), bx.type_i32()]),

        sym::sinf16 => ("llvm.sin", &[bx.type_f16()]),
        sym::sinf32 => ("llvm.sin", &[bx.type_f32()]),
        sym::sinf64 => ("llvm.sin", &[bx.type_f64()]),
        sym::sinf128 => ("llvm.sin", &[bx.type_f128()]),

        sym::cosf16 => ("llvm.cos", &[bx.type_f16()]),
        sym::cosf32 => ("llvm.cos", &[bx.type_f32()]),
        sym::cosf64 => ("llvm.cos", &[bx.type_f64()]),
        sym::cosf128 => ("llvm.cos", &[bx.type_f128()]),

        sym::powf16 => ("llvm.pow", &[bx.type_f16()]),
        sym::powf32 => ("llvm.pow", &[bx.type_f32()]),
        sym::powf64 => ("llvm.pow", &[bx.type_f64()]),
        sym::powf128 => ("llvm.pow", &[bx.type_f128()]),

        sym::expf16 => ("llvm.exp", &[bx.type_f16()]),
        sym::expf32 => ("llvm.exp", &[bx.type_f32()]),
        sym::expf64 => ("llvm.exp", &[bx.type_f64()]),
        sym::expf128 => ("llvm.exp", &[bx.type_f128()]),

        sym::exp2f16 => ("llvm.exp2", &[bx.type_f16()]),
        sym::exp2f32 => ("llvm.exp2", &[bx.type_f32()]),
        sym::exp2f64 => ("llvm.exp2", &[bx.type_f64()]),
        sym::exp2f128 => ("llvm.exp2", &[bx.type_f128()]),

        sym::logf16 => ("llvm.log", &[bx.type_f16()]),
        sym::logf32 => ("llvm.log", &[bx.type_f32()]),
        sym::logf64 => ("llvm.log", &[bx.type_f64()]),
        sym::logf128 => ("llvm.log", &[bx.type_f128()]),

        sym::log10f16 => ("llvm.log10", &[bx.type_f16()]),
        sym::log10f32 => ("llvm.log10", &[bx.type_f32()]),
        sym::log10f64 => ("llvm.log10", &[bx.type_f64()]),
        sym::log10f128 => ("llvm.log10", &[bx.type_f128()]),

        sym::log2f16 => ("llvm.log2", &[bx.type_f16()]),
        sym::log2f32 => ("llvm.log2", &[bx.type_f32()]),
        sym::log2f64 => ("llvm.log2", &[bx.type_f64()]),
        sym::log2f128 => ("llvm.log2", &[bx.type_f128()]),

        sym::fmaf16 => ("llvm.fma", &[bx.type_f16()]),
        sym::fmaf32 => ("llvm.fma", &[bx.type_f32()]),
        sym::fmaf64 => ("llvm.fma", &[bx.type_f64()]),
        sym::fmaf128 => ("llvm.fma", &[bx.type_f128()]),

        sym::fmuladdf16 => ("llvm.fmuladd", &[bx.type_f16()]),
        sym::fmuladdf32 => ("llvm.fmuladd", &[bx.type_f32()]),
        sym::fmuladdf64 => ("llvm.fmuladd", &[bx.type_f64()]),
        sym::fmuladdf128 => ("llvm.fmuladd", &[bx.type_f128()]),

        sym::fabsf16 => ("llvm.fabs", &[bx.type_f16()]),
        sym::fabsf32 => ("llvm.fabs", &[bx.type_f32()]),
        sym::fabsf64 => ("llvm.fabs", &[bx.type_f64()]),
        sym::fabsf128 => ("llvm.fabs", &[bx.type_f128()]),

        sym::minnumf16 => ("llvm.minnum", &[bx.type_f16()]),
        sym::minnumf32 => ("llvm.minnum", &[bx.type_f32()]),
        sym::minnumf64 => ("llvm.minnum", &[bx.type_f64()]),
        sym::minnumf128 => ("llvm.minnum", &[bx.type_f128()]),

        // FIXME: LLVM currently miscompiles these intrinsics; re-enable them
        // when llvm/llvm-project#{139380,139381,140445} are fixed.
        //sym::minimumf16 => ("llvm.minimum", &[bx.type_f16()]),
        //sym::minimumf32 => ("llvm.minimum", &[bx.type_f32()]),
        //sym::minimumf64 => ("llvm.minimum", &[bx.type_f64()]),
        //sym::minimumf128 => ("llvm.minimum", &[bx.type_f128()]),
        //
        sym::maxnumf16 => ("llvm.maxnum", &[bx.type_f16()]),
        sym::maxnumf32 => ("llvm.maxnum", &[bx.type_f32()]),
        sym::maxnumf64 => ("llvm.maxnum", &[bx.type_f64()]),
        sym::maxnumf128 => ("llvm.maxnum", &[bx.type_f128()]),

        // FIXME: LLVM currently miscompiles these intrinsics; re-enable them
        // when llvm/llvm-project#{139380,139381,140445} are fixed.
        //sym::maximumf16 => ("llvm.maximum", &[bx.type_f16()]),
        //sym::maximumf32 => ("llvm.maximum", &[bx.type_f32()]),
        //sym::maximumf64 => ("llvm.maximum", &[bx.type_f64()]),
        //sym::maximumf128 => ("llvm.maximum", &[bx.type_f128()]),
        //
        sym::copysignf16 => ("llvm.copysign", &[bx.type_f16()]),
        sym::copysignf32 => ("llvm.copysign", &[bx.type_f32()]),
        sym::copysignf64 => ("llvm.copysign", &[bx.type_f64()]),
        sym::copysignf128 => ("llvm.copysign", &[bx.type_f128()]),

        sym::floorf16 => ("llvm.floor", &[bx.type_f16()]),
        sym::floorf32 => ("llvm.floor", &[bx.type_f32()]),
        sym::floorf64 => ("llvm.floor", &[bx.type_f64()]),
        sym::floorf128 => ("llvm.floor", &[bx.type_f128()]),

        sym::ceilf16 => ("llvm.ceil", &[bx.type_f16()]),
        sym::ceilf32 => ("llvm.ceil", &[bx.type_f32()]),
        sym::ceilf64 => ("llvm.ceil", &[bx.type_f64()]),
        sym::ceilf128 => ("llvm.ceil", &[bx.type_f128()]),

        sym::truncf16 => ("llvm.trunc", &[bx.type_f16()]),
        sym::truncf32 => ("llvm.trunc", &[bx.type_f32()]),
        sym::truncf64 => ("llvm.trunc", &[bx.type_f64()]),
        sym::truncf128 => ("llvm.trunc", &[bx.type_f128()]),

        // We could use any of `rint`, `nearbyint`, or `roundeven`
        // for this -- they are all identical in semantics when
        // assuming the default FP environment.
        // `rint` is what we used for $forever.
        sym::round_ties_even_f16 => ("llvm.rint", &[bx.type_f16()]),
        sym::round_ties_even_f32 => ("llvm.rint", &[bx.type_f32()]),
        sym::round_ties_even_f64 => ("llvm.rint", &[bx.type_f64()]),
        sym::round_ties_even_f128 => ("llvm.rint", &[bx.type_f128()]),

        sym::roundf16 => ("llvm.round", &[bx.type_f16()]),
        sym::roundf32 => ("llvm.round", &[bx.type_f32()]),
        sym::roundf64 => ("llvm.round", &[bx.type_f64()]),
        sym::roundf128 => ("llvm.round", &[bx.type_f128()]),

        _ => return None,
    };
    Some(bx.call_intrinsic(
        base_name,
        type_params,
        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
    ))
}

impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, &'ll Value>],
        result: PlaceRef<'tcx, &'ll Value>,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let tcx = self.tcx;

        let name = tcx.item_name(instance.def_id());
        let fn_args = instance.args;

        let simple = call_simple_intrinsic(self, name, args);
        let llval = match name {
            _ if simple.is_some() => simple.unwrap(),
            sym::ptr_mask => {
                let ptr = args[0].immediate();
                self.call_intrinsic(
                    "llvm.ptrmask",
                    &[self.val_ty(ptr), self.type_isize()],
                    &[ptr, args[1].immediate()],
                )
            }
            sym::autodiff => {
                codegen_autodiff(self, tcx, instance, args, result);
                return Ok(());
            }
            sym::offload => {
                if tcx.sess.opts.unstable_opts.offload.is_empty() {
                    let _ = tcx.dcx().emit_almost_fatal(OffloadWithoutEnable);
                }

                if tcx.sess.lto() != rustc_session::config::Lto::Fat {
                    let _ = tcx.dcx().emit_almost_fatal(OffloadWithoutFatLTO);
                }

                codegen_offload(self, tcx, instance, args);
                return Ok(());
            }
            sym::is_val_statically_known => {
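                // `llvm.is.constant` folds to `true` only when LLVM can prove its
                // operand is a constant after optimization, and to `false`
                // otherwise; a non-immediate operand can never qualify, so we
                // answer `false` directly.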
                if let OperandValue::Immediate(imm) = args[0].val {
                    self.call_intrinsic(
                        "llvm.is.constant",
                        &[args[0].layout.immediate_llvm_type(self.cx)],
                        &[imm],
                    )
                } else {
                    self.const_bool(false)
                }
            }
            sym::select_unpredictable => {
                let cond = args[0].immediate();
                assert_eq!(args[1].layout, args[2].layout);
                let select = |bx: &mut Self, true_val, false_val| {
                    let result = bx.select(cond, true_val, false_val);
                    bx.set_unpredictable(&result);
                    result
                };
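                // The operands may be passed by value or behind a reference
                // depending on their layout; in the by-ref case we select between
                // the two *pointers* and defer the load, preserving the
                // branchless select.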
                match (args[1].val, args[2].val) {
                    (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
                        assert!(true_val.llextra.is_none());
                        assert!(false_val.llextra.is_none());
                        assert_eq!(true_val.align, false_val.align);
                        let ptr = select(self, true_val.llval, false_val.llval);
                        let selected =
                            OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
                        selected.store(self, result);
                        return Ok(());
                    }
                    (OperandValue::Immediate(_), OperandValue::Immediate(_))
                    | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
                        let true_val = args[1].immediate_or_packed_pair(self);
                        let false_val = args[2].immediate_or_packed_pair(self);
                        select(self, true_val, false_val)
                    }
                    (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
                    _ => span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
                }
            }
            sym::catch_unwind => {
                catch_unwind_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    result,
                );
                return Ok(());
            }
            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
            sym::va_copy => {
                let dest = args[0].immediate();
                self.call_intrinsic(
                    "llvm.va_copy",
                    &[self.val_ty(dest)],
                    &[dest, args[1].immediate()],
                )
            }
            sym::va_arg => {
                match result.layout.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        match scalar.primitive() {
                            Primitive::Int(..) => {
                                if self.cx().size_of(result.layout.ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, result.layout.llvm_type(self))
                                } else {
                                    emit_va_arg(self, args[0], result.layout.ty)
                                }
                            }
                            Primitive::Float(Float::F16) => {
                                bug!("the va_arg intrinsic does not work with `f16`")
                            }
                            Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
                                emit_va_arg(self, args[0], result.layout.ty)
                            }
                            // `va_arg` should never be used with the return type `f32`.
                            Primitive::Float(Float::F32) => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                            Primitive::Float(Float::F128) => {
                                bug!("the va_arg intrinsic does not work with `f128`")
                            }
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }

            sym::volatile_load | sym::unaligned_volatile_load => {
                let ptr = args[0].immediate();
                let load = self.volatile_load(result.layout.llvm_type(self), ptr);
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    result.layout.align.bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                if !result.layout.is_zst() {
                    self.store_to_place(load, result.val);
                }
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                let ptr = args[0].immediate();
                let locality = fn_args.const_at(1).to_value().valtree.unwrap_leaf().to_i32();
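                // `llvm.prefetch(ptr, rw, locality, cache_type)`: `rw` is 0 for
                // read / 1 for write, `locality` ranges from 0 (no locality) to
                // 3 (keep in all cache levels), and `cache_type` is 0 for
                // instruction / 1 for data cache.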
                self.call_intrinsic(
                    "llvm.prefetch",
                    &[self.val_ty(ptr)],
                    &[
                        ptr,
                        self.const_i32(rw),
                        self.const_i32(locality),
                        self.const_i32(cache_type),
                    ],
                )
            }
            sym::carrying_mul_add => {
                let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);

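                // Widening to twice the bit width cannot overflow; e.g. for
                // unsigned 8-bit operands the largest possible result is
                // 255 * 255 + 255 + 255 = 65535 = u16::MAX (the signed case
                // fits analogously).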
                let wide_llty = self.type_ix(size.bits() * 2);
                let args = args.as_array().unwrap();
                let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));

                let wide = if signed {
                    let prod = self.unchecked_smul(a, b);
                    let acc = self.unchecked_sadd(prod, c);
                    self.unchecked_sadd(acc, d)
                } else {
                    let prod = self.unchecked_umul(a, b);
                    let acc = self.unchecked_uadd(prod, c);
                    self.unchecked_uadd(acc, d)
                };

                let narrow_llty = self.type_ix(size.bits());
                let low = self.trunc(wide, narrow_llty);
                let bits_const = self.const_uint(wide_llty, size.bits());
                // No need for ashr when signed; LLVM changes it to lshr anyway.
                let high = self.lshr(wide, bits_const);
                // FIXME: could be `trunc nuw`, even for signed.
                let high = self.trunc(high, narrow_llty);

                let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
                let pair = self.const_poison(pair_llty);
                let pair = self.insert_value(pair, low, 0);
                let pair = self.insert_value(pair, high, 1);
                pair
            }
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::saturating_add
            | sym::saturating_sub
            | sym::unchecked_funnel_shl
            | sym::unchecked_funnel_shr => {
                let ty = args[0].layout.ty;
                if !ty.is_integral() {
                    tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                    return Ok(());
                }
                let (size, signed) = ty.int_size_and_signed(self.tcx);
                let width = size.bits();
                let llty = self.type_ix(width);
                match name {
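                    // The second parameter of `llvm.ctlz`/`llvm.cttz` is an `i1`
                    // flag: when true, a zero input is poison, which is what lets
                    // the `_nonzero` variants optimize better.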
                    sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => {
                        let y =
                            self.const_bool(name == sym::ctlz_nonzero || name == sym::cttz_nonzero);
                        let llvm_name = if name == sym::ctlz || name == sym::ctlz_nonzero {
                            "llvm.ctlz"
                        } else {
                            "llvm.cttz"
                        };
                        let ret =
                            self.call_intrinsic(llvm_name, &[llty], &[args[0].immediate(), y]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::ctpop => {
                        let ret =
                            self.call_intrinsic("llvm.ctpop", &[llty], &[args[0].immediate()]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::bswap => {
                        if width == 8 {
                            args[0].immediate() // byte-swapping a u8/i8 is a no-op
                        } else {
                            self.call_intrinsic("llvm.bswap", &[llty], &[args[0].immediate()])
                        }
                    }
                    sym::bitreverse => {
                        self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()])
                    }
                    sym::unchecked_funnel_shl | sym::unchecked_funnel_shr => {
                        let is_left = name == sym::unchecked_funnel_shl;
                        let lhs = args[0].immediate();
                        let rhs = args[1].immediate();
                        let raw_shift = args[2].immediate();
                        let llvm_name = format!("llvm.fsh{}", if is_left { 'l' } else { 'r' });

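                        // e.g. `llvm.fshl.i32(a, b, s)` concatenates `a:b` into a
                        // 64-bit value, shifts it left by `s % 32`, and returns the
                        // high 32 bits; `llvm.fshr` analogously returns the low 32
                        // bits of the right shift.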
                        // LLVM expects the shift to have the same type as the
                        // values, but Rust always uses `u32`.
                        let raw_shift = self.intcast(raw_shift, self.val_ty(lhs), false);

                        self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs, raw_shift])
                    }
                    sym::saturating_add | sym::saturating_sub => {
                        let is_add = name == sym::saturating_add;
                        let lhs = args[0].immediate();
                        let rhs = args[1].immediate();
                        let llvm_name = format!(
                            "llvm.{}{}.sat",
                            if signed { 's' } else { 'u' },
                            if is_add { "add" } else { "sub" },
                        );
                        self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs])
                    }
                    _ => bug!(),
                }
            }

            sym::raw_eq => {
                use BackendRepr::*;
                let tp_ty = fn_args.type_at(0);
                let layout = self.layout_of(tp_ty).layout;
                let use_integer_compare = match layout.backend_repr() {
                    Scalar(_) | ScalarPair(_, _) => true,
                    SimdVector { .. } => false,
                    ScalableVector { .. } => {
                        tcx.dcx().emit_err(InvalidMonomorphization::NonScalableType {
                            span,
                            name: sym::raw_eq,
                            ty: tp_ty,
                        });
                        return Ok(());
                    }
                    Memory { .. } => {
                        // For rusty ABIs, small aggregates are actually passed
                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                        // so we re-use that same threshold here.
                        layout.size() <= self.data_layout().pointer_size() * 2
                    }
                };

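                // e.g. on a 64-bit target, two 16-byte aggregates are loaded as
                // `i128` values and compared with a single `icmp eq`; larger
                // aggregates fall back to `memcmp` below.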
                let a = args[0].immediate();
                let b = args[1].immediate();
                if layout.size().bytes() == 0 {
                    self.const_bool(true)
                } else if use_integer_compare {
                    let integer_ty = self.type_ix(layout.size().bits());
                    let a_val = self.load(integer_ty, a, layout.align().abi);
                    let b_val = self.load(integer_ty, b, layout.align().abi);
                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
                } else {
                    let n = self.const_usize(layout.size().bytes());
                    let cmp = self.call_intrinsic("memcmp", &[], &[a, b, n]);
                    self.icmp(IntPredicate::IntEQ, cmp, self.const_int(self.type_int(), 0))
                }
            }

            sym::compare_bytes => {
                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
                let cmp = self.call_intrinsic(
                    "memcmp",
                    &[],
                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
                );
                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
                self.sext(cmp, self.type_ix(32))
            }

            sym::black_box => {
                args[0].val.store(self, result);
                let result_val_span = [result.val.llval];
                // We need to "use" the argument in some way LLVM can't introspect, and on
                // targets that support it we can typically leverage inline assembly to do
                // this. LLVM's interpretation of inline assembly is that it's, well, a black
                // box. This isn't the greatest implementation since it probably deoptimizes
                // more than we want, but it's so far good enough.
                //
                // For zero-sized types, the location pointed to by the result may be
                // uninitialized. Do not "use" the result in this case; instead just clobber
                // the memory.
                let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
                    ("~{memory}", &[])
                } else {
                    ("r,~{memory}", &result_val_span)
                };
                crate::asm::inline_asm_call(
                    self,
                    "",
                    constraint,
                    inputs,
                    self.type_void(),
                    &[],
                    true,
                    false,
                    llvm::AsmDialect::Att,
                    &[span],
                    false,
                    None,
                    None,
                )
                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

                // We have copied the value to `result` already.
                return Ok(());
            }

            _ if name.as_str().starts_with("simd_") => {
                // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                // This gives them the expected layout of a regular #[repr(simd)] vector.
                let mut loaded_args = Vec::new();
                for arg in args {
                    loaded_args.push(
                        // #[repr(packed, simd)] vectors are passed like arrays (as references,
                        // with reduced alignment and no padding) rather than as immediates.
                        // We can use a vector load to fix the layout and turn the argument
                        // into an immediate.
                        if arg.layout.ty.is_simd()
                            && let OperandValue::Ref(place) = arg.val
                        {
                            let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                            let elem_ll_ty = match elem_ty.kind() {
                                ty::Float(f) => self.type_float_from_ty(*f),
                                ty::Int(i) => self.type_int_from_ty(*i),
                                ty::Uint(u) => self.type_uint_from_ty(*u),
                                ty::RawPtr(_, _) => self.type_ptr(),
                                _ => unreachable!(),
                            };
                            let loaded =
                                self.load_from_place(self.type_vector(elem_ll_ty, size), place);
                            OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
                        } else {
                            *arg
                        },
                    );
                }

                let llret_ty = if result.layout.ty.is_simd()
                    && let BackendRepr::Memory { .. } = result.layout.backend_repr
                {
                    let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                    let elem_ll_ty = match elem_ty.kind() {
                        ty::Float(f) => self.type_float_from_ty(*f),
                        ty::Int(i) => self.type_int_from_ty(*i),
                        ty::Uint(u) => self.type_uint_from_ty(*u),
                        ty::RawPtr(_, _) => self.type_ptr(),
                        _ => unreachable!(),
                    };
                    self.type_vector(elem_ll_ty, size)
                } else {
                    result.layout.llvm_type(self)
                };

                match generic_simd_intrinsic(
                    self,
                    name,
                    fn_args,
                    &loaded_args,
                    result.layout.ty,
                    llret_ty,
                    span,
                ) {
                    Ok(llval) => llval,
                    // If there was an error, just skip this invocation... we'll abort compilation
                    // anyway, but we can keep codegen'ing to find more errors.
                    Err(()) => return Ok(()),
                }
            }

            _ => {
                debug!("unknown intrinsic '{}' -- falling back to default body", name);
                // Call the fallback body instead of generating the intrinsic code
                return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
            }
        };

        if result.layout.ty.is_bool() {
            let val = self.from_immediate(llval);
            self.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            self.store_to_place(llval, result.val);
        }
        Ok(())
    }

    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[], &[]);
    }

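    // `llvm.assume` and `llvm.expect` only exist to feed the optimizer, so both
    // are skipped at `-O0` to avoid bloating the unoptimized IR.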
    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[], &[val]);
        }
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic(
                "llvm.expect",
                &[self.type_i1()],
                &[cond, self.const_bool(expected)],
            )
        } else {
            cond
        }
    }

    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &'ll Metadata,
    ) -> Self::Value {
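        // `llvm.type.checked.load` returns `{ ptr, i1 }`: the loaded vtable slot
        // and whether the type test passed; only the pointer is consumed here.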
        let typeid = self.get_metadata_value(typeid);
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load = self.call_intrinsic(
            "llvm.type.checked.load",
            &[],
            &[llvtable, vtable_byte_offset, typeid],
        );
        self.extract_value(type_checked_load, 0)
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[self.val_ty(va_list)], &[va_list])
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[self.val_ty(va_list)], &[va_list])
    }
}

fn catch_unwind_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    if !bx.sess().panic_strategy().unwinds() {
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if wants_wasm_eh(bx.sess()) {
        codegen_wasm_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.os == Os::Emscripten {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM,
// which have LLVM support for SEH on MSVC targets. Although these instructions
// are meant to work for all targets, as of this writing LLVM does not
// recommend using them elsewhere, since the old instructions are still better
// optimized.
fn codegen_msvc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
        //
        //   catchpad_rust:
        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   catchpad_foreign:
        //      %tok = catchpad within %cs [null, 64, null]
        //      call %catch_func(%data, null)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          void* x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void *data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          } catch(...) {
        //              catch_func(data, NULL);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let slot = bx.alloca(ptr_size, ptr_align);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // module.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in library/panic_unwind/src/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
        let tydesc = bx.declare_global(
            &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"),
            bx.val_ty(type_info),
        );

        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        llvm::set_initializer(tydesc, type_info);

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        bx.switch_to_block(catchpad_rust);
        let flags = bx.const_i32(8);
        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // The flag value of 64 indicates a "catch-all".
        bx.switch_to_block(catchpad_foreign);
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null, flags, null]);
        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// WASM's definition of the `rust_try` function.
fn codegen_wasm_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad = bx.append_sibling_block("catchpad");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [null]
        //      %ptr = call @llvm.wasm.get.exception(token %tok)
        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad]);

        bx.switch_to_block(catchpad);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null]);

        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[], &[funclet.cleanuppad()]);
        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[], &[funclet.cleanuppad()]);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        bx.switch_to_block(catch);
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_ptr());
        bx.add_clause(vals, tydesc);
        let ptr = bx.extract_value(vals, 0);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Variant of codegen_gnu_try used for Emscripten, where Rust panics are
// implemented using C++ exceptions. Here we use exceptions of a specific type
// (`struct rust_panic`) to represent Rust panics.
fn codegen_emcc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, %selector) = landingpad
        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
        //      %is_rust_panic = %selector == %rust_typeid
        //      %catch_data = alloca { i8*, i8 }
        //      %catch_data[0] = %ptr
        //      %catch_data[1] = %is_rust_panic
        //      call %catch_func(%data, %catch_data)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        bx.switch_to_block(catch);
        let tydesc = bx.eh_catch_typeinfo();
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
        bx.add_clause(vals, tydesc);
        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
        let ptr = bx.extract_value(vals, 0);
        let selector = bx.extract_value(vals, 1);

        // Check if the typeid we got is the one for a Rust panic.
        let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[bx.val_ty(tydesc)], &[tydesc]);
        let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
        let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());

        // We need to pass two values to catch_func (ptr and is_rust_panic), so
        // create an alloca and pass a pointer to that.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let i8_align = bx.tcx().data_layout.i8_align;
        // Required in order for there to be no padding between the fields.
        assert!(i8_align <= ptr_align);
        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
        bx.store(ptr, catch_data, ptr_align);
        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
        bx.store(is_rust_panic, catch_data_1, i8_align);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    name: &str,
    rust_fn_sig: ty::PolyFnSig<'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
    let llty = fn_abi.llvm_type(cx);
    let llfn = cx.declare_fn(name, fn_abi, None);
    cx.set_frame_pointer_type(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
    let llbb = Builder::append_block(cx, llfn, "entry-block");
    let bx = Builder::build(cx, llbb);
    codegen(bx);
    (llty, llfn)
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
    // `unsafe fn(*mut i8) -> ()`
    let try_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(*mut i8, *mut i8) -> ()`
    let catch_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p, i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
        [try_fn_ty, i8p, catch_fn_ty],
        tcx.types.i32,
        false,
        hir::Safety::Unsafe,
        ExternAbi::Rust,
    ));
    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

fn codegen_autodiff<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    tcx: TyCtxt<'tcx>,
    instance: ty::Instance<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    result: PlaceRef<'tcx, &'ll Value>,
) {
    if !tcx.sess.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::Enable) {
        let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutEnable);
    }

    let ct = tcx.crate_types();
    let lto = tcx.sess.lto();
    if ct.len() == 1 && ct.contains(&CrateType::Executable) {
        if lto != rustc_session::config::Lto::Fat {
            let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutLto);
        }
    } else {
        if lto != rustc_session::config::Lto::Fat && !tcx.sess.opts.cg.linker_plugin_lto.enabled() {
            let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutLto);
        }
    }

    let fn_args = instance.args;
    let callee_ty = instance.ty(tcx, bx.typing_env());

    let sig = callee_ty.fn_sig(tcx).skip_binder();

    let ret_ty = sig.output();
    let llret_ty = bx.layout_of(ret_ty).llvm_type(bx);

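    // The intrinsic's generic args carry two `FnDef`s: the source function being
    // differentiated ([0]) and the user-written derivative declaration ([1]);
    // the actual call arguments arrive packed in a tuple operand (`args[2]`).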
1185    // Get source, diff, and attrs
1186    let (source_id, source_args) = match fn_args.into_type_list(tcx)[0].kind() {
1187        ty::FnDef(def_id, source_params) => (def_id, source_params),
1188        _ => bug!("invalid autodiff intrinsic args"),
1189    };
1190
1191    let fn_source = match Instance::try_resolve(tcx, bx.cx.typing_env(), *source_id, source_args) {
1192        Ok(Some(instance)) => instance,
1193        Ok(None) => bug!(
1194            "could not resolve ({:?}, {:?}) to a specific autodiff instance",
1195            source_id,
1196            source_args
1197        ),
1198        Err(_) => {
1199            // An error has already been emitted
1200            return;
1201        }
1202    };
1203
1204    let source_symbol = symbol_name_for_instance_in_crate(tcx, fn_source.clone(), LOCAL_CRATE);
1205    let Some(fn_to_diff) = bx.cx.get_function(&source_symbol) else {
1206        bug!("could not find source function")
1207    };
1208
1209    let (diff_id, diff_args) = match fn_args.into_type_list(tcx)[1].kind() {
1210        ty::FnDef(def_id, diff_args) => (def_id, diff_args),
1211        _ => bug!("invalid args"),
1212    };
1213
1214    let fn_diff = match Instance::try_resolve(tcx, bx.cx.typing_env(), *diff_id, diff_args) {
1215        Ok(Some(instance)) => instance,
1216        Ok(None) => bug!(
1217            "could not resolve ({:?}, {:?}) to a specific autodiff instance",
1218            diff_id,
1219            diff_args
1220        ),
1221        Err(_) => {
1222            // An error has already been emitted
1223            return;
1224        }
1225    };
1226
1227    let val_arr = get_args_from_tuple(bx, args[2], fn_diff);
1228    let diff_symbol = symbol_name_for_instance_in_crate(tcx, fn_diff.clone(), LOCAL_CRATE);
1229
1230    let Some(mut diff_attrs) = autodiff_attrs(tcx, fn_diff.def_id()) else {
1231        bug!("could not find autodiff attrs")
1232    };
1233
1234    adjust_activity_to_abi(
1235        tcx,
1236        fn_source,
1237        TypingEnv::fully_monomorphized(),
1238        &mut diff_attrs.input_activity,
1239    );
1240
1241    let fnc_tree =
1242        rustc_middle::ty::fnc_typetrees(tcx, fn_source.ty(tcx, TypingEnv::fully_monomorphized()));
1243
1244    // Build body
1245    generate_enzyme_call(
1246        bx,
1247        bx.cx,
1248        fn_to_diff,
1249        &diff_symbol,
1250        llret_ty,
1251        &val_arr,
1252        diff_attrs.clone(),
1253        result,
1254        fnc_tree,
1255    );
1256}
1257
1258// Generates the LLVM code to offload a Rust function to a target device (e.g., GPU).
1259// For each kernel call, it generates the necessary globals (including metadata such as
1260// size and pass mode), manages memory mapping to and from the device, handles all
1261// data transfers, and launches the kernel on the target device.
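// The per-kernel globals are produced by `gen_define_handling`; the host-side mapping,
// data-transfer, and launch code is produced by `gen_call_handling` below.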
1262fn codegen_offload<'ll, 'tcx>(
1263    bx: &mut Builder<'_, 'll, 'tcx>,
1264    tcx: TyCtxt<'tcx>,
1265    instance: ty::Instance<'tcx>,
1266    args: &[OperandRef<'tcx, &'ll Value>],
1267) {
1268    let cx = bx.cx;
1269    let fn_args = instance.args;
1270
1271    let (target_id, target_args) = match fn_args.into_type_list(tcx)[0].kind() {
1272        ty::FnDef(def_id, params) => (def_id, params),
1273        _ => bug!("invalid offload intrinsic arg"),
1274    };
1275
1276    let fn_target = match Instance::try_resolve(tcx, cx.typing_env(), *target_id, target_args) {
1277        Ok(Some(instance)) => instance,
1278        Ok(None) => bug!(
1279            "could not resolve ({:?}, {:?}) to a specific offload instance",
1280            target_id,
1281            target_args
1282        ),
1283        Err(_) => {
1284            // An error has already been emitted
1285            return;
1286        }
1287    };
1288
1289    let args = get_args_from_tuple(bx, args[1], fn_target);
1290    let target_symbol = symbol_name_for_instance_in_crate(tcx, fn_target, LOCAL_CRATE);
1291
1292    let sig = tcx.fn_sig(fn_target.def_id()).skip_binder().skip_binder();
1293    let inputs = sig.inputs();
1294
1295    let metadata = inputs.iter().map(|ty| OffloadMetadata::from_ty(tcx, *ty)).collect::<Vec<_>>();
1296
1297    let types = inputs.iter().map(|ty| cx.layout_of(*ty).llvm_type(cx)).collect::<Vec<_>>();
1298
1299    let offload_globals_ref = cx.offload_globals.borrow();
1300    let offload_globals = match offload_globals_ref.as_ref() {
1301        Some(globals) => globals,
1302        None => {
1303            // Offload is not initialized, cannot continue
1304            return;
1305        }
1306    };
1307    let offload_data = gen_define_handling(&cx, &metadata, &types, target_symbol, offload_globals);
1308    gen_call_handling(bx, &offload_data, &args, &types, &metadata, offload_globals);
1309}
1310
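/// Flattens a tuple operand into individual LLVM argument values for a call to `fn_instance`.
/// Immediates and scalar pairs are returned as-is; a by-ref tuple is walked field by field
/// following the callee's ABI: ignored arguments are skipped, direct/cast arguments are loaded,
/// scalar pairs are split into their two halves, and indirect arguments are passed by pointer.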
1311fn get_args_from_tuple<'ll, 'tcx>(
1312    bx: &mut Builder<'_, 'll, 'tcx>,
1313    tuple_op: OperandRef<'tcx, &'ll Value>,
1314    fn_instance: Instance<'tcx>,
1315) -> Vec<&'ll Value> {
1316    let cx = bx.cx;
1317    let fn_abi = cx.fn_abi_of_instance(fn_instance, ty::List::empty());
1318
1319    match tuple_op.val {
1320        OperandValue::Immediate(val) => vec![val],
1321        OperandValue::Pair(v1, v2) => vec![v1, v2],
1322        OperandValue::Ref(ptr) => {
1323            let tuple_place = PlaceRef { val: ptr, layout: tuple_op.layout };
1324
1325            let mut result = Vec::with_capacity(fn_abi.args.len());
1326            let mut tuple_index = 0;
1327
1328            for arg in &fn_abi.args {
1329                match arg.mode {
1330                    PassMode::Ignore => {}
1331                    PassMode::Direct(_) | PassMode::Cast { .. } => {
1332                        let field = tuple_place.project_field(bx, tuple_index);
1333                        let llvm_ty = field.layout.llvm_type(bx.cx);
1334                        let val = bx.load(llvm_ty, field.val.llval, field.val.align);
1335                        result.push(val);
1336                        tuple_index += 1;
1337                    }
1338                    PassMode::Pair(_, _) => {
1339                        let field = tuple_place.project_field(bx, tuple_index);
1340                        let llvm_ty = field.layout.llvm_type(bx.cx);
1341                        let pair_val = bx.load(llvm_ty, field.val.llval, field.val.align);
1342                        result.push(bx.extract_value(pair_val, 0));
1343                        result.push(bx.extract_value(pair_val, 1));
1344                        tuple_index += 1;
1345                    }
1346                    PassMode::Indirect { .. } => {
1347                        let field = tuple_place.project_field(bx, tuple_index);
1348                        result.push(field.val.llval);
1349                        tuple_index += 1;
1350                    }
1351                }
1352            }
1353
1354            result
1355        }
1356
1357        OperandValue::ZeroSized => vec![],
1358    }
1359}
1360
1361fn generic_simd_intrinsic<'ll, 'tcx>(
1362    bx: &mut Builder<'_, 'll, 'tcx>,
1363    name: Symbol,
1364    fn_args: GenericArgsRef<'tcx>,
1365    args: &[OperandRef<'tcx, &'ll Value>],
1366    ret_ty: Ty<'tcx>,
1367    llret_ty: &'ll Type,
1368    span: Span,
1369) -> Result<&'ll Value, ()> {
1370    macro_rules! return_error {
1371        ($diag: expr) => {{
1372            bx.sess().dcx().emit_err($diag);
1373            return Err(());
1374        }};
1375    }
1376
1377    macro_rules! require {
1378        ($cond: expr, $diag: expr) => {
1379            if !$cond {
1380                return_error!($diag);
1381            }
1382        };
1383    }
1384
1385    macro_rules! require_simd {
1386        ($ty: expr, $variant:ident) => {{
1387            require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
1388            $ty.simd_size_and_type(bx.tcx())
1389        }};
1390    }
1391
1392    /// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type, and emits `$diag` otherwise.
1393    macro_rules! require_int_or_uint_ty {
1394        ($ty: expr, $diag: expr) => {
1395            match $ty {
1396                ty::Int(i) => {
1397                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1398                }
1399                ty::Uint(i) => {
1400                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1401                }
1402                _ => {
1403                    return_error!($diag);
1404                }
1405            }
1406        };
1407    }
1408
1409    let llvm_version = crate::llvm_util::get_version();
1410
1411    /// Converts a vector mask, where each element has the same bit width as the data elements it is
1412    /// used with, down to an i1-based mask that LLVM intrinsics can consume.
1413    ///
1414    /// The Rust SIMD semantics require each mask element to consist of either all ones or all zeroes,
1415    /// but this invariant is not visible to LLVM. Truncating the vector effectively keeps only the lowest
1416    /// bit, while codegen for several targets is better if we select the highest bit by shifting.
1417    ///
1418    /// For x86 SSE/AVX targets this is beneficial since most instructions with mask parameters only
1419    /// consider the highest bit. So even though at the LLVM level we emit an additional shift, the final
1420    /// assembly contains no shift or truncate and the mask can be used as-is.
1421    ///
1422    /// For aarch64 and other targets there is a benefit because a mask derived from the sign bit can be
1423    /// converted to an all-ones / all-zeroes mask more efficiently by comparing whether each element is negative.
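    ///
    /// For example: given a 4-lane `i8` mask with lanes `[0xFF, 0x00, 0xFF, 0x00]`, the `lshr`
    /// by 7 yields `[1, 0, 1, 0]`, and the `trunc` produces the `<4 x i1>` mask
    /// `[true, false, true, false]`.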
1424    fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
1425        bx: &mut Builder<'a, 'll, 'tcx>,
1426        i_xn: &'ll Value,
1427        in_elem_bitwidth: u64,
1428        in_len: u64,
1429    ) -> &'ll Value {
1430        // Shift the MSB right by `in_elem_bitwidth - 1` so that it ends up in the lowest bit position.
1431        let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1432        let shift_indices = vec![shift_idx; in_len as _];
1433        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1434        // Truncate vector to an <i1 x N>
1435        bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
1436    }
1437
1438    // Sanity-check: all vector arguments must be immediates.
1439    if cfg!(debug_assertions) {
1440        for arg in args {
1441            if arg.layout.ty.is_simd() {
1442                assert_matches!(arg.val, OperandValue::Immediate(_));
1443            }
1444        }
1445    }
1446
1447    if name == sym::simd_select_bitmask {
1448        let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);
1449
1450        let expected_int_bits = len.max(8).next_power_of_two();
1451        let expected_bytes = len.div_ceil(8);
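        // E.g. a 4-lane mask must be passed as a `u8` (`len.max(8)` rounds the bit count up to at
        // least 8) or as a `[u8; 1]` byte array.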
1452
1453        let mask_ty = args[0].layout.ty;
1454        let mask = match mask_ty.kind() {
1455            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1456            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1457            ty::Array(elem, len)
1458                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1459                    && len
1460                        .try_to_target_usize(bx.tcx)
1461                        .expect("expected monomorphic const in codegen")
1462                        == expected_bytes =>
1463            {
1464                let place = PlaceRef::alloca(bx, args[0].layout);
1465                args[0].val.store(bx, place);
1466                let int_ty = bx.type_ix(expected_bytes * 8);
1467                bx.load(int_ty, place.val.llval, Align::ONE)
1468            }
1469            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
1470                span,
1471                name,
1472                mask_ty,
1473                expected_int_bits,
1474                expected_bytes
1475            }),
1476        };
1477
1478        let i1 = bx.type_i1();
1479        let im = bx.type_ix(len);
1480        let i1xn = bx.type_vector(i1, len);
1481        let m_im = bx.trunc(mask, im);
1482        let m_i1s = bx.bitcast(m_im, i1xn);
1483        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1484    }
1485
1486    // every intrinsic below takes a SIMD vector as its first argument
1487    let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
1488    let in_ty = args[0].layout.ty;
1489
1490    let comparison = match name {
1491        sym::simd_eq => Some(BinOp::Eq),
1492        sym::simd_ne => Some(BinOp::Ne),
1493        sym::simd_lt => Some(BinOp::Lt),
1494        sym::simd_le => Some(BinOp::Le),
1495        sym::simd_gt => Some(BinOp::Gt),
1496        sym::simd_ge => Some(BinOp::Ge),
1497        _ => None,
1498    };
1499
1500    if let Some(cmp_op) = comparison {
1501        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1502
1503        require!(
1504            in_len == out_len,
1505            InvalidMonomorphization::ReturnLengthInputType {
1506                span,
1507                name,
1508                in_len,
1509                in_ty,
1510                ret_ty,
1511                out_len
1512            }
1513        );
1514        require!(
1515            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
1516            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
1517        );
1518
1519        return Ok(compare_simd_types(
1520            bx,
1521            args[0].immediate(),
1522            args[1].immediate(),
1523            in_elem,
1524            llret_ty,
1525            cmp_op,
1526        ));
1527    }
1528
1529    if name == sym::simd_shuffle_const_generic {
1530        let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
1531        let n = idx.len() as u64;
1532
1533        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1534        require!(
1535            out_len == n,
1536            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1537        );
1538        require!(
1539            in_elem == out_ty,
1540            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1541        );
1542
1543        let total_len = in_len * 2;
1544
1545        let indices: Option<Vec<_>> = idx
1546            .iter()
1547            .enumerate()
1548            .map(|(arg_idx, val)| {
1549                let idx = val.unwrap_leaf().to_i32();
1550                if idx >= i32::try_from(total_len).unwrap() {
1551                    bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
1552                        span,
1553                        name,
1554                        arg_idx: arg_idx as u64,
1555                        total_len: total_len.into(),
1556                    });
1557                    None
1558                } else {
1559                    Some(bx.const_i32(idx))
1560                }
1561            })
1562            .collect();
1563        let Some(indices) = indices else {
1564            return Ok(bx.const_null(llret_ty));
1565        };
1566
1567        return Ok(bx.shuffle_vector(
1568            args[0].immediate(),
1569            args[1].immediate(),
1570            bx.const_vector(&indices),
1571        ));
1572    }
1573
1574    if name == sym::simd_shuffle {
1575        // Make sure this is actually a SIMD vector.
1576        let idx_ty = args[2].layout.ty;
1577        let n: u64 = if idx_ty.is_simd()
1578            && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
1579        {
1580            idx_ty.simd_size_and_type(bx.cx.tcx).0
1581        } else {
1582            return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
1583        };
1584
1585        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1586        require!(
1587            out_len == n,
1588            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1589        );
1590        require!(
1591            in_elem == out_ty,
1592            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1593        );
1594
1595        let total_len = u128::from(in_len) * 2;
1596
1597        // Check that the indices are in-bounds.
1598        let indices = args[2].immediate();
1599        for i in 0..n {
1600            let val = bx.const_get_elt(indices, i as u64);
1601            let idx = bx
1602                .const_to_opt_u128(val, true)
1603                .unwrap_or_else(|| bug!("typeck should have already ensured that these are const"));
1604            if idx >= total_len {
1605                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1606                    span,
1607                    name,
1608                    arg_idx: i,
1609                    total_len,
1610                });
1611            }
1612        }
1613
1614        return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
1615    }
1616
1617    if name == sym::simd_insert || name == sym::simd_insert_dyn {
1618        require!(
1619            in_elem == args[2].layout.ty,
1620            InvalidMonomorphization::InsertedType {
1621                span,
1622                name,
1623                in_elem,
1624                in_ty,
1625                out_ty: args[2].layout.ty
1626            }
1627        );
1628
1629        let index_imm = if name == sym::simd_insert {
1630            let idx = bx
1631                .const_to_opt_u128(args[1].immediate(), false)
1632                .expect("typeck should have ensured that this is a const");
1633            if idx >= in_len.into() {
1634                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1635                    span,
1636                    name,
1637                    arg_idx: 1,
1638                    total_len: in_len.into(),
1639                });
1640            }
1641            bx.const_i32(idx as i32)
1642        } else {
1643            args[1].immediate()
1644        };
1645
1646        return Ok(bx.insert_element(args[0].immediate(), args[2].immediate(), index_imm));
1647    }
1648    if name == sym::simd_extract || name == sym::simd_extract_dyn {
1649        require!(
1650            ret_ty == in_elem,
1651            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1652        );
1653        let index_imm = if name == sym::simd_extract {
1654            let idx = bx
1655                .const_to_opt_u128(args[1].immediate(), false)
1656                .expect("typeck should have ensured that this is a const");
1657            if idx >= in_len.into() {
1658                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1659                    span,
1660                    name,
1661                    arg_idx: 1,
1662                    total_len: in_len.into(),
1663                });
1664            }
1665            bx.const_i32(idx as i32)
1666        } else {
1667            args[1].immediate()
1668        };
1669
1670        return Ok(bx.extract_element(args[0].immediate(), index_imm));
1671    }
1672
1673    if name == sym::simd_select {
1674        let m_elem_ty = in_elem;
1675        let m_len = in_len;
1676        let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
1677        require!(
1678            m_len == v_len,
1679            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
1680        );
1681
1682        let m_i1s = if args[1].layout.ty.is_scalable_vector() {
1683            match m_elem_ty.kind() {
1684                ty::Bool => {}
1685                _ => return_error!(InvalidMonomorphization::MaskWrongElementType {
1686                    span,
1687                    name,
1688                    ty: m_elem_ty
1689                }),
1690            };
1691            let i1 = bx.type_i1();
1692            let i1xn = bx.type_scalable_vector(i1, m_len as u64);
1693            bx.trunc(args[0].immediate(), i1xn)
1694        } else {
1695            let in_elem_bitwidth = require_int_or_uint_ty!(
1696                m_elem_ty.kind(),
1697                InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
1698            );
1699            vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len)
1700        };
1701
1702        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1703    }
1704
1705    if name == sym::simd_bitmask {
1706        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a vector mask and
1707        // returns one bit for each lane (which must all be `0` or `!0`) in the form of either:
1708        // * an unsigned integer
1709        // * an array of `u8`
1710        // If the vector has fewer than 8 lanes, a `u8` is returned with zeroed trailing bits.
1711        //
1712        // The bit order of the result depends on the byte endianness, LSB-first for little
1713        // endian and MSB-first for big endian.
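        // For example, on a little-endian target a 4-lane mask of `[!0, 0, !0, 0]` is returned as
        // the `u8` value `0b0000_0101`.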
1714        let expected_int_bits = in_len.max(8).next_power_of_two();
1715        let expected_bytes = in_len.div_ceil(8);
1716
1717        // Integer vector <i{in_bitwidth} x in_len>:
1718        let in_elem_bitwidth = require_int_or_uint_ty!(
1719            in_elem.kind(),
1720            InvalidMonomorphization::MaskWrongElementType { span, name, ty: in_elem }
1721        );
1722
1723        let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
1724        // Bitcast <i1 x N> to iN:
1725        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1726
1727        match ret_ty.kind() {
1728            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
1729                // Zero-extend iN to the bitmask type:
1730                return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1731            }
1732            ty::Array(elem, len)
1733                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1734                    && len
1735                        .try_to_target_usize(bx.tcx)
1736                        .expect("expected monomorphic const in codegen")
1737                        == expected_bytes =>
1738            {
1739                // Zero-extend iN to the array length:
1740                let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
1741
1742                // Convert the integer to a byte array
1743                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
1744                bx.store(ze, ptr, Align::ONE);
1745                let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
1746                return Ok(bx.load(array_ty, ptr, Align::ONE));
1747            }
1748            _ => return_error!(InvalidMonomorphization::CannotReturn {
1749                span,
1750                name,
1751                ret_ty,
1752                expected_int_bits,
1753                expected_bytes
1754            }),
1755        }
1756    }
1757
1758    fn simd_simple_float_intrinsic<'ll, 'tcx>(
1759        name: Symbol,
1760        in_elem: Ty<'_>,
1761        in_ty: Ty<'_>,
1762        in_len: u64,
1763        bx: &mut Builder<'_, 'll, 'tcx>,
1764        span: Span,
1765        args: &[OperandRef<'tcx, &'ll Value>],
1766    ) -> Result<&'ll Value, ()> {
1767        macro_rules! return_error {
1768            ($diag: expr) => {{
1769                bx.sess().dcx().emit_err($diag);
1770                return Err(());
1771            }};
1772        }
1773
1774        let ty::Float(f) = in_elem.kind() else {
1775            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
1776        };
1777        let elem_ty = bx.cx.type_float_from_ty(*f);
1778
1779        let vec_ty = bx.type_vector(elem_ty, in_len);
1780
1781        let intr_name = match name {
1782            sym::simd_ceil => "llvm.ceil",
1783            sym::simd_fabs => "llvm.fabs",
1784            sym::simd_fcos => "llvm.cos",
1785            sym::simd_fexp2 => "llvm.exp2",
1786            sym::simd_fexp => "llvm.exp",
1787            sym::simd_flog10 => "llvm.log10",
1788            sym::simd_flog2 => "llvm.log2",
1789            sym::simd_flog => "llvm.log",
1790            sym::simd_floor => "llvm.floor",
1791            sym::simd_fma => "llvm.fma",
1792            sym::simd_relaxed_fma => "llvm.fmuladd",
1793            sym::simd_fsin => "llvm.sin",
1794            sym::simd_fsqrt => "llvm.sqrt",
1795            sym::simd_round => "llvm.round",
1796            sym::simd_round_ties_even => "llvm.rint",
1797            sym::simd_trunc => "llvm.trunc",
1798            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
1799        };
1800        Ok(bx.call_intrinsic(
1801            intr_name,
1802            &[vec_ty],
1803            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1804        ))
1805    }
1806
1807    if matches!(
1808        name,
1809        sym::simd_ceil
1810            | sym::simd_fabs
1811            | sym::simd_fcos
1812            | sym::simd_fexp2
1813            | sym::simd_fexp
1814            | sym::simd_flog10
1815            | sym::simd_flog2
1816            | sym::simd_flog
1817            | sym::simd_floor
1818            | sym::simd_fma
1819            | sym::simd_fsin
1820            | sym::simd_fsqrt
1821            | sym::simd_relaxed_fma
1822            | sym::simd_round
1823            | sym::simd_round_ties_even
1824            | sym::simd_trunc
1825    ) {
1826        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
1827    }
1828
1829    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
1830        let elem_ty = match *elem_ty.kind() {
1831            ty::Int(v) => cx.type_int_from_ty(v),
1832            ty::Uint(v) => cx.type_uint_from_ty(v),
1833            ty::Float(v) => cx.type_float_from_ty(v),
1834            ty::RawPtr(_, _) => cx.type_ptr(),
1835            _ => unreachable!(),
1836        };
1837        cx.type_vector(elem_ty, vec_len)
1838    }
1839
1840    if name == sym::simd_gather {
1841        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1842        //             mask: <N x i{M}>) -> <N x T>
1843        // * N: number of elements in the input vectors
1844        // * T: type of the element to load
1845        // * M: any integer width is supported, will be truncated to i1
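        // Conceptually, for each enabled lane i the result is `*pointers[i]`; disabled lanes take
        // their value from `values[i]` instead, and their addresses are not accessed.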
1846
1847        // All types must be simd vector types
1848
1849        // The second argument must be a simd vector with an element type that's a pointer
1850        // to the element type of the first argument
1851        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1852        let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
1853        // The element type of the third argument must be an integer type (signed or unsigned) of any width:
1854        let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
1855        require_simd!(ret_ty, SimdReturn);
1856
1857        // Of the same length:
1858        require!(
1859            in_len == out_len,
1860            InvalidMonomorphization::SecondArgumentLength {
1861                span,
1862                name,
1863                in_len,
1864                in_ty,
1865                arg_ty: args[1].layout.ty,
1866                out_len
1867            }
1868        );
1869        require!(
1870            in_len == out_len2,
1871            InvalidMonomorphization::ThirdArgumentLength {
1872                span,
1873                name,
1874                in_len,
1875                in_ty,
1876                arg_ty: args[2].layout.ty,
1877                out_len: out_len2
1878            }
1879        );
1880
1881        // The return type must match the first argument type
1882        require!(
1883            ret_ty == in_ty,
1884            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
1885        );
1886
1887        require!(
1888            matches!(
1889                *element_ty1.kind(),
1890                ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
1891            ),
1892            InvalidMonomorphization::ExpectedElementType {
1893                span,
1894                name,
1895                expected_element: element_ty1,
1896                second_arg: args[1].layout.ty,
1897                in_elem,
1898                in_ty,
1899                mutability: ExpectedPointerMutability::Not,
1900            }
1901        );
1902
1903        let mask_elem_bitwidth = require_int_or_uint_ty!(
1904            element_ty2.kind(),
1905            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
1906        );
1907
1908        // Alignment of T; must be a constant integer value:
1909        let alignment = bx.align_of(in_elem).bytes();
1910
1911        // Truncate the mask vector to a vector of i1s:
1912        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1913
1914        // Type of the vector of pointers:
1915        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1916
1917        // Type of the vector of elements:
1918        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1919
1920        let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
1921            let alignment = bx.const_i32(alignment as i32);
1922            &[args[1].immediate(), alignment, mask, args[0].immediate()]
1923        } else {
1924            &[args[1].immediate(), mask, args[0].immediate()]
1925        };
1926
1927        let call =
1928            bx.call_intrinsic("llvm.masked.gather", &[llvm_elem_vec_ty, llvm_pointer_vec_ty], args);
1929        if llvm_version >= (22, 0, 0) {
1930            crate::attributes::apply_to_callsite(
1931                call,
1932                crate::llvm::AttributePlace::Argument(0),
1933                &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
1934            )
1935        }
1936        return Ok(call);
1937    }
1938
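    /// Resolves the `SimdAlign` const generic of the masked load/store intrinsics to a byte
    /// alignment: 1 for unaligned access, or the ABI alignment of the element or vector type.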
1939    fn llvm_alignment<'ll, 'tcx>(
1940        bx: &mut Builder<'_, 'll, 'tcx>,
1941        alignment: SimdAlign,
1942        vector_ty: Ty<'tcx>,
1943        element_ty: Ty<'tcx>,
1944    ) -> u64 {
1945        match alignment {
1946            SimdAlign::Unaligned => 1,
1947            SimdAlign::Element => bx.align_of(element_ty).bytes(),
1948            SimdAlign::Vector => bx.align_of(vector_ty).bytes(),
1949        }
1950    }
1951
1952    if name == sym::simd_masked_load {
1953        // simd_masked_load<_, _, _, const ALIGN: SimdAlign>(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T>
1954        // * N: number of elements in the input vectors
1955        // * T: type of the element to load
1956        // * M: any integer width is supported, will be truncated to i1
1957        // Loads contiguous elements from memory behind `pointer`, but only for
1958        // those lanes whose `mask` bit is enabled.
1959        // The memory addresses corresponding to the “off” lanes are not accessed.
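        // E.g. with mask lanes [!0, 0, !0, 0], lanes 0 and 2 are loaded from `pointer`, while
        // lanes 1 and 3 of the result come from `values`.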
1960
1961        let alignment = fn_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
1962            .unwrap_leaf()
1963            .to_simd_alignment();
1964
1965        // The element type of the "mask" argument must be an integer type (signed or unsigned) of any width
1966        let mask_ty = in_ty;
1967        let (mask_len, mask_elem) = (in_len, in_elem);
1968
1969        // The second argument must be a pointer matching the element type
1970        let pointer_ty = args[1].layout.ty;
1971
1972        // The last argument is a passthrough vector providing values for disabled lanes
1973        let values_ty = args[2].layout.ty;
1974        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1975
1976        require_simd!(ret_ty, SimdReturn);
1977
1978        // Of the same length:
1979        require!(
1980            values_len == mask_len,
1981            InvalidMonomorphization::ThirdArgumentLength {
1982                span,
1983                name,
1984                in_len: mask_len,
1985                in_ty: mask_ty,
1986                arg_ty: values_ty,
1987                out_len: values_len
1988            }
1989        );
1990
1991        // The return type must match the last argument type
1992        require!(
1993            ret_ty == values_ty,
1994            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
1995        );
1996
1997        require!(
1998            matches!(
1999                *pointer_ty.kind(),
2000                ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
2001            ),
2002            InvalidMonomorphization::ExpectedElementType {
2003                span,
2004                name,
2005                expected_element: values_elem,
2006                second_arg: pointer_ty,
2007                in_elem: values_elem,
2008                in_ty: values_ty,
2009                mutability: ExpectedPointerMutability::Not,
2010            }
2011        );
2012
2013        let m_elem_bitwidth = require_int_or_uint_ty!(
2014            mask_elem.kind(),
2015            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
2016        );
2017
2018        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
2019
2020        // The alignment in bytes, determined by the `SimdAlign` const generic; must be a constant integer value:
2021        let alignment = llvm_alignment(bx, alignment, values_ty, values_elem);
2022
2023        let llvm_pointer = bx.type_ptr();
2024
2025        // Type of the vector of elements:
2026        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
2027
2028        let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2029            let alignment = bx.const_i32(alignment as i32);
2030
2031            &[args[1].immediate(), alignment, mask, args[2].immediate()]
2032        } else {
2033            &[args[1].immediate(), mask, args[2].immediate()]
2034        };
2035
2036        let call = bx.call_intrinsic("llvm.masked.load", &[llvm_elem_vec_ty, llvm_pointer], args);
2037        if llvm_version >= (22, 0, 0) {
2038            crate::attributes::apply_to_callsite(
2039                call,
2040                crate::llvm::AttributePlace::Argument(0),
2041                &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2042            )
2043        }
2044        return Ok(call);
2045    }
2046
2047    if name == sym::simd_masked_store {
2048        // simd_masked_store<_, _, _, const ALIGN: SimdAlign>(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> ()
2049        // * N: number of elements in the input vectors
2050        // * T: type of the element to store
2051        // * M: any integer width is supported, will be truncated to i1
2052        // Stores contiguous elements to memory behind `pointer`, but only for
2053        // those lanes whose `mask` bit is enabled.
2054        // The memory addresses corresponding to the “off” lanes are not accessed.
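        // E.g. with mask lanes [!0, 0, !0, 0], only `values[0]` and `values[2]` are written to
        // the memory behind `pointer`.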
2055
2056        let alignment = fn_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
2057            .unwrap_leaf()
2058            .to_simd_alignment();
2059
2060        // The element type of the "mask" argument must be an integer type (signed or unsigned) of any width
2061        let mask_ty = in_ty;
2062        let (mask_len, mask_elem) = (in_len, in_elem);
2063
2064        // The second argument must be a pointer matching the element type
2065        let pointer_ty = args[1].layout.ty;
2066
2067        // The last argument specifies the values to store to memory
2068        let values_ty = args[2].layout.ty;
2069        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
2070
2071        // Of the same length:
2072        require!(
2073            values_len == mask_len,
2074            InvalidMonomorphization::ThirdArgumentLength {
2075                span,
2076                name,
2077                in_len: mask_len,
2078                in_ty: mask_ty,
2079                arg_ty: values_ty,
2080                out_len: values_len
2081            }
2082        );
2083
2084        // The second argument must be a mutable pointer type matching the element type
2085        require!(
2086            matches!(
2087                *pointer_ty.kind(),
2088                ty::RawPtr(p_ty, p_mutbl)
2089                    if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
2090            ),
2091            InvalidMonomorphization::ExpectedElementType {
2092                span,
2093                name,
2094                expected_element: values_elem,
2095                second_arg: pointer_ty,
2096                in_elem: values_elem,
2097                in_ty: values_ty,
2098                mutability: ExpectedPointerMutability::Mut,
2099            }
2100        );
2101
2102        let m_elem_bitwidth = require_int_or_uint_ty!(
2103            mask_elem.kind(),
2104            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
2105        );
2106
2107        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
2108
2109        // The alignment in bytes, determined by the `SimdAlign` const generic; must be a constant integer value:
2110        let alignment = llvm_alignment(bx, alignment, values_ty, values_elem);
2111
2112        let llvm_pointer = bx.type_ptr();
2113
2114        // Type of the vector of elements:
2115        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
2116
2117        let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2118            let alignment = bx.const_i32(alignment as i32);
2119            &[args[2].immediate(), args[1].immediate(), alignment, mask]
2120        } else {
2121            &[args[2].immediate(), args[1].immediate(), mask]
2122        };
2123
2124        let call = bx.call_intrinsic("llvm.masked.store", &[llvm_elem_vec_ty, llvm_pointer], args);
2125        if llvm_version >= (22, 0, 0) {
2126            crate::attributes::apply_to_callsite(
2127                call,
2128                crate::llvm::AttributePlace::Argument(1),
2129                &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2130            )
2131        }
2132        return Ok(call);
2133    }
2134
2135    if name == sym::simd_scatter {
2136        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
2137        //             mask: <N x i{M}>) -> ()
2138        // * N: number of elements in the input vectors
2139        // * T: type of the element to store
2140        // * M: any integer width is supported, will be truncated to i1
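        // Conceptually, for each enabled lane i this performs `*pointers[i] = values[i]`; the
        // memory behind disabled lanes is left untouched.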
2141
2142        // All types must be simd vector types
2143        // The second argument must be a simd vector with an element type that's a pointer
2144        // to the element type of the first argument
2145        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
2146        let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
2147        let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
2148
2149        // Of the same length:
2150        require!(
2151            in_len == element_len1,
2152            InvalidMonomorphization::SecondArgumentLength {
2153                span,
2154                name,
2155                in_len,
2156                in_ty,
2157                arg_ty: args[1].layout.ty,
2158                out_len: element_len1
2159            }
2160        );
2161        require!(
2162            in_len == element_len2,
2163            InvalidMonomorphization::ThirdArgumentLength {
2164                span,
2165                name,
2166                in_len,
2167                in_ty,
2168                arg_ty: args[2].layout.ty,
2169                out_len: element_len2
2170            }
2171        );
2172
2173        require!(
2174            matches!(
2175                *element_ty1.kind(),
2176                ty::RawPtr(p_ty, p_mutbl)
2177                    if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
2178            ),
2179            InvalidMonomorphization::ExpectedElementType {
2180                span,
2181                name,
2182                expected_element: element_ty1,
2183                second_arg: args[1].layout.ty,
2184                in_elem,
2185                in_ty,
2186                mutability: ExpectedPointerMutability::Mut,
2187            }
2188        );
2189
2190        // The element type of the third argument must be an integer type of any width:
2191        let mask_elem_bitwidth = require_int_or_uint_ty!(
2192            element_ty2.kind(),
2193            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
2194        );
2195
2196        // Alignment of T; must be a constant integer value:
2197        let alignment = bx.align_of(in_elem).bytes();
2198
2199        // Truncate the mask vector to a vector of i1s:
2200        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2201
2202        // Type of the vector of pointers:
2203        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2204
2205        // Type of the vector of elements:
2206        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2207        let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2208            let alignment = bx.const_i32(alignment as i32);
2209            &[args[0].immediate(), args[1].immediate(), alignment, mask]
2210        } else {
2211            &[args[0].immediate(), args[1].immediate(), mask]
2212        };
2213        let call = bx.call_intrinsic(
2214            "llvm.masked.scatter",
2215            &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
2216            args,
2217        );
2218        if llvm_version >= (22, 0, 0) {
2219            crate::attributes::apply_to_callsite(
2220                call,
2221                crate::llvm::AttributePlace::Argument(1),
2222                &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2223            )
2224        }
2225        return Ok(call);
2226    }
2227
2228    macro_rules! arith_red {
2229        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
2230         $identity:expr) => {
2231            if name == sym::$name {
2232                require!(
2233                    ret_ty == in_elem,
2234                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2235                );
2236                return match in_elem.kind() {
2237                    ty::Int(_) | ty::Uint(_) => {
2238                        let r = bx.$integer_reduce(args[0].immediate());
2239                        if $ordered {
2240                            // if overflow occurs, the result is the
2241                            // mathematical result modulo 2^n:
2242                            Ok(bx.$op(args[1].immediate(), r))
2243                        } else {
2244                            Ok(r)
2245                        }
2246                    }
2247                    ty::Float(f) => {
2248                        let acc = if $ordered {
2249                            // ordered arithmetic reductions take an accumulator
2250                            args[1].immediate()
2251                        } else {
2252                            // unordered arithmetic reductions use the identity accumulator
2253                            match f.bit_width() {
2254                                32 => bx.const_real(bx.type_f32(), $identity),
2255                                64 => bx.const_real(bx.type_f64(), $identity),
2256                                v => return_error!(
2257                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
2258                                        span,
2259                                        name,
2260                                        symbol: sym::$name,
2261                                        in_ty,
2262                                        in_elem,
2263                                        size: v,
2264                                        ret_ty
2265                                    }
2266                                ),
2267                            }
2268                        };
2269                        Ok(bx.$float_reduce(acc, args[0].immediate()))
2270                    }
2271                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2272                        span,
2273                        name,
2274                        symbol: sym::$name,
2275                        in_ty,
2276                        in_elem,
2277                        ret_ty
2278                    }),
2279                };
2280            }
2281        };
2282    }
2283
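    // Note: -0.0 (not +0.0) is the identity for float addition, since -0.0 + x == x for every x,
    // including x == +0.0, whereas +0.0 + -0.0 == +0.0. The identity is only needed by the
    // unordered float reductions; the ordered ones take an explicit accumulator instead.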
2284    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
2285    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
2286    arith_red!(
2287        simd_reduce_add_unordered: vector_reduce_add,
2288        vector_reduce_fadd_reassoc,
2289        false,
2290        add,
2291        -0.0
2292    );
2293    arith_red!(
2294        simd_reduce_mul_unordered: vector_reduce_mul,
2295        vector_reduce_fmul_reassoc,
2296        false,
2297        mul,
2298        1.0
2299    );
2300
2301    macro_rules! minmax_red {
2302        ($name:ident: $int_red:ident, $float_red:ident) => {
2303            if name == sym::$name {
2304                require!(
2305                    ret_ty == in_elem,
2306                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2307                );
2308                return match in_elem.kind() {
2309                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
2310                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
2311                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
2312                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2313                        span,
2314                        name,
2315                        symbol: sym::$name,
2316                        in_ty,
2317                        in_elem,
2318                        ret_ty
2319                    }),
2320                };
2321            }
2322        };
2323    }
2324
2325    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2326    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2327
2328    macro_rules! bitwise_red {
2329        ($name:ident : $red:ident, $boolean:expr) => {
2330            if name == sym::$name {
2331                let input = if !$boolean {
2332                    require!(
2333                        ret_ty == in_elem,
2334                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2335                    );
2336                    args[0].immediate()
2337                } else {
2338                    let bitwidth = match in_elem.kind() {
2339                        ty::Int(i) => {
2340                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2341                        }
2342                        ty::Uint(i) => {
2343                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2344                        }
2345                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2346                            span,
2347                            name,
2348                            symbol: sym::$name,
2349                            in_ty,
2350                            in_elem,
2351                            ret_ty
2352                        }),
2353                    };
2354
2355                    vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
2356                };
2357                return match in_elem.kind() {
2358                    ty::Int(_) | ty::Uint(_) => {
2359                        let r = bx.$red(input);
2360                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
2361                    }
2362                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2363                        span,
2364                        name,
2365                        symbol: sym::$name,
2366                        in_ty,
2367                        in_elem,
2368                        ret_ty
2369                    }),
2370                };
2371            }
2372        };
2373    }
2374
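    // For the boolean reductions (`simd_reduce_all`/`simd_reduce_any`), the mask vector is first
    // reduced to one `i1` per lane via its sign bit, and the reduced `i1` result is then
    // zero-extended to the `bool` return type.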
2375    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2376    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2377    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2378    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2379    bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2380
2381    if name == sym::simd_cast_ptr {
2382        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2383        require!(
2384            in_len == out_len,
2385            InvalidMonomorphization::ReturnLengthInputType {
2386                span,
2387                name,
2388                in_len,
2389                in_ty,
2390                ret_ty,
2391                out_len
2392            }
2393        );
2394
2395        match in_elem.kind() {
2396            ty::RawPtr(p_ty, _) => {
2397                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2398                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2399                });
2400                require!(
2401                    metadata.is_unit(),
2402                    InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
2403                );
2404            }
2405            _ => {
2406                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2407            }
2408        }
2409        match out_elem.kind() {
2410            ty::RawPtr(p_ty, _) => {
2411                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2412                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2413                });
2414                require!(
2415                    metadata.is_unit(),
2416                    InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
2417                );
2418            }
2419            _ => {
2420                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2421            }
2422        }
2423
2424        return Ok(args[0].immediate());
2425    }
2426
2427    if name == sym::simd_expose_provenance {
2428        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2429        require!(
2430            in_len == out_len,
2431            InvalidMonomorphization::ReturnLengthInputType {
2432                span,
2433                name,
2434                in_len,
2435                in_ty,
2436                ret_ty,
2437                out_len
2438            }
2439        );
2440
2441        match in_elem.kind() {
2442            ty::RawPtr(_, _) => {}
2443            _ => {
2444                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2445            }
2446        }
2447        match out_elem.kind() {
2448            ty::Uint(ty::UintTy::Usize) => {}
2449            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
2450        }
2451
2452        return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
2453    }
2454
2455    if name == sym::simd_with_exposed_provenance {
2456        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2457        require!(
2458            in_len == out_len,
2459            InvalidMonomorphization::ReturnLengthInputType {
2460                span,
2461                name,
2462                in_len,
2463                in_ty,
2464                ret_ty,
2465                out_len
2466            }
2467        );
2468
2469        match in_elem.kind() {
2470            ty::Uint(ty::UintTy::Usize) => {}
2471            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
2472        }
2473        match out_elem.kind() {
2474            ty::RawPtr(_, _) => {}
2475            _ => {
2476                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2477            }
2478        }
2479
2480        return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
2481    }
2482
2483    if name == sym::simd_cast || name == sym::simd_as {
2484        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2485        require!(
2486            in_len == out_len,
2487            InvalidMonomorphization::ReturnLengthInputType {
2488                span,
2489                name,
2490                in_len,
2491                in_ty,
2492                ret_ty,
2493                out_len
2494            }
2495        );
2496        // casting cares about nominal type, not just structural type
2497        if in_elem == out_elem {
2498            return Ok(args[0].immediate());
2499        }
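        // `simd_cast` makes out-of-range float-to-int conversions UB, so plain `fptoui`/`fptosi`
        // suffices for it, while `simd_as` follows `as`-cast semantics and must saturate via
        // `cast_float_to_int` (see the float-to-int arm of the match below).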
2500
2501        #[derive(Copy, Clone)]
2502        enum Sign {
2503            Unsigned,
2504            Signed,
2505        }
2506        use Sign::*;
2507
2508        enum Style {
2509            Float,
2510            Int(Sign),
2511            Unsupported,
2512        }
2513
2514        let (in_style, in_width) = match in_elem.kind() {
2515            // vectors of pointer-sized integers should've been
2516            // disallowed before here, so this unwrap is safe.
2517            ty::Int(i) => (
2518                Style::Int(Signed),
2519                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2520            ),
2521            ty::Uint(u) => (
2522                Style::Int(Unsigned),
2523                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2524            ),
2525            ty::Float(f) => (Style::Float, f.bit_width()),
2526            _ => (Style::Unsupported, 0),
2527        };
2528        let (out_style, out_width) = match out_elem.kind() {
2529            ty::Int(i) => (
2530                Style::Int(Signed),
2531                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2532            ),
2533            ty::Uint(u) => (
2534                Style::Int(Unsigned),
2535                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2536            ),
2537            ty::Float(f) => (Style::Float, f.bit_width()),
2538            _ => (Style::Unsupported, 0),
2539        };
2540
2541        match (in_style, out_style) {
2542            (Style::Int(sign), Style::Int(_)) => {
2543                return Ok(match in_width.cmp(&out_width) {
2544                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2545                    Ordering::Equal => args[0].immediate(),
2546                    Ordering::Less => match sign {
2547                        Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2548                        Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2549                    },
2550                });
2551            }
2552            (Style::Int(Sign::Signed), Style::Float) => {
2553                return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2554            }
2555            (Style::Int(Sign::Unsigned), Style::Float) => {
2556                return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2557            }
2558            (Style::Float, Style::Int(sign)) => {
2559                return Ok(match (sign, name == sym::simd_as) {
2560                    (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2561                    (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2562                    (_, true) => bx.cast_float_to_int(
2563                        matches!(sign, Sign::Signed),
2564                        args[0].immediate(),
2565                        llret_ty,
2566                    ),
2567                });
2568            }
2569            (Style::Float, Style::Float) => {
2570                return Ok(match in_width.cmp(&out_width) {
2571                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2572                    Ordering::Equal => args[0].immediate(),
2573                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2574                });
2575            }
2576            _ => { /* Unsupported. Fallthrough. */ }
2577        }
2578        return_error!(InvalidMonomorphization::UnsupportedCast {
2579            span,
2580            name,
2581            in_ty,
2582            in_elem,
2583            ret_ty,
2584            out_elem
2585        });
2586    }
    macro_rules! arith_binary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
                match in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                return_error!(
                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
                );
            })*
        }
    }
    arith_binary! {
        simd_add: Uint, Int => add, Float => fadd;
        simd_sub: Uint, Int => sub, Float => fsub;
        simd_mul: Uint, Int => mul, Float => fmul;
        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
        simd_rem: Uint => urem, Int => srem, Float => frem;
        simd_shl: Uint, Int => shl;
        simd_shr: Uint => lshr, Int => ashr;
        simd_and: Uint, Int => and;
        simd_or: Uint, Int => or;
        simd_xor: Uint, Int => xor;
        simd_fmax: Float => maxnum;
        simd_fmin: Float => minnum;
    }
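    // Same expansion scheme as `arith_binary!`, but for single-operand
    // operations.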
    macro_rules! arith_unary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
                match in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate()))
                    })*
                    _ => {},
                }
                return_error!(
                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
                );
            })*
        }
    }
    arith_unary! {
        simd_neg: Int => neg, Float => fneg;
    }

    // Integer bit-manipulation intrinsics. Most are unary; the funnel shifts
    // take three operands (two halves to concatenate plus a shift amount).
    if matches!(
        name,
        sym::simd_bswap
            | sym::simd_bitreverse
            | sym::simd_ctlz
            | sym::simd_ctpop
            | sym::simd_cttz
            | sym::simd_funnel_shl
            | sym::simd_funnel_shr
    ) {
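        // Rebuild the LLVM vector type from the element kind; it is passed as
        // the type parameter that selects the intrinsic overload
        // (e.g. `llvm.bswap.v8i16`).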
        let vec_ty = bx.cx.type_vector(
            match *in_elem.kind() {
                ty::Int(i) => bx.cx.type_int_from_ty(i),
                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
                    span,
                    name,
                    in_ty,
                    in_elem
                }),
            },
            in_len as u64,
        );
        let llvm_intrinsic = match name {
            sym::simd_bswap => "llvm.bswap",
            sym::simd_bitreverse => "llvm.bitreverse",
            sym::simd_ctlz => "llvm.ctlz",
            sym::simd_ctpop => "llvm.ctpop",
            sym::simd_cttz => "llvm.cttz",
            sym::simd_funnel_shl => "llvm.fshl",
            sym::simd_funnel_shr => "llvm.fshr",
            _ => unreachable!(),
        };
        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();

        return match name {
            // byte swap is a no-op for i8/u8
            sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
            sym::simd_ctlz | sym::simd_cttz => {
                // `llvm.ctlz`/`llvm.cttz` take an extra i1 immediate: passing true
                // would make a zero input produce poison, so pass false here.
                let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
                Ok(bx.call_intrinsic(
                    llvm_intrinsic,
                    &[vec_ty],
                    &[args[0].immediate(), dont_poison_on_zero],
                ))
            }
            sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
                // simple unary argument cases
                Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[args[0].immediate()]))
            }
            sym::simd_funnel_shl | sym::simd_funnel_shr => Ok(bx.call_intrinsic(
                llvm_intrinsic,
                &[vec_ty],
                &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
            )),
            _ => unreachable!(),
        };
    }

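    // `simd_arith_offset` is a lane-wise pointer offset: a plain (non-inbounds,
    // i.e. wrapping) vector GEP scales each lane's offset by the pointee size.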
    if name == sym::simd_arith_offset {
        // This also checks that the first operand is a ptr type.
        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
            span_bug!(span, "must be called with a vector of pointer types as first argument")
        });
        let layout = bx.layout_of(pointee);
        let ptrs = args[0].immediate();
        // The second argument must be a ptr-sized integer.
        // (We don't care about the signedness; this is wrapping anyway.)
        let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
            span_bug!(
                span,
                "must be called with a vector of pointer-sized integers as second argument"
            );
        }
        let offsets = args[1].immediate();

        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
    }

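    // Saturating add/sub map directly onto `llvm.{s,u}add.sat` and
    // `llvm.{s,u}sub.sat`, with the `s`/`u` prefix chosen by the element
    // signedness.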
    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
        let lhs = args[0].immediate();
        let rhs = args[1].immediate();
        let is_add = name == sym::simd_saturating_add;
        let (signed, elem_ty) = match *in_elem.kind() {
            ty::Int(i) => (true, bx.cx.type_int_from_ty(i)),
            ty::Uint(i) => (false, bx.cx.type_uint_from_ty(i)),
            _ => {
                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
                    span,
                    name,
                    expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
                    vector_type: args[0].layout.ty
                });
            }
        };
        let llvm_intrinsic = format!(
            "llvm.{}{}.sat",
            if signed { 's' } else { 'u' },
            if is_add { "add" } else { "sub" },
        );
        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);

        return Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[lhs, rhs]));
    }

    span_bug!(span, "unknown SIMD intrinsic");
}