1use std::assert_matches::assert_matches;
2use std::cmp::Ordering;
3
4use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size};
5use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
6use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
7use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
8use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
9use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
10use rustc_codegen_ssa::traits::*;
11use rustc_hir as hir;
12use rustc_middle::mir::BinOp;
13use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
14use rustc_middle::ty::{self, GenericArgsRef, Ty};
15use rustc_middle::{bug, span_bug};
16use rustc_span::{Span, Symbol, sym};
17use rustc_symbol_mangling::mangle_internal_symbol;
18use rustc_target::callconv::{FnAbi, PassMode};
19use rustc_target::spec::{HasTargetSpec, PanicStrategy};
20use tracing::debug;
21
22use crate::abi::{FnAbiLlvmExt, LlvmType};
23use crate::builder::Builder;
24use crate::context::CodegenCx;
25use crate::llvm::{self, Metadata};
26use crate::type_::Type;
27use crate::type_of::LayoutLlvmExt;
28use crate::va_arg::emit_va_arg;
29use crate::value::Value;
30
/// Maps a Rust intrinsic symbol to the LLVM intrinsic it lowers to 1:1,
/// returning the intrinsic's LLVM function type and value.
///
/// Only "simple" intrinsics belong here: those whose arguments can be passed
/// through unchanged as immediates (see the `simple.is_some()` arm of
/// `codegen_intrinsic_call`). Anything needing custom lowering returns `None`
/// and is handled by a dedicated match arm instead.
fn get_simple_intrinsic<'ll>(
    cx: &CodegenCx<'ll, '_>,
    name: Symbol,
) -> Option<(&'ll Type, &'ll Value)> {
    let llvm_name = match name {
        sym::sqrtf16 => "llvm.sqrt.f16",
        sym::sqrtf32 => "llvm.sqrt.f32",
        sym::sqrtf64 => "llvm.sqrt.f64",
        sym::sqrtf128 => "llvm.sqrt.f128",

        // `powi` takes an `i32` exponent for every float width.
        sym::powif16 => "llvm.powi.f16.i32",
        sym::powif32 => "llvm.powi.f32.i32",
        sym::powif64 => "llvm.powi.f64.i32",
        sym::powif128 => "llvm.powi.f128.i32",

        sym::sinf16 => "llvm.sin.f16",
        sym::sinf32 => "llvm.sin.f32",
        sym::sinf64 => "llvm.sin.f64",
        sym::sinf128 => "llvm.sin.f128",

        sym::cosf16 => "llvm.cos.f16",
        sym::cosf32 => "llvm.cos.f32",
        sym::cosf64 => "llvm.cos.f64",
        sym::cosf128 => "llvm.cos.f128",

        sym::powf16 => "llvm.pow.f16",
        sym::powf32 => "llvm.pow.f32",
        sym::powf64 => "llvm.pow.f64",
        sym::powf128 => "llvm.pow.f128",

        sym::expf16 => "llvm.exp.f16",
        sym::expf32 => "llvm.exp.f32",
        sym::expf64 => "llvm.exp.f64",
        sym::expf128 => "llvm.exp.f128",

        sym::exp2f16 => "llvm.exp2.f16",
        sym::exp2f32 => "llvm.exp2.f32",
        sym::exp2f64 => "llvm.exp2.f64",
        sym::exp2f128 => "llvm.exp2.f128",

        sym::logf16 => "llvm.log.f16",
        sym::logf32 => "llvm.log.f32",
        sym::logf64 => "llvm.log.f64",
        sym::logf128 => "llvm.log.f128",

        sym::log10f16 => "llvm.log10.f16",
        sym::log10f32 => "llvm.log10.f32",
        sym::log10f64 => "llvm.log10.f64",
        sym::log10f128 => "llvm.log10.f128",

        sym::log2f16 => "llvm.log2.f16",
        sym::log2f32 => "llvm.log2.f32",
        sym::log2f64 => "llvm.log2.f64",
        sym::log2f128 => "llvm.log2.f128",

        sym::fmaf16 => "llvm.fma.f16",
        sym::fmaf32 => "llvm.fma.f32",
        sym::fmaf64 => "llvm.fma.f64",
        sym::fmaf128 => "llvm.fma.f128",

        sym::fmuladdf16 => "llvm.fmuladd.f16",
        sym::fmuladdf32 => "llvm.fmuladd.f32",
        sym::fmuladdf64 => "llvm.fmuladd.f64",
        sym::fmuladdf128 => "llvm.fmuladd.f128",

        sym::fabsf16 => "llvm.fabs.f16",
        sym::fabsf32 => "llvm.fabs.f32",
        sym::fabsf64 => "llvm.fabs.f64",
        sym::fabsf128 => "llvm.fabs.f128",

        sym::minnumf16 => "llvm.minnum.f16",
        sym::minnumf32 => "llvm.minnum.f32",
        sym::minnumf64 => "llvm.minnum.f64",
        sym::minnumf128 => "llvm.minnum.f128",

        // NOTE(review): `minimumf128`/`maximumf128` are deliberately absent
        // from this table (they fall through to `None` and take the default
        // body) — presumably because the LLVM `llvm.minimum.f128` /
        // `llvm.maximum.f128` lowering is not reliable on all targets; confirm
        // before adding them.
        sym::minimumf16 => "llvm.minimum.f16",
        sym::minimumf32 => "llvm.minimum.f32",
        sym::minimumf64 => "llvm.minimum.f64",
        sym::maxnumf16 => "llvm.maxnum.f16",
        sym::maxnumf32 => "llvm.maxnum.f32",
        sym::maxnumf64 => "llvm.maxnum.f64",
        sym::maxnumf128 => "llvm.maxnum.f128",

        sym::maximumf16 => "llvm.maximum.f16",
        sym::maximumf32 => "llvm.maximum.f32",
        sym::maximumf64 => "llvm.maximum.f64",
        sym::copysignf16 => "llvm.copysign.f16",
        sym::copysignf32 => "llvm.copysign.f32",
        sym::copysignf64 => "llvm.copysign.f64",
        sym::copysignf128 => "llvm.copysign.f128",

        sym::floorf16 => "llvm.floor.f16",
        sym::floorf32 => "llvm.floor.f32",
        sym::floorf64 => "llvm.floor.f64",
        sym::floorf128 => "llvm.floor.f128",

        sym::ceilf16 => "llvm.ceil.f16",
        sym::ceilf32 => "llvm.ceil.f32",
        sym::ceilf64 => "llvm.ceil.f64",
        sym::ceilf128 => "llvm.ceil.f128",

        sym::truncf16 => "llvm.trunc.f16",
        sym::truncf32 => "llvm.trunc.f32",
        sym::truncf64 => "llvm.trunc.f64",
        sym::truncf128 => "llvm.trunc.f128",

        // Round-half-to-even maps to `llvm.rint` (rounds according to the
        // current mode, which Rust fixes to nearest-ties-even).
        sym::round_ties_even_f16 => "llvm.rint.f16",
        sym::round_ties_even_f32 => "llvm.rint.f32",
        sym::round_ties_even_f64 => "llvm.rint.f64",
        sym::round_ties_even_f128 => "llvm.rint.f128",

        sym::roundf16 => "llvm.round.f16",
        sym::roundf32 => "llvm.round.f32",
        sym::roundf64 => "llvm.round.f64",
        sym::roundf128 => "llvm.round.f128",

        sym::ptr_mask => "llvm.ptrmask",

        _ => return None,
    };
    Some(cx.get_intrinsic(llvm_name))
}
163
impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    /// Lowers a call to a Rust intrinsic into LLVM IR.
    ///
    /// Returns `Ok(())` when the intrinsic was fully codegenned here, or
    /// `Err(instance)` when the intrinsic has no special lowering and the
    /// caller should fall back to codegenning its default MIR body.
    ///
    /// The computed value is stored into `llresult` according to
    /// `fn_abi.ret` (unless the return is ignored or the arm stored it and
    /// returned early itself).
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, self.typing_env());

        // Intrinsics are always `FnDef`s; anything else is a caller bug.
        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = tcx.item_name(def_id);

        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
        // Destination place the final value will be stored into.
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let simple = get_simple_intrinsic(self, name);
        let llval = match name {
            // 1:1 mappings to LLVM intrinsics: forward the immediates as-is.
            _ if simple.is_some() => {
                let (simple_ty, simple_fn) = simple.unwrap();
                self.call(
                    simple_ty,
                    None,
                    None,
                    simple_fn,
                    &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                    None,
                    Some(instance),
                )
            }
            // Lowers to `llvm.is.constant.*` when the type has such a
            // variant; conservatively answers `false` otherwise.
            sym::is_val_statically_known => {
                let intrinsic_type = args[0].layout.immediate_llvm_type(self.cx);
                let kind = self.type_kind(intrinsic_type);
                let intrinsic_name = match kind {
                    TypeKind::Pointer | TypeKind::Integer => {
                        // The LLVM type's Debug rendering doubles as its
                        // mangling suffix here (e.g. `i32`, `ptr`).
                        Some(format!("llvm.is.constant.{intrinsic_type:?}"))
                    }
                    // Float kinds need explicit suffixes since their Debug
                    // names don't match LLVM's mangling.
                    TypeKind::Half => Some(format!("llvm.is.constant.f16")),
                    TypeKind::Float => Some(format!("llvm.is.constant.f32")),
                    TypeKind::Double => Some(format!("llvm.is.constant.f64")),
                    TypeKind::FP128 => Some(format!("llvm.is.constant.f128")),
                    _ => None,
                };
                if let Some(intrinsic_name) = intrinsic_name {
                    self.call_intrinsic(&intrinsic_name, &[args[0].immediate()])
                } else {
                    // "Not statically known" is always a safe answer.
                    self.const_bool(false)
                }
            }
            // `select` with the `unpredictable` metadata set, so LLVM avoids
            // turning it into a branch.
            sym::select_unpredictable => {
                let cond = args[0].immediate();
                assert_eq!(args[1].layout, args[2].layout);
                let select = |bx: &mut Self, true_val, false_val| {
                    let result = bx.select(cond, true_val, false_val);
                    bx.set_unpredictable(&result);
                    result
                };
                match (args[1].val, args[2].val) {
                    // By-ref operands: select between the *pointers* and
                    // store through the chosen one.
                    (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
                        assert!(true_val.llextra.is_none());
                        assert!(false_val.llextra.is_none());
                        assert_eq!(true_val.align, false_val.align);
                        let ptr = select(self, true_val.llval, false_val.llval);
                        let selected =
                            OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
                        selected.store(self, result);
                        return Ok(());
                    }
                    (OperandValue::Immediate(_), OperandValue::Immediate(_))
                    | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
                        let true_val = args[1].immediate_or_packed_pair(self);
                        let false_val = args[2].immediate_or_packed_pair(self);
                        select(self, true_val, false_val)
                    }
                    // Nothing to select for ZSTs.
                    (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
                    _ => span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
                }
            }
            sym::catch_unwind => {
                catch_unwind_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    llresult,
                );
                return Ok(());
            }
            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
            sym::va_copy => {
                self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
            }
            sym::va_arg => {
                match fn_abi.ret.layout.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        match scalar.primitive() {
                            Primitive::Int(..) => {
                                // C's varargs default-promote small ints to
                                // `int`; read as i32 then truncate back.
                                if self.cx().size_of(ret_ty).bytes() < 4 {
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, llret_ty)
                                } else {
                                    emit_va_arg(self, args[0], ret_ty)
                                }
                            }
                            Primitive::Float(Float::F16) => {
                                bug!("the va_arg intrinsic does not work with `f16`")
                            }
                            Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
                                emit_va_arg(self, args[0], ret_ty)
                            }
                            // `f32` is default-promoted to `f64` in varargs,
                            // so it can never legitimately appear here.
                            Primitive::Float(Float::F32) => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                            Primitive::Float(Float::F128) => {
                                bug!("the va_arg intrinsic does not work with `f128`")
                            }
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }

            sym::volatile_load | sym::unaligned_volatile_load => {
                let tp_ty = fn_args.type_at(0);
                let ptr = args[0].immediate();
                // Respect a cast return ABI: load as the cast type, not the
                // Rust-level type.
                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
                    let llty = ty.llvm_type(self);
                    self.volatile_load(llty, ptr)
                } else {
                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
                };
                // The unaligned variant pins the load to align 1.
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    self.align_of(tp_ty).bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                if !result.layout.is_zst() {
                    self.store_to_place(load, result.val);
                }
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                // llvm.prefetch operands: rw (0=read, 1=write) and
                // cache type (0=instruction, 1=data).
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                self.call_intrinsic(
                    "llvm.prefetch",
                    &[
                        args[0].immediate(),
                        self.const_i32(rw),
                        args[1].immediate(),
                        self.const_i32(cache_type),
                    ],
                )
            }
            // Computes `a * b + c + d` in double width, returning the
            // (low, high) halves — cannot overflow in the wide type.
            sym::carrying_mul_add => {
                let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);

                let wide_llty = self.type_ix(size.bits() * 2);
                let args = args.as_array().unwrap();
                let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));

                let wide = if signed {
                    let prod = self.unchecked_smul(a, b);
                    let acc = self.unchecked_sadd(prod, c);
                    self.unchecked_sadd(acc, d)
                } else {
                    let prod = self.unchecked_umul(a, b);
                    let acc = self.unchecked_uadd(prod, c);
                    self.unchecked_uadd(acc, d)
                };

                let narrow_llty = self.type_ix(size.bits());
                let low = self.trunc(wide, narrow_llty);
                let bits_const = self.const_uint(wide_llty, size.bits());
                // Shift is logical even for signed: we want the raw high bits.
                let high = self.lshr(wide, bits_const);
                let high = self.trunc(high, narrow_llty);

                let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
                let pair = self.const_poison(pair_llty);
                let pair = self.insert_value(pair, low, 0);
                let pair = self.insert_value(pair, high, 1);
                pair
            }
            // Integer bit-manipulation intrinsics, all width-mangled.
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::rotate_left
            | sym::rotate_right
            | sym::saturating_add
            | sym::saturating_sub => {
                let ty = arg_tys[0];
                if !ty.is_integral() {
                    tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                    return Ok(());
                }
                let (size, signed) = ty.int_size_and_signed(self.tcx);
                let width = size.bits();
                match name {
                    sym::ctlz | sym::cttz => {
                        // Second operand `false` = zero input is defined.
                        let y = self.const_bool(false);
                        let ret = self.call_intrinsic(
                            &format!("llvm.{name}.i{width}"),
                            &[args[0].immediate(), y],
                        );

                        self.intcast(ret, llret_ty, false)
                    }
                    sym::ctlz_nonzero => {
                        // `true` = zero input is poison; caller guarantees nonzero.
                        let y = self.const_bool(true);
                        let llvm_name = &format!("llvm.ctlz.i{width}");
                        let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
                        self.intcast(ret, llret_ty, false)
                    }
                    sym::cttz_nonzero => {
                        let y = self.const_bool(true);
                        let llvm_name = &format!("llvm.cttz.i{width}");
                        let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
                        self.intcast(ret, llret_ty, false)
                    }
                    sym::ctpop => {
                        let ret = self.call_intrinsic(
                            &format!("llvm.ctpop.i{width}"),
                            &[args[0].immediate()],
                        );
                        self.intcast(ret, llret_ty, false)
                    }
                    sym::bswap => {
                        // Byte-swapping a single byte is the identity.
                        if width == 8 {
                            args[0].immediate()
                        } else {
                            self.call_intrinsic(
                                &format!("llvm.bswap.i{width}"),
                                &[args[0].immediate()],
                            )
                        }
                    }
                    sym::bitreverse => self.call_intrinsic(
                        &format!("llvm.bitreverse.i{width}"),
                        &[args[0].immediate()],
                    ),
                    sym::rotate_left | sym::rotate_right => {
                        // Rotation is a funnel shift of the value with itself.
                        let is_left = name == sym::rotate_left;
                        let val = args[0].immediate();
                        let raw_shift = args[1].immediate();
                        let llvm_name =
                            &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);

                        // llvm.fshl/fshr require the shift amount to have the
                        // same width as the value.
                        let raw_shift = self.intcast(raw_shift, self.val_ty(val), false);

                        self.call_intrinsic(llvm_name, &[val, val, raw_shift])
                    }
                    sym::saturating_add | sym::saturating_sub => {
                        let is_add = name == sym::saturating_add;
                        let lhs = args[0].immediate();
                        let rhs = args[1].immediate();
                        let llvm_name = &format!(
                            "llvm.{}{}.sat.i{}",
                            if signed { 's' } else { 'u' },
                            if is_add { "add" } else { "sub" },
                            width
                        );
                        self.call_intrinsic(llvm_name, &[lhs, rhs])
                    }
                    _ => bug!(),
                }
            }

            sym::raw_eq => {
                use BackendRepr::*;
                let tp_ty = fn_args.type_at(0);
                let layout = self.layout_of(tp_ty).layout;
                // Small/simple layouts compare as one wide integer load;
                // everything else falls back to memcmp.
                let use_integer_compare = match layout.backend_repr() {
                    Scalar(_) | ScalarPair(_, _) => true,
                    SimdVector { .. } => false,
                    Memory { .. } => {
                        layout.size() <= self.data_layout().pointer_size * 2
                    }
                };

                let a = args[0].immediate();
                let b = args[1].immediate();
                if layout.size().bytes() == 0 {
                    // ZSTs are always equal.
                    self.const_bool(true)
                } else if use_integer_compare {
                    let integer_ty = self.type_ix(layout.size().bits());
                    let a_val = self.load(integer_ty, a, layout.align().abi);
                    let b_val = self.load(integer_ty, b, layout.align().abi);
                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
                } else {
                    let n = self.const_usize(layout.size().bytes());
                    let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
                    // memcmp returns `int`, which is 16-bit on avr/msp430.
                    match self.cx.sess().target.arch.as_ref() {
                        "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
                        _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
                    }
                }
            }

            sym::compare_bytes => {
                let cmp = self.call_intrinsic(
                    "memcmp",
                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
                );
                // Sign-extend to the stable i32 return of the intrinsic.
                self.sext(cmp, self.type_ix(32))
            }

            // Opaque identity: an empty asm block with a memory clobber so
            // the optimizer cannot reason about the value.
            sym::black_box => {
                args[0].val.store(self, result);
                let result_val_span = [result.val.llval];
                // ZSTs have no value to tie to the asm; only clobber memory.
                let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
                    ("~{memory}", &[])
                } else {
                    ("r,~{memory}", &result_val_span)
                };
                crate::asm::inline_asm_call(
                    self,
                    "",
                    constraint,
                    inputs,
                    self.type_void(),
                    &[],
                    true,
                    false,
                    llvm::AsmDialect::Att,
                    &[span],
                    false,
                    None,
                    None,
                )
                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

                // The value was already stored via `result` above.
                return Ok(());
            }

            _ if name.as_str().starts_with("simd_") => {
                // SIMD operands passed indirectly (by-ref) must be loaded
                // into vector immediates before the generic SIMD lowering.
                let mut loaded_args = Vec::new();
                for (ty, arg) in arg_tys.iter().zip(args) {
                    loaded_args.push(
                        if ty.is_simd()
                            && let OperandValue::Ref(place) = arg.val
                        {
                            let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
                            let elem_ll_ty = match elem_ty.kind() {
                                ty::Float(f) => self.type_float_from_ty(*f),
                                ty::Int(i) => self.type_int_from_ty(*i),
                                ty::Uint(u) => self.type_uint_from_ty(*u),
                                ty::RawPtr(_, _) => self.type_ptr(),
                                _ => unreachable!(),
                            };
                            let loaded =
                                self.load_from_place(self.type_vector(elem_ll_ty, size), place);
                            OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
                        } else {
                            *arg
                        },
                    );
                }

                // Likewise, a memory-repr SIMD return is computed as a
                // proper LLVM vector type.
                let llret_ty = if ret_ty.is_simd()
                    && let BackendRepr::Memory { .. } = self.layout_of(ret_ty).layout.backend_repr
                {
                    let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
                    let elem_ll_ty = match elem_ty.kind() {
                        ty::Float(f) => self.type_float_from_ty(*f),
                        ty::Int(i) => self.type_int_from_ty(*i),
                        ty::Uint(u) => self.type_uint_from_ty(*u),
                        ty::RawPtr(_, _) => self.type_ptr(),
                        _ => unreachable!(),
                    };
                    self.type_vector(elem_ll_ty, size)
                } else {
                    llret_ty
                };

                match generic_simd_intrinsic(
                    self,
                    name,
                    callee_ty,
                    fn_args,
                    &loaded_args,
                    ret_ty,
                    llret_ty,
                    span,
                ) {
                    Ok(llval) => llval,
                    // An error was already emitted; nothing to store.
                    Err(()) => return Ok(()),
                }
            }

            _ => {
                // No special lowering: ask the caller to codegen the
                // intrinsic's default MIR body instead.
                debug!("unknown intrinsic '{}' -- falling back to default body", name);
                return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
            }
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                // Cast ABI: store the raw cast value directly.
                self.store(llval, result.val.llval, result.val.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
                    .store(self, result);
            }
        }
        Ok(())
    }

    /// Emits an abort via `llvm.trap`.
    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[]);
    }

    /// Emits `llvm.assume(val)`; skipped entirely at `-C opt-level=0` since
    /// assumes only help the optimizer and cost compile time.
    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[val]);
        }
    }

    /// Emits a branch-weight hint via `llvm.expect.i1`; at `-C opt-level=0`
    /// the condition is returned unchanged.
    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
        } else {
            cond
        }
    }

    /// Emits an `llvm.type.test` check (CFI) of `pointer` against `typeid`.
    fn type_test(&mut self, pointer: Self::Value, typeid: Self::Metadata) -> Self::Value {
        let typeid = self.get_metadata_value(typeid);
        self.call_intrinsic("llvm.type.test", &[pointer, typeid])
    }

    /// Emits an `llvm.type.checked.load` of a vtable slot (CFI-checked
    /// virtual call), returning the loaded function pointer.
    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &'ll Metadata,
    ) -> Self::Value {
        let typeid = self.get_metadata_value(typeid);
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load =
            self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]);
        // Field 0 is the loaded pointer; field 1 (the check bit) is unused here.
        self.extract_value(type_checked_load, 0)
    }

    /// Emits `llvm.va_start` for the given `va_list`.
    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[va_list])
    }

    /// Emits `llvm.va_end` for the given `va_list`.
    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[va_list])
    }
}
690
691fn catch_unwind_intrinsic<'ll>(
692 bx: &mut Builder<'_, 'll, '_>,
693 try_func: &'ll Value,
694 data: &'ll Value,
695 catch_func: &'ll Value,
696 dest: &'ll Value,
697) {
698 if bx.sess().panic_strategy() == PanicStrategy::Abort {
699 let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
700 bx.call(try_func_ty, None, None, try_func, &[data], None, None);
701 let ret_align = bx.tcx().data_layout.i32_align.abi;
704 bx.store(bx.const_i32(0), dest, ret_align);
705 } else if wants_msvc_seh(bx.sess()) {
706 codegen_msvc_try(bx, try_func, data, catch_func, dest);
707 } else if wants_wasm_eh(bx.sess()) {
708 codegen_wasm_try(bx, try_func, data, catch_func, dest);
709 } else if bx.sess().target.os == "emscripten" {
710 codegen_emcc_try(bx, try_func, data, catch_func, dest);
711 } else {
712 codegen_gnu_try(bx, try_func, data, catch_func, dest);
713 }
714}
715
/// MSVC SEH lowering of `catch_unwind`: builds (once, via `get_rust_try_fn`)
/// a `__rust_try` shim using catchswitch/catchpad funclets, then calls it and
/// stores its `i32` result (0 = ok, 1 = caught) into `dest`.
fn codegen_msvc_try<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
        let caught = bx.append_sibling_block("caught");

        // Shim parameters: (try_func, data, catch_func).
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // Stack slot the catchpad writes the caught exception pointer into.
        let ptr_size = bx.tcx().data_layout.pointer_size;
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(ptr_size, ptr_align);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        // `invoke` so an unwind lands in the catchswitch block.
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        // First handler (rust panics) is tried before the foreign catch-all.
        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);

        // Synthesize a C++ `type_info` describing the "rust_panic" exception
        // type so the rust catchpad only matches Rust panics.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
        let tydesc = bx.declare_global(
            &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"),
            bx.val_ty(type_info),
        );

        // Link-once ODR (+ comdat where supported) so the descriptor is
        // deduplicated across codegen units.
        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        llvm::set_initializer(tydesc, type_info);

        bx.switch_to_block(catchpad_rust);
        // Flags value 8: catch-by-reference semantics for the exception object
        // — NOTE(review): magic SEH HandlerType adjective; confirm against the
        // MSVC EH docs before changing.
        let flags = bx.const_i32(8);
        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(catchpad_foreign);
        // Flags value 64: catch-all (`catch (...)`) for foreign exceptions;
        // no payload pointer is available, so pass null to the catch fn.
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null, flags, null]);
        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Invoke the shim and publish its 0/1 status.
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
871
/// WebAssembly exception-handling lowering of `catch_unwind`: like the MSVC
/// path it uses catchswitch/catchpad funclets, but retrieves the exception
/// payload via the wasm EH intrinsics.
fn codegen_wasm_try<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad = bx.append_sibling_block("catchpad");
        let caught = bx.append_sibling_block("caught");

        // Shim parameters: (try_func, data, catch_func).
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        // `invoke` so an unwind lands in the catchswitch block.
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad]);

        bx.switch_to_block(catchpad);
        // A null clause acts as a catch-all tag.
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null]);

        // Extract the thrown pointer; the selector is fetched but unused
        // (the single catch-all pad handles everything).
        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Invoke the shim and publish its 0/1 status.
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
945
/// DWARF landing-pad lowering of `catch_unwind` (the default, "GNU" scheme):
/// `invoke`s `try_func` and catches the unwind in a `landingpad`, passing the
/// exception pointer on to `catch_func`.
fn codegen_gnu_try<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        // Shim parameters: (try_func, data, catch_func).
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catch);
        // Landing pad yields { exception_ptr, selector }; a null clause makes
        // this a catch-all.
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_ptr());
        bx.add_clause(vals, tydesc);
        let ptr = bx.extract_value(vals, 0);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Invoke the shim and publish its 0/1 status.
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
1012
/// Emscripten lowering of `catch_unwind`: like the GNU scheme but must
/// distinguish Rust panics from C++ exceptions via the landing-pad selector,
/// and forwards `{ exception_ptr, is_rust_panic }` packed into a stack slot.
fn codegen_emcc_try<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        // Shim parameters: (try_func, data, catch_func).
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catch);
        // Two clauses: the Rust panic typeinfo, plus a null catch-all for
        // foreign (C++) exceptions.
        let tydesc = bx.eh_catch_typeinfo();
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
        bx.add_clause(vals, tydesc);
        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
        let ptr = bx.extract_value(vals, 0);
        let selector = bx.extract_value(vals, 1);

        // A Rust panic is identified by the selector matching the Rust typeid.
        let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
        let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
        let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());

        // Pack (ptr, is_rust_panic) into a two-pointer-sized stack slot; the
        // bool is stored at offset `ptr_size` with byte alignment.
        let ptr_size = bx.tcx().data_layout.pointer_size;
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let i8_align = bx.tcx().data_layout.i8_align.abi;
        assert!(i8_align <= ptr_align);
        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
        bx.store(ptr, catch_data, ptr_align);
        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
        bx.store(is_rust_panic, catch_data_1, i8_align);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Invoke the shim and publish its 0/1 status.
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
1095
1096fn gen_fn<'a, 'll, 'tcx>(
1099 cx: &'a CodegenCx<'ll, 'tcx>,
1100 name: &str,
1101 rust_fn_sig: ty::PolyFnSig<'tcx>,
1102 codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
1103) -> (&'ll Type, &'ll Value) {
1104 let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
1105 let llty = fn_abi.llvm_type(cx);
1106 let llfn = cx.declare_fn(name, fn_abi, None);
1107 cx.set_frame_pointer_type(llfn);
1108 cx.apply_target_cpu_attr(llfn);
1109 llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
1111 let llbb = Builder::append_block(cx, llfn, "entry-block");
1112 let bx = Builder::build(cx, llbb);
1113 codegen(bx);
1114 (llty, llfn)
1115}
1116
1117fn get_rust_try_fn<'a, 'll, 'tcx>(
1122 cx: &'a CodegenCx<'ll, 'tcx>,
1123 codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
1124) -> (&'ll Type, &'ll Value) {
1125 if let Some(llfn) = cx.rust_try_fn.get() {
1126 return llfn;
1127 }
1128
1129 let tcx = cx.tcx;
1131 let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
1132 let try_fn_ty = Ty::new_fn_ptr(
1134 tcx,
1135 ty::Binder::dummy(tcx.mk_fn_sig(
1136 [i8p],
1137 tcx.types.unit,
1138 false,
1139 hir::Safety::Unsafe,
1140 ExternAbi::Rust,
1141 )),
1142 );
1143 let catch_fn_ty = Ty::new_fn_ptr(
1145 tcx,
1146 ty::Binder::dummy(tcx.mk_fn_sig(
1147 [i8p, i8p],
1148 tcx.types.unit,
1149 false,
1150 hir::Safety::Unsafe,
1151 ExternAbi::Rust,
1152 )),
1153 );
1154 let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
1156 [try_fn_ty, i8p, catch_fn_ty],
1157 tcx.types.i32,
1158 false,
1159 hir::Safety::Unsafe,
1160 ExternAbi::Rust,
1161 ));
1162 let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
1163 cx.rust_try_fn.set(Some(rust_try));
1164 rust_try
1165}
1166
1167fn generic_simd_intrinsic<'ll, 'tcx>(
1168 bx: &mut Builder<'_, 'll, 'tcx>,
1169 name: Symbol,
1170 callee_ty: Ty<'tcx>,
1171 fn_args: GenericArgsRef<'tcx>,
1172 args: &[OperandRef<'tcx, &'ll Value>],
1173 ret_ty: Ty<'tcx>,
1174 llret_ty: &'ll Type,
1175 span: Span,
1176) -> Result<&'ll Value, ()> {
1177 macro_rules! return_error {
1178 ($diag: expr) => {{
1179 bx.sess().dcx().emit_err($diag);
1180 return Err(());
1181 }};
1182 }
1183
1184 macro_rules! require {
1185 ($cond: expr, $diag: expr) => {
1186 if !$cond {
1187 return_error!($diag);
1188 }
1189 };
1190 }
1191
1192 macro_rules! require_simd {
1193 ($ty: expr, $variant:ident) => {{
1194 require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
1195 $ty.simd_size_and_type(bx.tcx())
1196 }};
1197 }
1198
1199 macro_rules! require_int_or_uint_ty {
1201 ($ty: expr, $diag: expr) => {
1202 match $ty {
1203 ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
1204 ty::Uint(i) => {
1205 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
1206 }
1207 _ => {
1208 return_error!($diag);
1209 }
1210 }
1211 };
1212 }
1213
1214 fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
1228 bx: &mut Builder<'a, 'll, 'tcx>,
1229 i_xn: &'ll Value,
1230 in_elem_bitwidth: u64,
1231 in_len: u64,
1232 ) -> &'ll Value {
1233 let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1235 let shift_indices = vec![shift_idx; in_len as _];
1236 let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1237 bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
1239 }
1240
1241 let tcx = bx.tcx();
1242 let sig = tcx.normalize_erasing_late_bound_regions(bx.typing_env(), callee_ty.fn_sig(tcx));
1243 let arg_tys = sig.inputs();
1244
1245 if cfg!(debug_assertions) {
1247 for (ty, arg) in arg_tys.iter().zip(args) {
1248 if ty.is_simd() {
1249 assert_matches!(arg.val, OperandValue::Immediate(_));
1250 }
1251 }
1252 }
1253
1254 if name == sym::simd_select_bitmask {
1255 let (len, _) = require_simd!(arg_tys[1], SimdArgument);
1256
1257 let expected_int_bits = len.max(8).next_power_of_two();
1258 let expected_bytes = len.div_ceil(8);
1259
1260 let mask_ty = arg_tys[0];
1261 let mask = match mask_ty.kind() {
1262 ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1263 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1264 ty::Array(elem, len)
1265 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1266 && len
1267 .try_to_target_usize(bx.tcx)
1268 .expect("expected monomorphic const in codegen")
1269 == expected_bytes =>
1270 {
1271 let place = PlaceRef::alloca(bx, args[0].layout);
1272 args[0].val.store(bx, place);
1273 let int_ty = bx.type_ix(expected_bytes * 8);
1274 bx.load(int_ty, place.val.llval, Align::ONE)
1275 }
1276 _ => return_error!(InvalidMonomorphization::InvalidBitmask {
1277 span,
1278 name,
1279 mask_ty,
1280 expected_int_bits,
1281 expected_bytes
1282 }),
1283 };
1284
1285 let i1 = bx.type_i1();
1286 let im = bx.type_ix(len);
1287 let i1xn = bx.type_vector(i1, len);
1288 let m_im = bx.trunc(mask, im);
1289 let m_i1s = bx.bitcast(m_im, i1xn);
1290 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1291 }
1292
1293 let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
1295 let in_ty = arg_tys[0];
1296
1297 let comparison = match name {
1298 sym::simd_eq => Some(BinOp::Eq),
1299 sym::simd_ne => Some(BinOp::Ne),
1300 sym::simd_lt => Some(BinOp::Lt),
1301 sym::simd_le => Some(BinOp::Le),
1302 sym::simd_gt => Some(BinOp::Gt),
1303 sym::simd_ge => Some(BinOp::Ge),
1304 _ => None,
1305 };
1306
1307 if let Some(cmp_op) = comparison {
1308 let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1309
1310 require!(
1311 in_len == out_len,
1312 InvalidMonomorphization::ReturnLengthInputType {
1313 span,
1314 name,
1315 in_len,
1316 in_ty,
1317 ret_ty,
1318 out_len
1319 }
1320 );
1321 require!(
1322 bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
1323 InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
1324 );
1325
1326 return Ok(compare_simd_types(
1327 bx,
1328 args[0].immediate(),
1329 args[1].immediate(),
1330 in_elem,
1331 llret_ty,
1332 cmp_op,
1333 ));
1334 }
1335
1336 if name == sym::simd_shuffle_const_generic {
1337 let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
1338 let n = idx.len() as u64;
1339
1340 let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1341 require!(
1342 out_len == n,
1343 InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1344 );
1345 require!(
1346 in_elem == out_ty,
1347 InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1348 );
1349
1350 let total_len = in_len * 2;
1351
1352 let indices: Option<Vec<_>> = idx
1353 .iter()
1354 .enumerate()
1355 .map(|(arg_idx, val)| {
1356 let idx = val.unwrap_leaf().to_i32();
1357 if idx >= i32::try_from(total_len).unwrap() {
1358 bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
1359 span,
1360 name,
1361 arg_idx: arg_idx as u64,
1362 total_len: total_len.into(),
1363 });
1364 None
1365 } else {
1366 Some(bx.const_i32(idx))
1367 }
1368 })
1369 .collect();
1370 let Some(indices) = indices else {
1371 return Ok(bx.const_null(llret_ty));
1372 };
1373
1374 return Ok(bx.shuffle_vector(
1375 args[0].immediate(),
1376 args[1].immediate(),
1377 bx.const_vector(&indices),
1378 ));
1379 }
1380
1381 if name == sym::simd_shuffle {
1382 let idx_ty = args[2].layout.ty;
1384 let n: u64 = if idx_ty.is_simd()
1385 && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
1386 {
1387 idx_ty.simd_size_and_type(bx.cx.tcx).0
1388 } else {
1389 return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
1390 };
1391
1392 let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1393 require!(
1394 out_len == n,
1395 InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1396 );
1397 require!(
1398 in_elem == out_ty,
1399 InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1400 );
1401
1402 let total_len = u128::from(in_len) * 2;
1403
1404 let indices = args[2].immediate();
1406 for i in 0..n {
1407 let val = bx.const_get_elt(indices, i as u64);
1408 let idx = bx
1409 .const_to_opt_u128(val, true)
1410 .unwrap_or_else(|| bug!("typeck should have already ensured that these are const"));
1411 if idx >= total_len {
1412 return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1413 span,
1414 name,
1415 arg_idx: i,
1416 total_len,
1417 });
1418 }
1419 }
1420
1421 return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
1422 }
1423
1424 if name == sym::simd_insert || name == sym::simd_insert_dyn {
1425 require!(
1426 in_elem == arg_tys[2],
1427 InvalidMonomorphization::InsertedType {
1428 span,
1429 name,
1430 in_elem,
1431 in_ty,
1432 out_ty: arg_tys[2]
1433 }
1434 );
1435
1436 let index_imm = if name == sym::simd_insert {
1437 let idx = bx
1438 .const_to_opt_u128(args[1].immediate(), false)
1439 .expect("typeck should have ensure that this is a const");
1440 if idx >= in_len.into() {
1441 return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1442 span,
1443 name,
1444 arg_idx: 1,
1445 total_len: in_len.into(),
1446 });
1447 }
1448 bx.const_i32(idx as i32)
1449 } else {
1450 args[1].immediate()
1451 };
1452
1453 return Ok(bx.insert_element(args[0].immediate(), args[2].immediate(), index_imm));
1454 }
1455 if name == sym::simd_extract || name == sym::simd_extract_dyn {
1456 require!(
1457 ret_ty == in_elem,
1458 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1459 );
1460 let index_imm = if name == sym::simd_extract {
1461 let idx = bx
1462 .const_to_opt_u128(args[1].immediate(), false)
1463 .expect("typeck should have ensure that this is a const");
1464 if idx >= in_len.into() {
1465 return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1466 span,
1467 name,
1468 arg_idx: 1,
1469 total_len: in_len.into(),
1470 });
1471 }
1472 bx.const_i32(idx as i32)
1473 } else {
1474 args[1].immediate()
1475 };
1476
1477 return Ok(bx.extract_element(args[0].immediate(), index_imm));
1478 }
1479
1480 if name == sym::simd_select {
1481 let m_elem_ty = in_elem;
1482 let m_len = in_len;
1483 let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
1484 require!(
1485 m_len == v_len,
1486 InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
1487 );
1488 let in_elem_bitwidth = require_int_or_uint_ty!(
1489 m_elem_ty.kind(),
1490 InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
1491 );
1492 let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len);
1493 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1494 }
1495
1496 if name == sym::simd_bitmask {
1497 let expected_int_bits = in_len.max(8).next_power_of_two();
1506 let expected_bytes = in_len.div_ceil(8);
1507
1508 let in_elem_bitwidth = require_int_or_uint_ty!(
1510 in_elem.kind(),
1511 InvalidMonomorphization::MaskWrongElementType { span, name, ty: in_elem }
1512 );
1513
1514 let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
1515 let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1517
1518 match ret_ty.kind() {
1519 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
1520 return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1522 }
1523 ty::Array(elem, len)
1524 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1525 && len
1526 .try_to_target_usize(bx.tcx)
1527 .expect("expected monomorphic const in codegen")
1528 == expected_bytes =>
1529 {
1530 let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
1532
1533 let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
1535 bx.store(ze, ptr, Align::ONE);
1536 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
1537 return Ok(bx.load(array_ty, ptr, Align::ONE));
1538 }
1539 _ => return_error!(InvalidMonomorphization::CannotReturn {
1540 span,
1541 name,
1542 ret_ty,
1543 expected_int_bits,
1544 expected_bytes
1545 }),
1546 }
1547 }
1548
    /// Lowers a "simple" elementwise float SIMD intrinsic (ceil, fabs, sqrt,
    /// fma, ...) to a call of the corresponding overloaded LLVM intrinsic,
    /// e.g. `simd_fsqrt` on a 4 x f32 vector becomes `llvm.sqrt.v4f32`.
    ///
    /// Emits an `InvalidMonomorphization` diagnostic and returns `Err(())` when
    /// the element type is not a supported float width or `name` is not one of
    /// the recognized intrinsics.
    fn simd_simple_float_intrinsic<'ll, 'tcx>(
        name: Symbol,
        in_elem: Ty<'_>,
        in_ty: Ty<'_>,
        in_len: u64,
        bx: &mut Builder<'_, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx, &'ll Value>],
    ) -> Result<&'ll Value, ()> {
        // Local copy of the enclosing function's error macro: report the
        // diagnostic and bail out of this helper with `Err(())`.
        macro_rules! return_error {
            ($diag: expr) => {{
                bx.sess().dcx().emit_err($diag);
                return Err(());
            }};
        }

        // Map the element type to its LLVM type and its mangling suffix
        // ("f16"/"f32"/"f64"/"f128") used to name the intrinsic overload.
        let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
            let elem_ty = bx.cx.type_float_from_ty(*f);
            match f.bit_width() {
                16 => ("f16", elem_ty),
                32 => ("f32", elem_ty),
                64 => ("f64", elem_ty),
                128 => ("f128", elem_ty),
                _ => return_error!(InvalidMonomorphization::FloatingPointVector {
                    span,
                    name,
                    f_ty: *f,
                    in_ty,
                }),
            }
        } else {
            // Non-float element types are rejected outright.
            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
        };

        let vec_ty = bx.type_vector(elem_ty, in_len);

        // Pick the LLVM base name and the function type: all of these are
        // unary except fma/fmuladd, which take three vector operands.
        let (intr_name, fn_ty) = match name {
            sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
            sym::simd_relaxed_fma => ("fmuladd", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
            sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
        };
        // Full overloaded name, e.g. "llvm.fma.v8f32".
        let llvm_name = &format!("llvm.{intr_name}.v{in_len}{elem_ty_str}");
        let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
        let c = bx.call(
            fn_ty,
            None,
            None,
            f,
            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
            None,
            None,
        );
        Ok(c)
    }
1616
1617 if std::matches!(
1618 name,
1619 sym::simd_ceil
1620 | sym::simd_fabs
1621 | sym::simd_fcos
1622 | sym::simd_fexp2
1623 | sym::simd_fexp
1624 | sym::simd_flog10
1625 | sym::simd_flog2
1626 | sym::simd_flog
1627 | sym::simd_floor
1628 | sym::simd_fma
1629 | sym::simd_fsin
1630 | sym::simd_fsqrt
1631 | sym::simd_relaxed_fma
1632 | sym::simd_round
1633 | sym::simd_trunc
1634 ) {
1635 return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
1636 }
1637
1638 fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
1642 match *elem_ty.kind() {
1643 ty::Int(v) => format!(
1644 "v{}i{}",
1645 vec_len,
1646 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
1648 ),
1649 ty::Uint(v) => format!(
1650 "v{}i{}",
1651 vec_len,
1652 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
1654 ),
1655 ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
1656 ty::RawPtr(_, _) => format!("v{}p0", vec_len),
1657 _ => unreachable!(),
1658 }
1659 }
1660
1661 fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
1662 let elem_ty = match *elem_ty.kind() {
1663 ty::Int(v) => cx.type_int_from_ty(v),
1664 ty::Uint(v) => cx.type_uint_from_ty(v),
1665 ty::Float(v) => cx.type_float_from_ty(v),
1666 ty::RawPtr(_, _) => cx.type_ptr(),
1667 _ => unreachable!(),
1668 };
1669 cx.type_vector(elem_ty, vec_len)
1670 }
1671
1672 if name == sym::simd_gather {
1673 let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1684 let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
1685 let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
1687 require_simd!(ret_ty, SimdReturn);
1688
1689 require!(
1691 in_len == out_len,
1692 InvalidMonomorphization::SecondArgumentLength {
1693 span,
1694 name,
1695 in_len,
1696 in_ty,
1697 arg_ty: arg_tys[1],
1698 out_len
1699 }
1700 );
1701 require!(
1702 in_len == out_len2,
1703 InvalidMonomorphization::ThirdArgumentLength {
1704 span,
1705 name,
1706 in_len,
1707 in_ty,
1708 arg_ty: arg_tys[2],
1709 out_len: out_len2
1710 }
1711 );
1712
1713 require!(
1715 ret_ty == in_ty,
1716 InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
1717 );
1718
1719 require!(
1720 matches!(
1721 *element_ty1.kind(),
1722 ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
1723 ),
1724 InvalidMonomorphization::ExpectedElementType {
1725 span,
1726 name,
1727 expected_element: element_ty1,
1728 second_arg: arg_tys[1],
1729 in_elem,
1730 in_ty,
1731 mutability: ExpectedPointerMutability::Not,
1732 }
1733 );
1734
1735 let mask_elem_bitwidth = require_int_or_uint_ty!(
1736 element_ty2.kind(),
1737 InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
1738 );
1739
1740 let alignment_ty = bx.type_i32();
1742 let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1743
1744 let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1746 let mask_ty = bx.type_vector(bx.type_i1(), in_len);
1747
1748 let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1750 let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
1751
1752 let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1754 let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
1755
1756 let llvm_intrinsic =
1757 format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
1758 let fn_ty = bx.type_func(
1759 &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
1760 llvm_elem_vec_ty,
1761 );
1762 let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
1763 let v = bx.call(
1764 fn_ty,
1765 None,
1766 None,
1767 f,
1768 &[args[1].immediate(), alignment, mask, args[0].immediate()],
1769 None,
1770 None,
1771 );
1772 return Ok(v);
1773 }
1774
1775 if name == sym::simd_masked_load {
1776 let mask_ty = in_ty;
1786 let (mask_len, mask_elem) = (in_len, in_elem);
1787
1788 let pointer_ty = arg_tys[1];
1790
1791 let values_ty = arg_tys[2];
1793 let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1794
1795 require_simd!(ret_ty, SimdReturn);
1796
1797 require!(
1799 values_len == mask_len,
1800 InvalidMonomorphization::ThirdArgumentLength {
1801 span,
1802 name,
1803 in_len: mask_len,
1804 in_ty: mask_ty,
1805 arg_ty: values_ty,
1806 out_len: values_len
1807 }
1808 );
1809
1810 require!(
1812 ret_ty == values_ty,
1813 InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
1814 );
1815
1816 require!(
1817 matches!(
1818 *pointer_ty.kind(),
1819 ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
1820 ),
1821 InvalidMonomorphization::ExpectedElementType {
1822 span,
1823 name,
1824 expected_element: values_elem,
1825 second_arg: pointer_ty,
1826 in_elem: values_elem,
1827 in_ty: values_ty,
1828 mutability: ExpectedPointerMutability::Not,
1829 }
1830 );
1831
1832 let m_elem_bitwidth = require_int_or_uint_ty!(
1833 mask_elem.kind(),
1834 InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1835 );
1836
1837 let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1838 let mask_ty = bx.type_vector(bx.type_i1(), mask_len);
1839
1840 let alignment_ty = bx.type_i32();
1842 let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1843
1844 let llvm_pointer = bx.type_ptr();
1845
1846 let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1848 let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len);
1849
1850 let llvm_intrinsic = format!("llvm.masked.load.{llvm_elem_vec_str}.p0");
1851 let fn_ty = bx
1852 .type_func(&[llvm_pointer, alignment_ty, mask_ty, llvm_elem_vec_ty], llvm_elem_vec_ty);
1853 let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
1854 let v = bx.call(
1855 fn_ty,
1856 None,
1857 None,
1858 f,
1859 &[args[1].immediate(), alignment, mask, args[2].immediate()],
1860 None,
1861 None,
1862 );
1863 return Ok(v);
1864 }
1865
1866 if name == sym::simd_masked_store {
1867 let mask_ty = in_ty;
1877 let (mask_len, mask_elem) = (in_len, in_elem);
1878
1879 let pointer_ty = arg_tys[1];
1881
1882 let values_ty = arg_tys[2];
1884 let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1885
1886 require!(
1888 values_len == mask_len,
1889 InvalidMonomorphization::ThirdArgumentLength {
1890 span,
1891 name,
1892 in_len: mask_len,
1893 in_ty: mask_ty,
1894 arg_ty: values_ty,
1895 out_len: values_len
1896 }
1897 );
1898
1899 require!(
1901 matches!(
1902 *pointer_ty.kind(),
1903 ty::RawPtr(p_ty, p_mutbl)
1904 if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
1905 ),
1906 InvalidMonomorphization::ExpectedElementType {
1907 span,
1908 name,
1909 expected_element: values_elem,
1910 second_arg: pointer_ty,
1911 in_elem: values_elem,
1912 in_ty: values_ty,
1913 mutability: ExpectedPointerMutability::Mut,
1914 }
1915 );
1916
1917 let m_elem_bitwidth = require_int_or_uint_ty!(
1918 mask_elem.kind(),
1919 InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1920 );
1921
1922 let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1923 let mask_ty = bx.type_vector(bx.type_i1(), mask_len);
1924
1925 let alignment_ty = bx.type_i32();
1927 let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1928
1929 let ret_t = bx.type_void();
1930
1931 let llvm_pointer = bx.type_ptr();
1932
1933 let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1935 let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len);
1936
1937 let llvm_intrinsic = format!("llvm.masked.store.{llvm_elem_vec_str}.p0");
1938 let fn_ty = bx.type_func(&[llvm_elem_vec_ty, llvm_pointer, alignment_ty, mask_ty], ret_t);
1939 let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
1940 let v = bx.call(
1941 fn_ty,
1942 None,
1943 None,
1944 f,
1945 &[args[2].immediate(), args[1].immediate(), alignment, mask],
1946 None,
1947 None,
1948 );
1949 return Ok(v);
1950 }
1951
1952 if name == sym::simd_scatter {
1953 let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1963 let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
1964 let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
1965
1966 require!(
1968 in_len == element_len1,
1969 InvalidMonomorphization::SecondArgumentLength {
1970 span,
1971 name,
1972 in_len,
1973 in_ty,
1974 arg_ty: arg_tys[1],
1975 out_len: element_len1
1976 }
1977 );
1978 require!(
1979 in_len == element_len2,
1980 InvalidMonomorphization::ThirdArgumentLength {
1981 span,
1982 name,
1983 in_len,
1984 in_ty,
1985 arg_ty: arg_tys[2],
1986 out_len: element_len2
1987 }
1988 );
1989
1990 require!(
1991 matches!(
1992 *element_ty1.kind(),
1993 ty::RawPtr(p_ty, p_mutbl)
1994 if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
1995 ),
1996 InvalidMonomorphization::ExpectedElementType {
1997 span,
1998 name,
1999 expected_element: element_ty1,
2000 second_arg: arg_tys[1],
2001 in_elem,
2002 in_ty,
2003 mutability: ExpectedPointerMutability::Mut,
2004 }
2005 );
2006
2007 let mask_elem_bitwidth = require_int_or_uint_ty!(
2009 element_ty2.kind(),
2010 InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
2011 );
2012
2013 let alignment_ty = bx.type_i32();
2015 let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
2016
2017 let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2019 let mask_ty = bx.type_vector(bx.type_i1(), in_len);
2020
2021 let ret_t = bx.type_void();
2022
2023 let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2025 let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
2026
2027 let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2029 let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
2030
2031 let llvm_intrinsic =
2032 format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
2033 let fn_ty =
2034 bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
2035 let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
2036 let v = bx.call(
2037 fn_ty,
2038 None,
2039 None,
2040 f,
2041 &[args[0].immediate(), args[1].immediate(), alignment, mask],
2042 None,
2043 None,
2044 );
2045 return Ok(v);
2046 }
2047
2048 macro_rules! arith_red {
2049 ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
2050 $identity:expr) => {
2051 if name == sym::$name {
2052 require!(
2053 ret_ty == in_elem,
2054 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2055 );
2056 return match in_elem.kind() {
2057 ty::Int(_) | ty::Uint(_) => {
2058 let r = bx.$integer_reduce(args[0].immediate());
2059 if $ordered {
2060 Ok(bx.$op(args[1].immediate(), r))
2063 } else {
2064 Ok(bx.$integer_reduce(args[0].immediate()))
2065 }
2066 }
2067 ty::Float(f) => {
2068 let acc = if $ordered {
2069 args[1].immediate()
2071 } else {
2072 match f.bit_width() {
2074 32 => bx.const_real(bx.type_f32(), $identity),
2075 64 => bx.const_real(bx.type_f64(), $identity),
2076 v => return_error!(
2077 InvalidMonomorphization::UnsupportedSymbolOfSize {
2078 span,
2079 name,
2080 symbol: sym::$name,
2081 in_ty,
2082 in_elem,
2083 size: v,
2084 ret_ty
2085 }
2086 ),
2087 }
2088 };
2089 Ok(bx.$float_reduce(acc, args[0].immediate()))
2090 }
2091 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2092 span,
2093 name,
2094 symbol: sym::$name,
2095 in_ty,
2096 in_elem,
2097 ret_ty
2098 }),
2099 };
2100 }
2101 };
2102 }
2103
2104 arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
2105 arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
2106 arith_red!(
2107 simd_reduce_add_unordered: vector_reduce_add,
2108 vector_reduce_fadd_reassoc,
2109 false,
2110 add,
2111 -0.0
2112 );
2113 arith_red!(
2114 simd_reduce_mul_unordered: vector_reduce_mul,
2115 vector_reduce_fmul_reassoc,
2116 false,
2117 mul,
2118 1.0
2119 );
2120
2121 macro_rules! minmax_red {
2122 ($name:ident: $int_red:ident, $float_red:ident) => {
2123 if name == sym::$name {
2124 require!(
2125 ret_ty == in_elem,
2126 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2127 );
2128 return match in_elem.kind() {
2129 ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
2130 ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
2131 ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
2132 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2133 span,
2134 name,
2135 symbol: sym::$name,
2136 in_ty,
2137 in_elem,
2138 ret_ty
2139 }),
2140 };
2141 }
2142 };
2143 }
2144
2145 minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2146 minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2147
2148 macro_rules! bitwise_red {
2149 ($name:ident : $red:ident, $boolean:expr) => {
2150 if name == sym::$name {
2151 let input = if !$boolean {
2152 require!(
2153 ret_ty == in_elem,
2154 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2155 );
2156 args[0].immediate()
2157 } else {
2158 let bitwidth = match in_elem.kind() {
2159 ty::Int(i) => {
2160 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
2161 }
2162 ty::Uint(i) => {
2163 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
2164 }
2165 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2166 span,
2167 name,
2168 symbol: sym::$name,
2169 in_ty,
2170 in_elem,
2171 ret_ty
2172 }),
2173 };
2174
2175 vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
2176 };
2177 return match in_elem.kind() {
2178 ty::Int(_) | ty::Uint(_) => {
2179 let r = bx.$red(input);
2180 Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
2181 }
2182 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2183 span,
2184 name,
2185 symbol: sym::$name,
2186 in_ty,
2187 in_elem,
2188 ret_ty
2189 }),
2190 };
2191 }
2192 };
2193 }
2194
2195 bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2196 bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2197 bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2198 bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2199 bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2200
2201 if name == sym::simd_cast_ptr {
2202 let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2203 require!(
2204 in_len == out_len,
2205 InvalidMonomorphization::ReturnLengthInputType {
2206 span,
2207 name,
2208 in_len,
2209 in_ty,
2210 ret_ty,
2211 out_len
2212 }
2213 );
2214
2215 match in_elem.kind() {
2216 ty::RawPtr(p_ty, _) => {
2217 let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2218 bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2219 });
2220 require!(
2221 metadata.is_unit(),
2222 InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
2223 );
2224 }
2225 _ => {
2226 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2227 }
2228 }
2229 match out_elem.kind() {
2230 ty::RawPtr(p_ty, _) => {
2231 let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2232 bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2233 });
2234 require!(
2235 metadata.is_unit(),
2236 InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
2237 );
2238 }
2239 _ => {
2240 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2241 }
2242 }
2243
2244 return Ok(args[0].immediate());
2245 }
2246
2247 if name == sym::simd_expose_provenance {
2248 let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2249 require!(
2250 in_len == out_len,
2251 InvalidMonomorphization::ReturnLengthInputType {
2252 span,
2253 name,
2254 in_len,
2255 in_ty,
2256 ret_ty,
2257 out_len
2258 }
2259 );
2260
2261 match in_elem.kind() {
2262 ty::RawPtr(_, _) => {}
2263 _ => {
2264 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2265 }
2266 }
2267 match out_elem.kind() {
2268 ty::Uint(ty::UintTy::Usize) => {}
2269 _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
2270 }
2271
2272 return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
2273 }
2274
2275 if name == sym::simd_with_exposed_provenance {
2276 let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2277 require!(
2278 in_len == out_len,
2279 InvalidMonomorphization::ReturnLengthInputType {
2280 span,
2281 name,
2282 in_len,
2283 in_ty,
2284 ret_ty,
2285 out_len
2286 }
2287 );
2288
2289 match in_elem.kind() {
2290 ty::Uint(ty::UintTy::Usize) => {}
2291 _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
2292 }
2293 match out_elem.kind() {
2294 ty::RawPtr(_, _) => {}
2295 _ => {
2296 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2297 }
2298 }
2299
2300 return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
2301 }
2302
2303 if name == sym::simd_cast || name == sym::simd_as {
2304 let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2305 require!(
2306 in_len == out_len,
2307 InvalidMonomorphization::ReturnLengthInputType {
2308 span,
2309 name,
2310 in_len,
2311 in_ty,
2312 ret_ty,
2313 out_len
2314 }
2315 );
2316 if in_elem == out_elem {
2318 return Ok(args[0].immediate());
2319 }
2320
2321 #[derive(Copy, Clone)]
2322 enum Sign {
2323 Unsigned,
2324 Signed,
2325 }
2326 use Sign::*;
2327
2328 enum Style {
2329 Float,
2330 Int(Sign),
2331 Unsupported,
2332 }
2333
2334 let (in_style, in_width) = match in_elem.kind() {
2335 ty::Int(i) => (
2338 Style::Int(Signed),
2339 i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2340 ),
2341 ty::Uint(u) => (
2342 Style::Int(Unsigned),
2343 u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2344 ),
2345 ty::Float(f) => (Style::Float, f.bit_width()),
2346 _ => (Style::Unsupported, 0),
2347 };
2348 let (out_style, out_width) = match out_elem.kind() {
2349 ty::Int(i) => (
2350 Style::Int(Signed),
2351 i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2352 ),
2353 ty::Uint(u) => (
2354 Style::Int(Unsigned),
2355 u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2356 ),
2357 ty::Float(f) => (Style::Float, f.bit_width()),
2358 _ => (Style::Unsupported, 0),
2359 };
2360
2361 match (in_style, out_style) {
2362 (Style::Int(sign), Style::Int(_)) => {
2363 return Ok(match in_width.cmp(&out_width) {
2364 Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2365 Ordering::Equal => args[0].immediate(),
2366 Ordering::Less => match sign {
2367 Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2368 Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2369 },
2370 });
2371 }
2372 (Style::Int(Sign::Signed), Style::Float) => {
2373 return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2374 }
2375 (Style::Int(Sign::Unsigned), Style::Float) => {
2376 return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2377 }
2378 (Style::Float, Style::Int(sign)) => {
2379 return Ok(match (sign, name == sym::simd_as) {
2380 (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2381 (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2382 (_, true) => bx.cast_float_to_int(
2383 matches!(sign, Sign::Signed),
2384 args[0].immediate(),
2385 llret_ty,
2386 ),
2387 });
2388 }
2389 (Style::Float, Style::Float) => {
2390 return Ok(match in_width.cmp(&out_width) {
2391 Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2392 Ordering::Equal => args[0].immediate(),
2393 Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2394 });
2395 }
2396 _ => { }
2397 }
2398 return_error!(InvalidMonomorphization::UnsupportedCast {
2399 span,
2400 name,
2401 in_ty,
2402 in_elem,
2403 ret_ty,
2404 out_elem
2405 });
2406 }
// Expands to one `if name == sym::$name { ... }` dispatch per binary SIMD
// arithmetic intrinsic: the element type's kind (Uint/Int/Float) selects
// the builder call; any other element kind reports UnsupportedOperation.
macro_rules! arith_binary {
    ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
        $(if name == sym::$name {
            match in_elem.kind() {
                $($(ty::$p(_))|* => {
                    return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                })*
                _ => {},
            }
            return_error!(
                InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
            );
        })*
    }
}
// Each row: intrinsic name, then the element-kind => builder-method pairs.
// Note div/rem/shr distinguish signed vs. unsigned element types.
arith_binary! {
    simd_add: Uint, Int => add, Float => fadd;
    simd_sub: Uint, Int => sub, Float => fsub;
    simd_mul: Uint, Int => mul, Float => fmul;
    simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
    simd_rem: Uint => urem, Int => srem, Float => frem;
    simd_shl: Uint, Int => shl;
    simd_shr: Uint => lshr, Int => ashr;
    simd_and: Uint, Int => and;
    simd_or: Uint, Int => or;
    simd_xor: Uint, Int => xor;
    simd_fmax: Float => maxnum;
    simd_fmin: Float => minnum;

}
// Same dispatch scheme as `arith_binary!` above, but for single-operand
// intrinsics: element kind selects the builder call, otherwise error.
macro_rules! arith_unary {
    ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
        $(if name == sym::$name {
            match in_elem.kind() {
                $($(ty::$p(_))|* => {
                    return Ok(bx.$call(args[0].immediate()))
                })*
                _ => {},
            }
            return_error!(
                InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
            );
        })*
    }
}
arith_unary! {
    simd_neg: Int => neg, Float => fneg;
}
2455
// Bit-manipulation intrinsics: all lower to the LLVM vector intrinsics of
// the same name (llvm.bswap / llvm.bitreverse / llvm.ctlz / llvm.ctpop /
// llvm.cttz), suffixed with the vector shape, e.g. `.v4i32`.
if matches!(
    name,
    sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_ctpop | sym::simd_cttz
) {
    // Only integer element types are supported; build the matching
    // LLVM vector type from the element type and lane count.
    let vec_ty = bx.cx.type_vector(
        match *in_elem.kind() {
            ty::Int(i) => bx.cx.type_int_from_ty(i),
            ty::Uint(i) => bx.cx.type_uint_from_ty(i),
            _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
                span,
                name,
                in_ty,
                in_elem
            }),
        },
        in_len as u64,
    );
    let intrinsic_name = match name {
        sym::simd_bswap => "bswap",
        sym::simd_bitreverse => "bitreverse",
        sym::simd_ctlz => "ctlz",
        sym::simd_ctpop => "ctpop",
        sym::simd_cttz => "cttz",
        _ => unreachable!(),
    };
    let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
    // e.g. "llvm.ctpop.v4i32" for a 4-lane 32-bit integer vector.
    let llvm_intrinsic = &format!("llvm.{}.v{}i{}", intrinsic_name, in_len, int_size,);

    return match name {
        // Byte-swapping a vector of single bytes is the identity.
        sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
        sym::simd_ctlz | sym::simd_cttz => {
            // llvm.ctlz/llvm.cttz take a second `i1` argument
            // (`is_zero_poison`); pass 0 so a zero input yields the
            // bit width instead of poison.
            let fn_ty = bx.type_func(&[vec_ty, bx.type_i1()], vec_ty);
            let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
            let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
            Ok(bx.call(
                fn_ty,
                None,
                None,
                f,
                &[args[0].immediate(), dont_poison_on_zero],
                None,
                None,
            ))
        }
        sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
            // Simple unary case: vector in, vector out.
            let fn_ty = bx.type_func(&[vec_ty], vec_ty);
            let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
            Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None, None))
        }
        _ => unreachable!(),
    };
}
2512
// `simd_arith_offset(ptrs, offsets)`: lane-wise pointer offsetting,
// lowered to a single vector GEP over the pointee type.
if name == sym::simd_arith_offset {
    // First argument must be a vector of (dereferenceable) pointers;
    // the type checker should have guaranteed this, hence span_bug.
    let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
        span_bug!(span, "must be called with a vector of pointer types as first argument")
    });
    let layout = bx.layout_of(pointee);
    let ptrs = args[0].immediate();
    // Second argument must be a vector of isize/usize offsets.
    let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
    if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
        span_bug!(
            span,
            "must be called with a vector of pointer-sized integers as second argument"
        );
    }
    let offsets = args[1].immediate();

    // GEP with a vector of pointers and a vector of indices produces a
    // vector of offset pointers, one per lane.
    return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
}
2533
// Saturating add/sub: lowered to LLVM's `llvm.{s,u}{add,sub}.sat.*`
// vector intrinsics, selected by element signedness and operation.
if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
    let lhs = args[0].immediate();
    let rhs = args[1].immediate();
    let is_add = name == sym::simd_saturating_add;
    // isize/usize have no fixed bit_width(); fall back to the target's
    // pointer width for them.
    let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
    let (signed, elem_width, elem_ty) = match *in_elem.kind() {
        ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
        ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
        _ => {
            return_error!(InvalidMonomorphization::ExpectedVectorElementType {
                span,
                name,
                expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
                vector_type: arg_tys[0]
            });
        }
    };
    // e.g. "llvm.sadd.sat.v8i16" / "llvm.usub.sat.v4i32".
    let llvm_intrinsic = &format!(
        "llvm.{}{}.sat.v{}i{}",
        if signed { 's' } else { 'u' },
        if is_add { "add" } else { "sub" },
        in_len,
        elem_width
    );
    let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);

    let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
    let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
    let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None, None);
    return Ok(v);
}
2565
2566 span_bug!(span, "unknown SIMD intrinsic");
2567}