rustc_codegen_ssa/mir/intrinsic.rs

use rustc_abi::WrappingRange;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::{Span, sym};
use rustc_target::callconv::{FnAbi, PassMode};

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, errors, meth, size_of_val};

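/// Lowers the `copy`/`copy_nonoverlapping` family of intrinsics (including the
/// volatile variants) to a `memmove` or `memcpy` of `size_of::<T>() * count`
/// bytes, depending on whether the ranges are allowed to overlap.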
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

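/// Lowers `write_bytes`/`volatile_set_memory` to a `memset` of
/// `size_of::<T>() * count` bytes at `dst`, filled with `val`.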
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// In the `Err` case, returns the instance that should be called instead.
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let callee_ty = instance.ty(bx.tcx(), bx.typing_env());

        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
            span_bug!(span, "expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(bx.typing_env(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        // If we're swapping something that's *not* an `OperandValue::Ref`,
        // then we can do it directly and avoid the alloca.
        // Otherwise, we'll let the fallback MIR body take care of it.
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                // But if we're not going to optimize, trying to use the fallback
                // body just makes things worse, so don't bother.
                || bx.sess().opts.optimize == OptLevel::No
                // NOTE(eddyb) SPIR-V's Logical addressing model doesn't allow for arbitrary
                // reinterpretation of values as (chunkable) byte arrays, and the loop in the
                // block optimization in `ptr::swap_nonoverlapping` is hard to rewrite back
                // into the (unoptimized) direct swapping implementation, so we disable it.
                || bx.sess().target.arch == "spirv"
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }
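
        // For example, `typed_swap_nonoverlapping::<u32>(x, y)` takes the fast
        // path above: both values are loaded as immediates and stored back
        // swapped, with no intermediate alloca.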

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::min_align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable, callee_ty);
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
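            // The range annotations above let the optimizer assume the loaded
            // size lies in `0..=isize::MAX` and the alignment is nonzero, e.g.
            // to fold comparisons against zero downstream.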
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx.tcx().const_eval_instance(bx.typing_env(), instance, span).unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
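                // Note the argument order: the `copy` intrinsic is
                // `copy(src, dst, count)`, so `args[0]` is the source and
                // `args[1]` the destination.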
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: arg_tys[0],
                        });
                        return Ok(());
                    }
                }
            }
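            // Illustrative note: in the LLVM backend the `_fast` ops above
            // become float instructions with full fast-math flags, while the
            // `_algebraic` variants below use a weaker, non-UB set of flags.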
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(arg_tys[0]) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: arg_tys[0],
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: arg_tys[0],
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: ret_ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }
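            // E.g. `float_to_int_unchecked::<f32, i32>(x)` above lowers to a
            // single `fptosi`; the caller guarantees the value is in range, so
            // no saturating or trapping code is emitted here.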

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>_<ordering>"
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
                };
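                // E.g. `atomic_cxchg_acquire_relaxed` splits into
                // `instruction = "cxchg"` and `ordering = "acquire_relaxed"`;
                // the compare-exchange arm below splits the ordering again into
                // its success and failure halves.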

                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
                };

                let invalid_monomorphization = |ty| {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().dcx().emit_fatal(errors::AtomicCompareExchange);
                        };
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let weak = instruction == "cxchgweak";
                            let dst = args[0].immediate();
                            let cmp = args[1].immediate();
                            let src = args[2].immediate();
                            let (val, success) = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store_to_place(val, dest.val);
                            let dest = result.project_field(bx, 1);
                            bx.store_to_place(success, dest.val);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "load" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let source = args[0].immediate();
                            bx.atomic_load(
                                bx.backend_type(layout),
                                source,
                                parse_ordering(bx, ordering),
                                size,
                            )
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }

                    "store" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let size = bx.layout_of(ty).size;
                            let val = args[1].immediate();
                            let ptr = args[0].immediate();
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return Ok(());
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return Ok(());
                    }

                    // These are all AtomicRMW ops
                    "max" | "min" => {
                        let atom_op = if instruction == "max" {
                            AtomicRmwBinOp::AtomicMax
                        } else {
                            AtomicRmwBinOp::AtomicMin
                        };

                        let ty = fn_args.type_at(0);
                        if matches!(ty.kind(), ty::Int(_)) {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                    "umax" | "umin" => {
                        let atom_op = if instruction == "umax" {
                            AtomicRmwBinOp::AtomicUMax
                        } else {
                            AtomicRmwBinOp::AtomicUMin
                        };

                        let ty = fn_args.type_at(0);
                        if matches!(ty.kind(), ty::Uint(_)) {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOperation),
                        };

                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // This is where the signed magic happens (notice the `s` in `exactsdiv`).
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so it can use `sub nuw` and `udiv exact` instead of dealing in
                    // signed arithmetic.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }
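
            // A sketch of the signed case above in LLVM-style IR (illustrative):
            //   %a = ptrtoint ptr %x to i64
            //   %b = ptrtoint ptr %y to i64
            //   %d = sub i64 %a, %b            ; neither nsw nor nuw, see above
            //   %r = sdiv exact i64 %d, %size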

            sym::cold_path => {
                // This is a no-op. The intrinsic is just a hint to the optimizer.
                return Ok(());
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
            }
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                bx.store_to_place(llval, result.val);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
        Ok(())
    }
}

// Returns the width of an integer `Ty`, and whether it is signed.
// Returns `None` if the type is not an integer.
// FIXME: there are multiple functions like this one; investigate reusing some of
// the already existing ones.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}
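
// For example, `int_type_width_signed` maps `i32` to `Some((32, true))` and, on
// a 64-bit target, `usize` to `Some((64, false))`; non-integer types map to `None`.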

// Returns the width of a float `Ty`.
// Returns `None` if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}