
rustc_codegen_ssa/mir/intrinsic.rs

use rustc_abi::{Align, WrappingRange};
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;
use rustc_target::spec::Arch;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

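/// Shared lowering for the `copy`-style intrinsics: copies
/// `size_of::<T>() * count` bytes from `src` to `dst`, emitting a `memmove`
/// when the two ranges are allowed to overlap and a `memcpy` otherwise.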
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags, None);
    }
}

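/// Shared lowering for the `write_bytes`/`volatile_set_memory` intrinsics:
/// fills `size_of::<T>() * count` bytes at `dst` with the byte `val` via a
/// `memset`.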
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// In the `Err` case, returns the instance that should be called instead.
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

        // If we're swapping something that's *not* an `OperandValue::Ref`,
        // then we can do it directly and avoid the alloca.
        // Otherwise, we'll let the fallback MIR body take care of it.
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                // But if we're not going to optimize, trying to use the fallback
                // body just makes things worse, so don't bother.
                || bx.sess().opts.optimize == OptLevel::No
                // NOTE(eddyb) SPIR-V's Logical addressing model doesn't allow for arbitrary
                // reinterpretation of values as (chunkable) byte arrays, and the loop in the
                // block optimization in `ptr::swap_nonoverlapping` is hard to rewrite back
                // into the (unoptimized) direct swapping implementation, so we disable it.
                || bx.sess().target.arch == Arch::SpirV
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };
        let invalid_monomorphization_int_or_ptr_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
                span,
                name,
                ty,
            });
        };

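        // Atomic orderings reach codegen as const-generic values of the
        // `AtomicOrdering` enum; peel the variant discriminant out of the
        // valtree and map it onto the backend's ordering.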
        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.to_branch()[0].to_leaf();
            discr.to_atomic_ordering()
        };

        if args.is_empty() {
            match name {
                sym::abort
                | sym::unreachable
                | sym::cold_path
                | sym::gpu_launch_sized_workgroup_mem
                | sym::breakpoint
                | sym::amdgpu_dispatch_ptr
                | sym::assert_zero_valid
                | sym::assert_mem_uninitialized_valid
                | sym::assert_inhabited
                | sym::ub_checks
                | sym::contract_checks
                | sym::atomic_fence
                | sym::atomic_singlethreadfence
                | sym::caller_location => {}
                _ => {
                    span_bug!(
                        span,
                        "Nullary intrinsic {name} must be called in a const block. \
                        If you are seeing this message from code outside the standard library, the \
                        unstable implementation details of the relevant intrinsic may have changed. \
                        Consider using stable APIs instead. \
                        If you are adding a new nullary intrinsic that is inherently a runtime \
                        intrinsic, update this check."
                    );
                }
            }
        }

        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
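                // Size and align sit at fixed slots near the start of every
                // vtable, so a plain indexed load suffices.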
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always a power of two, thus 1..=0x800…000,
                    // but also bounded by the maximum we support in type layout.
                    sym::vtable_align => {
                        let align_bound = Align::max_for_target(bx.data_layout()).bytes().into();
                        bx.range_metadata(value, WrappingRange { start: 1, end: align_bound })
                    }
                    _ => {}
                }
                value
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
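                // A plain (non-`inbounds`) GEP matches the wrapping semantics
                // this intrinsic provides (cf. `pointer::wrapping_offset`).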
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
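                // The caller promises the two operands share no set bits, so
                // the backend may treat the `or` as interchangeable with `add`.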
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
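                // The caller guarantees the division has no remainder, so the
                // backend may emit an `exact` division.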
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            // These are all AtomicRMW ops
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

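                // The intrinsic yields a pair: the old value and a `bool`
                // success flag, stored into the two fields of `result`.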
                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
            sym::atomic_max | sym::atomic_min => {
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg => {
                let ty = fn_args.type_at(0);
                let ordering = fn_args.const_at(1).to_value();
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    let atomic_op = AtomicRmwBinOp::AtomicXchg;
                    bx.atomic_rmw(
                        atomic_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ ty.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                // The type of the in-memory data.
                let ty_mem = fn_args.type_at(0);
                // The type of the 2nd operand, given by-value.
                let ty_op = fn_args.type_at(1);

                let ordering = fn_args.const_at(2).to_value();
                // We require either both arguments to have the same integer type, or the first to
                // be a pointer and the second to be `usize`.
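                // (The pointer/`usize` form supports atomic pointer
                // arithmetic, where the `usize` operand acts as a byte offset
                // on the stored address.)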
                if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
                    || (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
                {
                    let ptr = args[0].immediate(); // of type "pointer to `ty_mem`"
                    let val = args[1].immediate(); // of type `ty_op`
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ ty_mem.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty_mem);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
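                // The result is (a - b) / size_of::<T>(); e.g. two `*const u32`
                // whose addresses differ by 8 bytes are 8 / 4 = 2 elements apart.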
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // This is where the signed magic happens (notice the `s` in `exactsdiv`).
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so it can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
                // This is a no-op. The intrinsic is just a hint to the optimizer.
                return Ok(());
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

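        // Store the scalar produced above into the destination place. `bool`
        // values come back in the backend's immediate representation (e.g.
        // `i1` in the LLVM backend), so they are widened to their memory type
        // first.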
        if result.layout.ty.is_bool() {
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

// Returns the width of an integer `Ty`, and whether it is signed.
// Returns `None` if the type is not an integer.
// FIXME: there are multiple functions like this; investigate reusing some of
// the already existing helpers.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
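        // `isize` and `usize` report no fixed `bit_width()`, so fall back to
        // the target's pointer width.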
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float `Ty`.
// Returns `None` if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}