
rustc_codegen_ssa/mir/intrinsic.rs

use rustc_abi::WrappingRange;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;
use rustc_target::spec::Arch;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

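/// Shared lowering for the plain and volatile memory-copy intrinsics: the byte count is
/// computed as `size_of::<T>() * count`, and `allow_overlap` selects `memmove` over
/// `memcpy`.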
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags, None);
    }
}

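/// Shared lowering for `write_bytes` and `volatile_set_memory`: fills
/// `size_of::<T>() * count` bytes at `dst` with the byte `val`.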
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// In the `Err` case, returns the instance that should be called instead.
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

        // If we're swapping something that's *not* an `OperandValue::Ref`,
        // then we can do it directly and avoid the alloca.
        // Otherwise, we'll let the fallback MIR body take care of it.
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                // But if we're not going to optimize, trying to use the fallback
                // body just makes things worse, so don't bother.
                || bx.sess().opts.optimize == OptLevel::No
                // NOTE(eddyb) SPIR-V's Logical addressing model doesn't allow for arbitrary
                // reinterpretation of values as (chunkable) byte arrays, and the loop in the
                // block optimization in `ptr::swap_nonoverlapping` is hard to rewrite back
                // into the (unoptimized) direct swapping implementation, so we disable it.
                || bx.sess().target.arch == Arch::SpirV
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

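        // Shared error reporting for intrinsics instantiated at a type they do not support.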
        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };
        let invalid_monomorphization_int_or_ptr_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
                span,
                name,
                ty,
            });
        };

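        // Atomic orderings are passed as const-generic enum values; read the discriminant
        // leaf out of the valtree and map it onto an atomic ordering.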
        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.to_branch()[0].to_leaf();
            discr.to_atomic_ordering()
        };

        if args.is_empty() {
            match name {
                sym::abort
                | sym::unreachable
                | sym::cold_path
                | sym::breakpoint
                | sym::amdgpu_dispatch_ptr
                | sym::assert_zero_valid
                | sym::assert_mem_uninitialized_valid
                | sym::assert_inhabited
                | sym::ub_checks
                | sym::contract_checks
                | sym::atomic_fence
                | sym::atomic_singlethreadfence
                | sym::caller_location => {}
                _ => {
                    span_bug!(
                        span,
                        "Nullary intrinsic {name} must be called in a const block. \
                        If you are seeing this message from code outside the standard library, the \
                        unstable implementation details of the relevant intrinsic may have changed. \
                        Consider using stable APIs instead. \
                        If you are adding a new nullary intrinsic that is inherently a runtime \
                        intrinsic, update this check."
                    );
                }
            }
        }

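        // Each arm below either returns early (when it has already stored, or has no,
        // result) or yields a backend value that is written to `result` after the match.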
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            // These are all AtomicRMW ops
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

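                // The cmpxchg intrinsics return a pair of the previous value and a success
                // flag; store each into its field of the destination place.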
                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
            sym::atomic_max | sym::atomic_min => {
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg => {
                let ty = fn_args.type_at(0);
                let ordering = fn_args.const_at(1).to_value();
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    let atomic_op = AtomicRmwBinOp::AtomicXchg;
                    bx.atomic_rmw(
                        atomic_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ ty.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                // The type of the in-memory data.
                let ty_mem = fn_args.type_at(0);
                // The type of the 2nd operand, given by-value.
                let ty_op = fn_args.type_at(1);

                let ordering = fn_args.const_at(2).to_value();
                // We require either both arguments to have the same integer type, or the first to
                // be a pointer and the second to be `usize`.
                if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
                    || (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
                {
                    let ptr = args[0].immediate(); // of type "pointer to `ty_mem`"
                    let val = args[1].immediate(); // of type `ty_op`
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ ty_mem.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty_mem);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
                // This is a no-op. The intrinsic is just a hint to the optimizer.
                return Ok(());
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

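        // An immediate `bool` uses a different backend representation than its in-memory
        // form, so convert it before the store; unit-typed results need no store at all.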
        if result.layout.ty.is_bool() {
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

// Returns the width of an int Ty, and whether it's signed or not.
// Returns None if the type is not an integer.
// FIXME: there are multiple copies of this function; investigate using some of the
// already existing helpers.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float Ty
// Returns None if the type is not a float
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}