rustc_codegen_ssa/mir/intrinsic.rs
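//! Codegen for intrinsic calls that can be lowered in a backend-agnostic way.
//! Intrinsics that need backend-specific handling fall through to
//! `Bx::codegen_intrinsic_call` in the final arm of the big `match` below.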

use rustc_abi::WrappingRange;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

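/// Shared implementation of the `copy`-family intrinsics: computes the byte
/// count as `size_of::<T>() * count`, then lowers to `memmove` when the ranges
/// may overlap and to `memcpy` when they are known disjoint.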
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags, None);
    }
}

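/// Shared implementation of `write_bytes`/`volatile_set_memory`: fills
/// `size_of::<T>() * count` bytes at `dst` with the byte `val` via `memset`.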
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Codegens a call to the intrinsic `instance`, storing any result into
    /// `result`. In the `Err` case, returns the fallback instance whose MIR
    /// body should be called instead.
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

        // If we're swapping something that's *not* an `OperandValue::Ref`,
        // then we can do it directly and avoid the alloca.
        // Otherwise, we'll let the fallback MIR body take care of it.
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                // But if we're not going to optimize, trying to use the fallback
                // body just makes things worse, so don't bother.
                || bx.sess().opts.optimize == OptLevel::No
                // NOTE(eddyb) SPIR-V's Logical addressing model doesn't allow for arbitrary
                // reinterpretation of values as (chunkable) byte arrays, and the loop in the
                // block optimization in `ptr::swap_nonoverlapping` is hard to rewrite back
                // into the (unoptimized) direct swapping implementation, so we disable it.
                || bx.sess().target.arch == "spirv"
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };
        let invalid_monomorphization_int_or_ptr_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
                span,
                name,
                ty,
            });
        };

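        // Atomic orderings are passed to these intrinsics as const generic
        // arguments; recover the `AtomicOrdering` variant from the valtree by
        // reading the enum's discriminant leaf.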
        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
            discr.to_atomic_ordering()
        };

        if args.is_empty() {
            match name {
                sym::abort
                | sym::unreachable
                | sym::cold_path
                | sym::breakpoint
                | sym::assert_zero_valid
                | sym::assert_mem_uninitialized_valid
                | sym::assert_inhabited
                | sym::ub_checks
                | sym::contract_checks
                | sym::atomic_fence
                | sym::atomic_singlethreadfence
                | sym::caller_location => {}
                _ => {
                    span_bug!(
                        span,
                        "nullary intrinsic {name} must either be in a const block or explicitly \
                         opted out because it is inherently a runtime intrinsic"
                    );
                }
            }
        }

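        // Codegen the intrinsic's value. Arms that fully handle their own
        // stores (or produce no value) `return` early; every other arm yields
        // an `llval` that is written into `result` after the `match`.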
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
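            // `arith_offset` has wrapping semantics, so this deliberately uses
            // a plain `gep` rather than an inbounds one.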
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
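            // NB: the `copy` intrinsic is `copy(src, dst, count)`, while
            // `copy_intrinsic` takes `dst` before `src`, hence the swapped
            // argument order below.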
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
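            // `disjoint_bitor` promises that no bit is set in both operands,
            // which lets the backend emit a disjoint `or` (one it may treat
            // like an `add`).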
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
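            // `exact_div` is UB when the division leaves a remainder, so the
            // exact `sdiv`/`udiv` forms can be used directly.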
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
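            // The `*_fast` intrinsics enable all fast-math flags; they are UB
            // if any input or output is NaN or infinite.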
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
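            // The `*_algebraic` variants only license reassociation-style
            // optimizations; unlike `*_fast`, they are defined for all inputs.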
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

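            // `fptosi`/`fptoui` yield poison when the value doesn't fit in the
            // target integer type; the intrinsic's safety contract makes the
            // caller responsible for ruling that out.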
            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            // Compare-and-exchange, followed by the AtomicRMW family of ops
            // (`max`/`min`, `xchg`, `xadd`, ...).
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
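            // `atomic_max`/`atomic_min` are the signed comparisons; the
            // unsigned ones are `atomic_umax`/`atomic_umin` below, so each arm
            // checks for the matching class of integer type.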
            sym::atomic_max | sym::atomic_min => {
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg => {
                let ty = fn_args.type_at(0);
                let ordering = fn_args.const_at(1).to_value();
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    let atomic_op = AtomicRmwBinOp::AtomicXchg;
                    bx.atomic_rmw(
                        atomic_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ ty.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                // The type of the in-memory data.
                let ty_mem = fn_args.type_at(0);
                // The type of the 2nd operand, given by-value.
                let ty_op = fn_args.type_at(1);

                let ordering = fn_args.const_at(2).to_value();
                // We require either both arguments to have the same integer type, or the first to
                // be a pointer and the second to be `usize`.
                if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
                    || (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
                {
                    let ptr = args[0].immediate(); // of type "pointer to `ty_mem`"
                    let val = args[1].immediate(); // of type `ty_op`
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        /* ret_ptr */ ty_mem.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty_mem);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

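            // Like `atomic_fence`, but only orders operations with respect to
            // the current thread (e.g. against signal handlers); it does not
            // provide cross-thread synchronization.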
            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
                // This is a no-op. The intrinsic is just a hint to the optimizer.
                return Ok(());
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

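        // `bool` is an `i1` as an immediate but wider in memory (`i8` in the
        // LLVM backend), so it must go through `from_immediate` before being
        // stored. Unit produces no value, so nothing is stored for it.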
        if result.layout.ty.is_bool() {
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

// Returns the width of an int Ty, and whether it's signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate reusing one of the
// existing helpers.
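// (`isize`/`usize` report no fixed `bit_width()`, so we fall back to the
// target's pointer width below.)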
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float Ty.
// Returns None if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}