rustc_const_eval/const_eval/machine.rs

use std::borrow::{Borrow, Cow};
use std::fmt;
use std::hash::Hash;

use rustc_abi::{Align, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap, IndexEntry};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::{self as hir, CRATE_HIR_ID, LangItem};
use rustc_middle::mir::AssertMessage;
use rustc_middle::mir::interpret::ReportedErrorInfo;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{HasTypingEnv, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_span::{Span, Symbol, sym};
use rustc_target::callconv::FnAbi;
use tracing::debug;

use super::error::*;
use crate::errors::{LongRunning, LongRunningWarn};
use crate::fluent_generated as fluent;
use crate::interpret::{
    self, AllocId, AllocInit, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame,
    GlobalAlloc, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, RangeSet, Scalar,
    compile_time_machine, interp_ok, throw_exhaust, throw_inval, throw_ub, throw_ub_custom,
    throw_unsup, throw_unsup_format,
};

/// When hitting this many interpreted terminators we emit a deny-by-default lint
/// that notifies the user that their constant takes a long time to evaluate. If that's
/// what they intended, they can just allow the lint.
const LINT_TERMINATOR_LIMIT: usize = 2_000_000;
/// The limit used by `-Z tiny-const-eval-limit`. This smaller limit is useful for internal
/// tests that should not need to run for 30s or more to show some behaviour.
const TINY_LINT_TERMINATOR_LIMIT: usize = 20;
/// After this many interpreted terminators, we start emitting progress indicators at every
/// power-of-two number of interpreted terminators.
const PROGRESS_INDICATOR_START: usize = 4_000_000;
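// Taken together (see `increment_const_eval_counter` in the `Machine` impl below): the
// `long_running_const_eval` lint fires once when exactly `LINT_TERMINATOR_LIMIT` terminators
// have been interpreted, and if that lint is allowed and evaluation keeps going, a progress
// warning is emitted at every power of two past `PROGRESS_INDICATOR_START`; with the default
// limits that means after 2^22 = 4_194_304 terminators, then 2^23, 2^24, and so on.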

/// Extra machine state for CTFE, and the Machine instance.
//
// Should be public because out-of-tree rustc consumers need this
// if they want to interact with constant values.
pub struct CompileTimeMachine<'tcx> {
    /// The number of terminators that have been evaluated.
    ///
    /// This is used to produce lints informing the user that the compiler is not stuck.
    /// Set to `usize::MAX` to never report anything.
    pub(super) num_evaluated_steps: usize,

    /// The virtual call stack.
    pub(super) stack: Vec<Frame<'tcx>>,

    /// Pattern matching on consts with references would be unsound if those references
    /// could point to anything mutable. Therefore, when evaluating consts and when constructing valtrees,
    /// we ensure that only immutable global memory can be accessed.
    pub(super) can_access_mut_global: CanAccessMutGlobal,

    /// Whether to check alignment during evaluation.
    pub(super) check_alignment: CheckAlignment,

    /// If `Some`, we are evaluating the initializer of the static with the given `LocalDefId`,
    /// storing the result in the given `AllocId`.
    /// Used to prevent reads from a static's base allocation, as that may allow for self-initialization loops.
    pub(crate) static_root_ids: Option<(AllocId, LocalDefId)>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,
}

#[derive(Copy, Clone)]
pub enum CheckAlignment {
    /// Ignore all alignment requirements.
    /// This is mainly used in interning.
    No,
    /// Hard error when dereferencing a misaligned pointer.
    Error,
}

#[derive(Copy, Clone, PartialEq)]
pub(crate) enum CanAccessMutGlobal {
    No,
    Yes,
}

impl From<bool> for CanAccessMutGlobal {
    fn from(value: bool) -> Self {
        if value { Self::Yes } else { Self::No }
    }
}

impl<'tcx> CompileTimeMachine<'tcx> {
    pub(crate) fn new(
        can_access_mut_global: CanAccessMutGlobal,
        check_alignment: CheckAlignment,
    ) -> Self {
        CompileTimeMachine {
            num_evaluated_steps: 0,
            stack: Vec::new(),
            can_access_mut_global,
            check_alignment,
            static_root_ids: None,
            union_data_ranges: FxHashMap::default(),
        }
    }
}

impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> {
    #[inline(always)]
    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
    where
        K: Borrow<Q>,
    {
        FxIndexMap::contains_key(self, k)
    }

    #[inline(always)]
    fn contains_key_ref<Q: ?Sized + Hash + Eq>(&self, k: &Q) -> bool
    where
        K: Borrow<Q>,
    {
        FxIndexMap::contains_key(self, k)
    }

    #[inline(always)]
    fn insert(&mut self, k: K, v: V) -> Option<V> {
        FxIndexMap::insert(self, k, v)
    }

    #[inline(always)]
    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
    where
        K: Borrow<Q>,
    {
        // FIXME(#120456) - is `swap_remove` correct?
        FxIndexMap::swap_remove(self, k)
    }

    #[inline(always)]
    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
        self.iter().filter_map(move |(k, v)| f(k, v)).collect()
    }

    #[inline(always)]
    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
        match self.get(&k) {
            Some(v) => Ok(v),
            None => {
                vacant()?;
                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
            }
        }
    }

    #[inline(always)]
    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
        match self.entry(k) {
            IndexEntry::Occupied(e) => Ok(e.into_mut()),
            IndexEntry::Vacant(e) => {
                let v = vacant()?;
                Ok(e.insert(v))
            }
        }
    }
}
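// Context note (an assumption based on the wider crate, not visible in this file alone): this impl
// is what lets `FxIndexMap` serve as the interpreter's allocation map; the `compile_time_machine!`
// macro invoked in the `Machine` impl below is expected to select it as the memory-map type.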

pub type CompileTimeInterpCx<'tcx> = InterpCx<'tcx, CompileTimeMachine<'tcx>>;

#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum MemoryKind {
    Heap,
}

impl fmt::Display for MemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Heap => write!(f, "heap allocation"),
        }
    }
}

impl interpret::MayLeak for MemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Heap => false,
        }
    }
}

impl interpret::MayLeak for ! {
    #[inline(always)]
    fn may_leak(self) -> bool {
        // `self` is uninhabited
        self
    }
}

impl<'tcx> CompileTimeInterpCx<'tcx> {
    fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());

        use rustc_session::RemapFileNameExt;
        use rustc_session::config::RemapPathScopeComponents;
        (
            Symbol::intern(
                &caller
                    .file
                    .name
                    .for_scope(self.tcx.sess, RemapPathScopeComponents::DIAGNOSTICS)
                    .to_string_lossy(),
            ),
            u32::try_from(caller.line).unwrap(),
            u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
        )
    }

    /// "Intercept" a function call, because we have something special to do for it.
    /// All `#[rustc_do_not_const_check]` functions MUST be hooked here.
    /// If this returns `Some` function, which may be `instance` or a different function with
    /// compatible arguments, then evaluation should continue with that function.
    /// If this returns `None`, the function call has been handled and the function has returned.
    fn hook_special_const_fn(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[FnArg<'tcx>],
        _dest: &MPlaceTy<'tcx>,
        _ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        let def_id = instance.def_id();

        if self.tcx.has_attr(def_id, sym::rustc_const_panic_str)
            || self.tcx.is_lang_item(def_id, LangItem::BeginPanic)
        {
            let args = self.copy_fn_args(args);
            // &str or &&str
            assert!(args.len() == 1);

            let mut msg_place = self.deref_pointer(&args[0])?;
            while msg_place.layout.ty.is_ref() {
                msg_place = self.deref_pointer(&msg_place)?;
            }

            let msg = Symbol::intern(self.read_str(&msg_place)?);
            let span = self.find_closest_untracked_caller_location();
            let (file, line, col) = self.location_triple_for_span(span);
            return Err(ConstEvalErrKind::Panic { msg, file, line, col }).into();
        } else if self.tcx.is_lang_item(def_id, LangItem::PanicFmt) {
            // For panic_fmt, call const_panic_fmt instead.
            let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, None);
            let new_instance = ty::Instance::expect_resolve(
                *self.tcx,
                self.typing_env(),
                const_def_id,
                instance.args,
                self.cur_span(),
            );

            return interp_ok(Some(new_instance));
        }
        interp_ok(Some(instance))
    }
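        // The returned triple is (file name, line, column) with 1-based line and column numbers;
        // `col_display` is 0-based, hence the `checked_add(1)` below.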

    /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
    /// Returns `2` if the result is unknown.
    /// Returns `1` if the pointers are guaranteed equal.
    /// Returns `0` if the pointers are guaranteed unequal.
    ///
    /// Note that this intrinsic is exposed on stable for comparison with null. In other words, any
    /// change to this function that affects comparison with null is insta-stable!
    fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
        interp_ok(match (a, b) {
            // Comparisons between integers are always known.
            (Scalar::Int { .. }, Scalar::Int { .. }) => {
                if a == b {
                    1
                } else {
                    0
                }
            }
            // Comparisons of abstract pointers with null pointers are known if the pointer
            // is in bounds, because if they are in bounds, the pointer can't be null.
            // Inequality with integers other than null can never be known for sure.
            (Scalar::Int(int), ptr @ Scalar::Ptr(..))
            | (ptr @ Scalar::Ptr(..), Scalar::Int(int))
                if int.is_null() && !self.scalar_may_be_null(ptr)? =>
            {
                0
            }
            // Equality with integers can never be known for sure.
            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2,
            // FIXME: return a `1` for when both sides are the same pointer, *except* that
            // some things (like functions and vtables) do not have stable addresses
            // so we need to be careful around them (see e.g. #73722).
            // FIXME: return `0` for at least some comparisons where we can reliably
            // determine the result of runtime inequality tests at compile-time.
            // Examples include comparison of addresses in different static items.
            (Scalar::Ptr(..), Scalar::Ptr(..)) => 2,
        })
    }
}
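// Illustrative note (an assumption about the library side, not established by this file): the
// `ptr_guaranteed_cmp` result computed above surfaces in `core` through
// `<*const T>::guaranteed_eq`/`guaranteed_ne`, roughly as 1 => Some(true), 0 => Some(false),
// 2 => None (undecided at compile time, left for runtime).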

impl<'tcx> CompileTimeMachine<'tcx> {
    #[inline(always)]
    /// Find the first stack frame that is within the current crate, if any.
    /// Otherwise, return the crate's `HirId`.
    pub fn best_lint_scope(&self, tcx: TyCtxt<'tcx>) -> hir::HirId {
        self.stack.iter().find_map(|frame| frame.lint_root(tcx)).unwrap_or(CRATE_HIR_ID)
    }
}

impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
    compile_time_machine!(<'tcx>);

    type MemoryKind = MemoryKind;

    const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error

    #[inline(always)]
    fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool {
        matches!(ecx.machine.check_alignment, CheckAlignment::Error)
    }

    #[inline(always)]
    fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool {
        ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.is_uninhabited()
    }

    fn load_mir(
        ecx: &InterpCx<'tcx, Self>,
        instance: ty::InstanceKind<'tcx>,
    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
        match instance {
            ty::InstanceKind::Item(def) => interp_ok(ecx.tcx.mir_for_ctfe(def)),
            _ => interp_ok(ecx.tcx.instance_mir(instance)),
        }
    }

    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'tcx, Self>,
        orig_instance: ty::Instance<'tcx>,
        _abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx>],
        dest: &MPlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        _unwind: mir::UnwindAction, // unwinding is not supported in consts
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        debug!("find_mir_or_eval_fn: {:?}", orig_instance);

        // Replace some functions.
        let Some(instance) = ecx.hook_special_const_fn(orig_instance, args, dest, ret)? else {
            // Call has already been handled.
            return interp_ok(None);
        };

        // Only check non-glue functions
        if let ty::InstanceKind::Item(def) = instance.def {
            // Execution might have wandered off into other crates, so we cannot do a stability-
            // sensitive check here. But we can at least rule out functions that are not const at
            // all. That said, we have to allow calling functions inside a trait marked with
            // #[const_trait]. These *are* const-checked!
            if !ecx.tcx.is_const_fn(def) || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check) {
                // We certainly do *not* want to actually call the fn
                // though, so be sure we return here.
                throw_unsup_format!("calling non-const function `{}`", instance)
            }
        }

        // This is a const fn. Call it.
        // In case of replacement, we return the *original* instance to make backtraces work out
        // (and we hope this does not confuse the FnAbi checks too much).
        interp_ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        let msg = Symbol::intern(msg);
        let span = ecx.find_closest_untracked_caller_location();
        let (file, line, col) = ecx.location_triple_for_span(span);
        Err(ConstEvalErrKind::Panic { msg, file, line, col }).into()
    }

    fn call_intrinsic(
        ecx: &mut InterpCx<'tcx, Self>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx, Self::Provenance>,
        target: Option<mir::BasicBlock>,
        _unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        // Shared intrinsics.
        if ecx.eval_intrinsic(instance, args, dest, target)? {
            return interp_ok(None);
        }
        let intrinsic_name = ecx.tcx.item_name(instance.def_id());

        // CTFE-specific intrinsics.
        match intrinsic_name {
            sym::ptr_guaranteed_cmp => {
                let a = ecx.read_scalar(&args[0])?;
                let b = ecx.read_scalar(&args[1])?;
                let cmp = ecx.guaranteed_cmp(a, b)?;
                ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
            }
            sym::const_allocate => {
                let size = ecx.read_scalar(&args[0])?.to_target_usize(ecx)?;
                let align = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;

                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_custom!(
                        fluent::const_eval_invalid_align_details,
                        name = "const_allocate",
                        err_kind = err.diag_ident(),
                        align = err.align()
                    ),
                };

                let ptr = ecx.allocate_ptr(
                    Size::from_bytes(size),
                    align,
                    interpret::MemoryKind::Machine(MemoryKind::Heap),
                    AllocInit::Uninit,
                )?;
                ecx.write_pointer(ptr, dest)?;
            }
            sym::const_deallocate => {
                let ptr = ecx.read_pointer(&args[0])?;
                let size = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
                let align = ecx.read_scalar(&args[2])?.to_target_usize(ecx)?;

                let size = Size::from_bytes(size);
                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_custom!(
                        fluent::const_eval_invalid_align_details,
                        name = "const_deallocate",
                        err_kind = err.diag_ident(),
                        align = err.align()
                    ),
                };

                // If an allocation was created in another const,
                // we don't deallocate it.
                let (alloc_id, _, _) = ecx.ptr_get_alloc_id(ptr, 0)?;
                let is_allocated_in_another_const = matches!(
                    ecx.tcx.try_get_global_alloc(alloc_id),
                    Some(interpret::GlobalAlloc::Memory(_))
                );

                if !is_allocated_in_another_const {
                    ecx.deallocate_ptr(
                        ptr,
                        Some((size, align)),
                        interpret::MemoryKind::Machine(MemoryKind::Heap),
                    )?;
                }
            }
            // The intrinsic represents whether the value is known to the optimizer (LLVM).
            // We're not doing any optimizations here, so there is no optimizer that could know the value.
            // (We know the value here in the machine of course, but this is the runtime of that code,
            // not the optimization stage.)
            sym::is_val_statically_known => ecx.write_scalar(Scalar::from_bool(false), dest)?,
            _ => {
                // We haven't handled the intrinsic; let's see if we can use a fallback body.
                if ecx.tcx.intrinsic(instance.def_id()).unwrap().must_be_overridden {
                    throw_unsup_format!(
                        "intrinsic `{intrinsic_name}` is not supported at compile-time"
                    );
                }
                return interp_ok(Some(ty::Instance {
                    def: ty::InstanceKind::Item(instance.def_id()),
                    args: instance.args,
                }));
            }
        }

        // Intrinsic is done, jump to next block.
        ecx.return_to_block(target)?;
        interp_ok(None)
    }

    fn assert_panic(
        ecx: &mut InterpCx<'tcx, Self>,
        msg: &AssertMessage<'tcx>,
        _unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::AssertKind::*;
        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
        let eval_to_int =
            |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
        let err = match msg {
            BoundsCheck { len, index } => {
                let len = eval_to_int(len)?;
                let index = eval_to_int(index)?;
                BoundsCheck { len, index }
            }
            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
            ResumedAfterReturn(coroutine_kind) => ResumedAfterReturn(*coroutine_kind),
            ResumedAfterPanic(coroutine_kind) => ResumedAfterPanic(*coroutine_kind),
            MisalignedPointerDereference { ref required, ref found } => {
                MisalignedPointerDereference {
                    required: eval_to_int(required)?,
                    found: eval_to_int(found)?,
                }
            }
            NullPointerDereference => NullPointerDereference,
        };
        Err(ConstEvalErrKind::AssertFailure(err)).into()
    }

    fn binary_ptr_op(
        _ecx: &InterpCx<'tcx, Self>,
        _bin_op: mir::BinOp,
        _left: &ImmTy<'tcx>,
        _right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
    }

    fn increment_const_eval_counter(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        // If the addition overflows, the counter is already at `usize::MAX`: either reporting is
        // disabled or the step limit was dealt with in a previous call to
        // `increment_const_eval_counter`, so there is nothing to do.

        if let Some(new_steps) = ecx.machine.num_evaluated_steps.checked_add(1) {
            let (limit, start) = if ecx.tcx.sess.opts.unstable_opts.tiny_const_eval_limit {
                (TINY_LINT_TERMINATOR_LIMIT, TINY_LINT_TERMINATOR_LIMIT)
            } else {
                (LINT_TERMINATOR_LIMIT, PROGRESS_INDICATOR_START)
            };

            ecx.machine.num_evaluated_steps = new_steps;
            // By default, we have a *deny* lint kicking in after some time
            // to ensure `loop {}` doesn't just go forever.
            // In case that lint got reduced, in particular for `--cap-lints` situations, we also
            // have a hard warning shown every now and then for really long executions.
            if new_steps == limit {
                // By default, we stop after `LINT_TERMINATOR_LIMIT` steps, but the user can
                // disable this lint to be able to run until the heat death of the universe or
                // power loss, whichever comes first.
                let hir_id = ecx.machine.best_lint_scope(*ecx.tcx);
                let is_error = ecx
                    .tcx
                    .lint_level_at_node(
                        rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
                        hir_id,
                    )
                    .0
                    .is_error();
                let span = ecx.cur_span();
                ecx.tcx.emit_node_span_lint(
                    rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
                    hir_id,
                    span,
                    LongRunning { item_span: ecx.tcx.span },
                );
                // If this was a hard error, don't bother continuing evaluation.
                if is_error {
                    let guard = ecx
                        .tcx
                        .dcx()
                        .span_delayed_bug(span, "The deny lint should have already errored");
                    throw_inval!(AlreadyReported(ReportedErrorInfo::allowed_in_infallible(guard)));
                }
            } else if new_steps > start && new_steps.is_power_of_two() {
                // Only report after a certain number of terminators have been evaluated and the
                // current number of evaluated terminators is a power of 2. The latter gives us a cheap
                // way to implement exponential backoff.
                let span = ecx.cur_span();
                // We store a unique number in `force_duplicate` to evade `-Z deduplicate-diagnostics`.
                // `new_steps` is guaranteed to be unique because `ecx.machine.num_evaluated_steps` is
                // always increasing.
                ecx.tcx.dcx().emit_warn(LongRunningWarn {
                    span,
                    item_span: ecx.tcx.span,
                    force_duplicate: new_steps,
                });
            }
        }

        interp_ok(())
    }

    #[inline(always)]
    fn expose_provenance(
        _ecx: &InterpCx<'tcx, Self>,
        _provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        // This is only reachable with -Zunleash-the-miri-inside-of-you.
        throw_unsup_format!("exposing pointers is not possible at compile-time")
    }

    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx>,
    ) -> InterpResult<'tcx, Frame<'tcx>> {
        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
        if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            interp_ok(frame)
        }
    }

    #[inline(always)]
    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        &ecx.machine.stack
    }

    #[inline(always)]
    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        &mut ecx.machine.stack
    }

    fn before_access_global(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_id: AllocId,
        alloc: ConstAllocation<'tcx>,
        _static_def_id: Option<DefId>,
        is_write: bool,
    ) -> InterpResult<'tcx> {
        let alloc = alloc.inner();
        if is_write {
            // Write access. These are never allowed, but we give a targeted error message.
            match alloc.mutability {
                Mutability::Not => throw_ub!(WriteToReadOnly(alloc_id)),
                Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal).into(),
            }
        } else {
            // Read access. These are usually allowed, with some exceptions.
            if machine.can_access_mut_global == CanAccessMutGlobal::Yes {
                // Machine configuration allows us to read from anything (e.g., `static` initializer).
                interp_ok(())
            } else if alloc.mutability == Mutability::Mut {
                // Machine configuration does not allow us to read statics (e.g., `const`
                // initializer).
                Err(ConstEvalErrKind::ConstAccessesMutGlobal).into()
            } else {
                // Immutable global, this read is fine.
                assert_eq!(alloc.mutability, Mutability::Not);
                interp_ok(())
            }
        }
    }
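    // Context note (descriptive): `can_access_mut_global` is `Yes` when evaluating `static`
    // initializers, which may read other globals even if those are mutable, and `No` for
    // `const`-style evaluation, where any read of mutable global memory is rejected above with
    // `ConstAccessesMutGlobal`.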

    fn retag_ptr_value(
        ecx: &mut InterpCx<'tcx, Self>,
        _kind: mir::RetagKind,
        val: &ImmTy<'tcx, CtfeProvenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, CtfeProvenance>> {
        // If it's a frozen shared reference that's not already immutable, potentially make it immutable.
        // (Do nothing on `None` provenance; that cannot store immutability anyway.)
        if let ty::Ref(_, ty, mutbl) = val.layout.ty.kind()
            && *mutbl == Mutability::Not
            && val
                .to_scalar_and_meta()
                .0
                .to_pointer(ecx)?
                .provenance
                .is_some_and(|p| !p.immutable())
        {
            // That next check is expensive; that's why we have all the guards above.
            let is_immutable = ty.is_freeze(*ecx.tcx, ecx.typing_env());
            let place = ecx.ref_to_mplace(val)?;
            let new_place = if is_immutable {
                place.map_provenance(CtfeProvenance::as_immutable)
            } else {
                // Even if it is not immutable, remember that it is a shared reference.
                // This allows it to become part of the final value of the constant.
                // (See <https://github.com/rust-lang/rust/pull/128543> for why we allow this
                // even when there is interior mutability.)
                place.map_provenance(CtfeProvenance::as_shared_ref)
            };
            interp_ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
        } else {
            interp_ok(val.clone())
        }
    }

    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        _machine: &mut Self,
        _alloc_extra: &mut Self::AllocExtra,
        (_alloc_id, immutable): (AllocId, bool),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if range.size == Size::ZERO {
            // Nothing to check.
            return interp_ok(());
        }
        // Reject writes through immutable pointers.
        if immutable {
            return Err(ConstEvalErrKind::WriteThroughImmutablePointer).into();
        }
        // Everything else is fine.
        interp_ok(())
    }

    fn before_alloc_read(ecx: &InterpCx<'tcx, Self>, alloc_id: AllocId) -> InterpResult<'tcx> {
        // Check if this is the currently evaluated static.
        if Some(alloc_id) == ecx.machine.static_root_ids.map(|(id, _)| id) {
            return Err(ConstEvalErrKind::RecursiveStatic).into();
        }
        // If this is another static, make sure we fire off the query to detect cycles.
        // But only do that when checks for static recursion are enabled.
        if ecx.machine.static_root_ids.is_some() {
            if let Some(GlobalAlloc::Static(def_id)) = ecx.tcx.try_get_global_alloc(alloc_id) {
                if ecx.tcx.is_foreign_item(def_id) {
                    throw_unsup!(ExternStatic(def_id));
                }
                ecx.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
            }
        }
        interp_ok(())
    }

    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        if ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks {
            Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
        } else {
            // Don't bother caching, we're only doing one validation at the end anyway.
            Cow::Owned(compute_range())
        }
    }
}

// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
// at the bottom of this file.