rustc_const_eval/interpret/memory.rs

//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
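//!
//! For example (an illustrative case): a zero-sized access at address 0x8 with required
//! alignment 4 is fine even though no allocation lives there, but the same access at
//! address 0x2 must still fail the alignment check, and one at address 0x0 must fail
//! the null check.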

use std::assert_matches::assert_matches;
use std::borrow::{Borrow, Cow};
use std::cell::Cell;
use std::collections::VecDeque;
use std::{fmt, ptr};

use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, throw_ub_format};
use tracing::{debug, instrument, trace};

use super::{
    AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg,
    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
    err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
};
use crate::fluent_generated as fluent;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{m}"),
        }
    }
}

/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum AllocKind {
    /// A regular live data allocation.
    LiveData,
    /// A function allocation (that fn ptrs point to).
    Function,
    /// A (symbolic) vtable allocation.
    VTable,
    /// A dead allocation.
    Dead,
}

/// Metadata about an `AllocId`.
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct AllocInfo {
    pub size: Size,
    pub align: Align,
    pub kind: AllocKind,
    pub mutbl: Mutability,
}

impl AllocInfo {
    fn new(size: Size, align: Align, kind: AllocKind, mutbl: Mutability) -> Self {
        Self { size, align, kind, mutbl }
    }
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => interp_ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'tcx, M: Machine<'tcx>> {
    /// Allocations local to this instance of the interpreter. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,

    /// This stores whether we are currently doing reads purely for the purpose of validation.
    /// Those reads do not trigger the machine's hooks for memory reads.
    /// Needless to say, this must only be set with great care!
    validation_in_progress: Cell<bool>,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxIndexMap::default(),
            dead_alloc_map: FxIndexMap::default(),
            validation_in_progress: Cell::new(false),
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    #[inline]
    pub fn global_root_pointer(
        &self,
        ptr: Pointer<CtfeProvenance>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc_id = ptr.provenance.alloc_id();
        // We need to handle `extern static`.
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_pointer(self, def_id);
            }
            None => {
                assert!(
                    self.memory.extra_fn_ptr_map.contains_key(&alloc_id),
                    "{alloc_id:?} is neither global nor a function pointer"
                );
            }
            _ => {}
        }
        // And we need to get the provenance.
        M::adjust_alloc_root_pointer(self, ptr, M::GLOBAL_KIND.map(MemoryKind::Machine))
    }

    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
        let id = match fn_val {
            FnVal::Instance(instance) => {
                let salt = M::get_global_alloc_salt(self, Some(instance));
                self.tcx.reserve_and_set_fn_alloc(instance, salt)
            }
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right root pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_root_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let params = self.machine.get_default_alloc_params();
        let alloc = if M::PANIC_ON_ALLOC_FAIL {
            Allocation::new(size, align, init, params)
        } else {
            Allocation::try_new(size, align, init, params)?
        };
        self.insert_allocation(alloc, kind)
    }

    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let params = self.machine.get_default_alloc_params();
        let alloc = Allocation::from_bytes(bytes, align, mutability, params);
        self.insert_allocation(alloc, kind)
    }

    pub fn insert_allocation(
        &mut self,
        alloc: Allocation<M::Provenance, (), M::Bytes>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        assert!(alloc.size() <= self.max_size_of_val());
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        // This cannot be merged with the `adjust_global_allocation` code path
        // since here we have an allocation that already uses `M::Bytes`.
        let extra = M::init_local_allocation(self, id, kind, alloc.size(), alloc.align)?;
        let alloc = alloc.with_extra(extra);
        self.memory.alloc_map.insert(id, (kind, alloc));
        M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind))
    }

    /// If this grows the allocation, `init_growth` determines
    /// whether the additional space will be initialized.
    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init_growth: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub_custom!(
                fluent::const_eval_realloc_or_alloc_with_offset,
                ptr = format!("{ptr:?}"),
                kind = "realloc"
            );
        }

        // For simplicity's sake, we implement reallocation as "alloc, copy, dealloc".
        // This happens so rarely that the perf advantage is outweighed by the maintenance cost.
        // If requested, we zero-init the entire allocation, to ensure that a growing
        // allocation has its new bytes properly set. For the part that is copied,
        // `mem_copy` below will de-initialize things as necessary.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind, init_growth)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        interp_ok(new_ptr)
    }

    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr, 0)?;
        trace!("deallocating: {alloc_id:?}");

        if offset.bytes() != 0 {
            throw_ub_custom!(
                fluent::const_eval_realloc_or_alloc_with_offset,
                ptr = format!("{ptr:?}"),
                kind = "dealloc",
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function { .. }) => {
                    err_ub_custom!(
                        fluent::const_eval_invalid_dealloc,
                        alloc_id = alloc_id,
                        kind = "fn",
                    )
                }
                Some(GlobalAlloc::VTable(..)) => {
                    err_ub_custom!(
                        fluent::const_eval_invalid_dealloc,
                        alloc_id = alloc_id,
                        kind = "vtable",
                    )
                }
                Some(GlobalAlloc::TypeId { .. }) => {
                    err_ub_custom!(
                        fluent::const_eval_invalid_dealloc,
                        alloc_id = alloc_id,
                        kind = "typeid",
                    )
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_custom!(
                        fluent::const_eval_invalid_dealloc,
                        alloc_id = alloc_id,
                        kind = "static_mem"
                    )
                }
                None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccess)),
            })
            .into();
        };

        if alloc.mutability.is_not() {
            throw_ub_custom!(fluent::const_eval_dealloc_immutable, alloc = alloc_id,);
        }
        if alloc_kind != kind {
            throw_ub_custom!(
                fluent::const_eval_dealloc_kind_mismatch,
                alloc = alloc_id,
                alloc_kind = format!("{alloc_kind}"),
                kind = format!("{kind}"),
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_custom!(
                    fluent::const_eval_dealloc_incorrect_layout,
                    alloc = alloc_id,
                    size = alloc.size().bytes(),
                    align = alloc.align.bytes(),
                    size_found = size.bytes(),
                    align_found = align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::before_memory_deallocation(
            self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            ptr,
            (alloc_id, prov),
            size,
            alloc.align,
            kind,
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        interp_ok(())
    }

    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    #[inline(always)]
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(
            self,
            ptr,
            size,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let (size, align) =
                    this.get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccess)?;
                interp_ok((size, align, (alloc_id, offset, prov)))
            },
        )
    }

    /// Check if the given pointer points to live memory of the given `size`.
    /// The caller can control the error message for the out-of-bounds case.
    #[inline(always)]
    pub fn check_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Check whether the given pointer points to live memory for a signed amount of bytes.
    /// A negative amount means that the given range of memory to the left of the pointer
    /// needs to be dereferenceable.
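    ///
    /// For example (an illustrative call, not taken from any real caller):
    /// `check_ptr_access_signed(ptr, -4, msg)` requires the 4 bytes just *below* `ptr`
    /// to be dereferenceable, while a `size` of `4` requires the 4 bytes starting at `ptr`.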
    pub fn check_ptr_access_signed(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. Supports signed sizes for checks "to the left" of
    /// a pointer.
    ///
    /// `alloc_size` will only get called for non-zero-sized accesses.
    ///
    /// Returns `None` if and only if the size is 0.
    fn check_and_deref_ptr<T, R: Borrow<Self>>(
        this: R,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(
            R,
            AllocId,
            Size,
            M::ProvenanceExtra,
        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        // Everything is okay with size 0.
        if size == 0 {
            return interp_ok(None);
        }

        interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
            Err(addr) => {
                // We couldn't get a proper allocation.
                throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
            }
            Ok((alloc_id, offset, prov)) => {
                let tcx = this.borrow().tcx;
                let (alloc_size, _alloc_align, ret_val) = alloc_size(this, alloc_id, offset, prov)?;
                let offset = offset.bytes();
                // Compute absolute begin and end of the range.
                let (begin, end) = if size >= 0 {
                    (Some(offset), offset.checked_add(size as u64))
                } else {
                    (offset.checked_sub(size.unsigned_abs()), Some(offset))
                };
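                // For example (purely illustrative numbers): with `offset = 2` and `size = 2`
                // this yields `(Some(2), Some(4))`, i.e. bytes [2, 4); with `size = -2` it
                // yields `(Some(0), Some(2))`, i.e. the two bytes just below the pointer.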
                // Ensure both are within bounds.
                let in_bounds = begin.is_some() && end.is_some_and(|e| e <= alloc_size.bytes());
                if !in_bounds {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: tcx.sign_extend_to_target_isize(offset),
                        inbounds_size: size,
                        msg,
                    })
                }

                Some(ret_val)
            }
        })
    }

    pub(super) fn check_misalign(
        &self,
        misaligned: Option<Misalignment>,
        msg: CheckAlignMsg,
    ) -> InterpResult<'tcx> {
        if let Some(misaligned) = misaligned {
            throw_ub!(AlignmentCheckFailed(misaligned, msg))
        }
        interp_ok(())
    }

    pub(super) fn is_ptr_misaligned(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> Option<Misalignment> {
        if !M::enforce_alignment(self) || align.bytes() == 1 {
            return None;
        }

        #[inline]
        fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
            if offset.is_multiple_of(align.bytes()) {
                None
            } else {
                // The largest power of two by which `offset` is divisible.
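                // For example (illustrative): `offset = 12` is `0b1100`, which has two trailing
                // zeros, so `offset_pow2 = 4` and the pointer is at most 4-aligned.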
                let offset_pow2 = 1 << offset.trailing_zeros();
                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
            }
        }

        match self.ptr_try_get_alloc_id(ptr, 0) {
            Err(addr) => is_offset_misaligned(addr, align),
            Ok((alloc_id, offset, _prov)) => {
                let alloc_info = self.get_alloc_info(alloc_id);
                if let Some(misalign) = M::alignment_check(
                    self,
                    alloc_id,
                    alloc_info.align,
                    alloc_info.kind,
                    offset,
                    align,
                ) {
                    Some(misalign)
                } else if M::Provenance::OFFSET_IS_ADDR {
                    is_offset_misaligned(ptr.addr().bytes(), align)
                } else {
                    // Check allocation alignment and offset alignment.
                    if alloc_info.align.bytes() < align.bytes() {
                        Some(Misalignment { has: alloc_info.align, required: align })
                    } else {
                        is_offset_misaligned(offset.bytes(), align)
                    }
                }
            }
        }
    }

    /// Checks a pointer for misalignment.
    ///
    /// The error assumes this is checking the pointer used directly for an access.
    pub fn check_ptr_align(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> InterpResult<'tcx> {
        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// This function is used by Miri's provenance GC to remove unreachable entries from the
    /// `dead_alloc_map`.
    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
        // Unlike the other GC helpers, which check whether an `AllocId` is found in the
        // interpreter or is live, all the IDs in this map belong to dead allocations, so we
        // don't need to check for liveness.
        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
    }
}

/// Allocation accessors
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer provenance, so it is indirected through
    /// `M::adjust_allocation`.
    fn get_global_alloc(
        &self,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
            None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId`s that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if self.tcx.is_foreign_item(def_id) {
                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
                    // referencing arbitrary (declared) extern statics.
                    throw_unsup!(ExternStatic(def_id));
                }

                // We don't give a span -- statics don't need that, they cannot be generic or associated.
                let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
                (val, Some(def_id))
            }
        };
        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        M::adjust_global_allocation(
            self,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc.inner(),
        )
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    pub fn get_alloc_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
        let a = self.memory.alloc_map.get_or(id, || {
            // We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
            // so we use `report_err` for that.
            let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                            not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => interp_ok(&a.1),
            Err(a) => a.into(),
        }
    }

    /// Gives raw, immutable access to the `Allocation` address, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
        let alloc = self.get_alloc_raw(id)?;
        interp_ok(alloc.get_bytes_unchecked_raw())
    }

    /// Bounds-checked *but not align-checked* allocation access.
    pub fn get_ptr_alloc<'a>(
        &'a self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        let ptr_and_alloc = Self::check_and_deref_ptr(
            self,
            ptr,
            size_i64,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let alloc = this.get_alloc_raw(alloc_id)?;
                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
            },
        )?;
        // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
        // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
        // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
        if !self.memory.validation_in_progress.get() {
            if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
                M::before_alloc_access(self.tcx, &self.machine, alloc_id)?;
            }
        }

        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            if !self.memory.validation_in_progress.get() {
                M::before_memory_read(
                    self.tcx,
                    &self.machine,
                    &alloc.extra,
                    ptr,
                    (alloc_id, prov),
                    range,
                )?;
            }
            interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
        } else {
            interp_ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
        interp_ok(&self.get_alloc_raw(id)?.extra)
    }

    /// Return the `mutability` field of the given allocation.
    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
        interp_ok(self.get_alloc_raw(id)?.mutability)
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
    /// allocation.
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    pub fn get_alloc_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
        // We have "NLL problem case #3" here, which cannot be worked around without loss of
        // efficiency even for the common case where the key is in the map.
        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`, and that boils down to
        // Miri's `adjust_alloc_root_pointer` needing to look up the size of the allocation.
        // It could be avoided with a totally separate codepath in Miri for handling the absolute address
        // of global allocations, but that's not worth it.)
        if self.memory.alloc_map.get_mut(id).is_none() {
            // Slow path.
            // The allocation was not found locally, so look it up globally.
            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                    not expect that to happen",
            );
            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
        }

        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
        if alloc.mutability.is_not() {
            throw_ub!(WriteToReadOnly(id))
        }
        interp_ok((alloc, &mut self.machine))
    }

    /// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    pub fn get_alloc_bytes_unchecked_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, *mut u8> {
        let alloc = self.get_alloc_raw_mut(id)?.0;
        interp_ok(alloc.get_bytes_unchecked_raw_mut())
    }

    /// Bounds-checked *but not align-checked* allocation access.
    pub fn get_ptr_alloc_mut<'a>(
        &'a mut self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let tcx = self.tcx;
        let validation_in_progress = self.memory.validation_in_progress.get();

        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        let ptr_and_alloc = Self::check_and_deref_ptr(
            self,
            ptr,
            size_i64,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
            },
        )?;

        if let Some((alloc_id, offset, prov, alloc, machine)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            if !validation_in_progress {
                // For writes, it's okay to only call these hooks when there actually is a non-zero
                // number of bytes to be written: a zero-sized write doesn't manifest anything.
                M::before_alloc_access(tcx, machine, alloc_id)?;
                M::before_memory_write(
                    tcx,
                    machine,
                    &mut alloc.extra,
                    ptr,
                    (alloc_id, prov),
                    range,
                )?;
            }
            interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
        } else {
            interp_ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra_mut<'a>(
        &'a mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
        interp_ok((&mut alloc.extra, machine))
    }

    /// Check whether an allocation is live. This is faster than calling
    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
    pub fn is_alloc_live(&self, id: AllocId) -> bool {
        self.memory.alloc_map.contains_key_ref(&id)
            || self.memory.extra_fn_ptr_map.contains_key(&id)
            // We check `tcx` last as that has to acquire a lock in `many-seeds` mode.
            // This also matches the order in `get_alloc_info`.
            || self.tcx.try_get_global_alloc(id).is_some()
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
            return AllocInfo::new(
                alloc.size(),
                alloc.align,
                AllocKind::LiveData,
                alloc.mutability,
            );
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if let Some(fn_val) = self.get_fn_alloc(id) {
            let align = match fn_val {
                FnVal::Instance(instance) => {
                    self.tcx.codegen_fn_attrs(instance.def_id()).alignment.unwrap_or(Align::ONE)
                }
                // Machine-specific extra functions currently do not support alignment restrictions.
                FnVal::Other(_) => Align::ONE,
            };

            return AllocInfo::new(Size::ZERO, align, AllocKind::Function, Mutability::Not);
        }

        // # Global allocations
        if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
            let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
            let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
            let kind = match global_alloc {
                GlobalAlloc::TypeId { .. }
                | GlobalAlloc::Static { .. }
                | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
                GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
                GlobalAlloc::VTable { .. } => AllocKind::VTable,
            };
            return AllocInfo::new(size, align, kind, mutbl);
        }

        // # Dead pointers
        let (size, align) = *self
            .memory
            .dead_alloc_map
            .get(&id)
            .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
        AllocInfo::new(size, align, AllocKind::Dead, Mutability::Not)
    }

    /// Obtain the size and alignment of a *live* allocation.
    fn get_live_alloc_size_and_align(
        &self,
        id: AllocId,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, (Size, Align)> {
        let info = self.get_alloc_info(id);
        if matches!(info.kind, AllocKind::Dead) {
            throw_ub!(PointerUseAfterFree(id, msg))
        }
        interp_ok((info.size, info.align))
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.try_get_global_alloc(id) {
                Some(GlobalAlloc::Function { instance, .. }) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    /// Takes a pointer that is the first chunk of a `TypeId` and returns the type that its
    /// provenance refers to, as well as the segment of the hash that this pointer covers.
    pub fn get_ptr_type_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (Ty<'tcx>, Size)> {
        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
        let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else {
            throw_ub_format!("type_id_eq: `TypeId` provenance is not a type id")
        };
        interp_ok((ty, offset))
    }

    pub fn get_ptr_fn(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("get_ptr_fn({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
        }
        self.get_fn_alloc(alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
            .into()
    }

    /// Get the dynamic type of the given vtable pointer.
    /// If `expected_trait` is `Some`, it must be a vtable for the given trait.
    pub fn get_ptr_vtable_ty(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        expected_trait: Option<&'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>>,
    ) -> InterpResult<'tcx, Ty<'tcx>> {
        trace!("get_ptr_vtable({:?})", ptr);
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        }
        let Some(GlobalAlloc::VTable(ty, vtable_dyn_type)) =
            self.tcx.try_get_global_alloc(alloc_id)
        else {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        };
        if let Some(expected_dyn_type) = expected_trait {
            self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
        }
        interp_ok(ty)
    }

    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
        interp_ok(())
    }

    /// Visit all allocations reachable from the given start set, by recursively traversing the
    /// provenance information of those allocations.
    pub fn visit_reachable_allocs(
        &mut self,
        start: Vec<AllocId>,
        mut visit: impl FnMut(&mut Self, AllocId, &AllocInfo) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        let mut done = FxHashSet::default();
        let mut todo = start;
        while let Some(id) = todo.pop() {
            if !done.insert(id) {
                // We already saw this allocation before, don't process it again.
                continue;
            }
            let info = self.get_alloc_info(id);

            // Recurse, if there is data here.
            // Do this *before* invoking the callback, as the callback might mutate the
            // allocation and e.g. replace all provenance by wildcards!
            if matches!(info.kind, AllocKind::LiveData) {
                let alloc = self.get_alloc_raw(id)?;
                for prov in alloc.provenance().provenances() {
                    if let Some(id) = prov.get_alloc_id() {
                        todo.push(id);
                    }
                }
            }

            // Call the callback.
            visit(self, id, &info)?;
        }
        interp_ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { ecx: self, allocs }
    }

    /// Print the allocation's bytes, without any nested allocations.
    pub fn print_alloc_bytes_for_diagnostics(&self, id: AllocId) -> String {
        // Using the "raw" access to avoid the `before_alloc_read` hook, we specifically
        // want to be able to read all memory for diagnostics, even if that is cyclic.
        let alloc = self.get_alloc_raw(id).unwrap();
        let mut bytes = String::new();
        if alloc.size() != Size::ZERO {
            bytes = "\n".into();
            // FIXME(translation) there might be pieces that are translatable.
            rustc_middle::mir::pretty::write_allocation_bytes(*self.tcx, alloc, &mut bytes, "    ")
                .unwrap();
        }
        bytes
    }

    /// Find leaked allocations, remove them from memory, and return them.
    /// Allocations reachable from `static_roots` or a `Global` allocation are not considered
    /// leaked, and neither are allocations whose kind's `may_leak()` returns true.
    ///
    /// This is highly destructive, no more execution can happen after this!
    pub fn take_leaked_allocations(
        &mut self,
        static_roots: impl FnOnce(&Self) -> &[AllocId],
    ) -> Vec<(AllocId, MemoryKind<M::MemoryKind>, Allocation<M::Provenance, M::AllocExtra, M::Bytes>)>
    {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> =
                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                    if Some(kind) == global_kind { Some(id) } else { None }
                });
            todo.extend(static_roots(self));
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation; push the allocations it points to onto `todo`.
                    // We only need to care about `alloc_map` memory here, as entirely unchanged
                    // global memory cannot point to memory relevant for the leak check.
                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                        todo.extend(
                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
                        );
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaked: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let mut result = Vec::new();
        for &id in leaked.iter() {
            let (kind, alloc) = self.memory.alloc_map.remove(&id).unwrap();
            result.push((id, kind, alloc));
        }
        result
    }

    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
    ///
    /// We do this so Miri's allocation access tracking does not show the validation
    /// reads as spurious accesses.
    pub fn run_for_validation_mut<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
        // This deliberately uses `==` on `bool` to follow the pattern
        // `assert!(val.replace(new) == old)`.
        assert!(
            self.memory.validation_in_progress.replace(true) == false,
            "`validation_in_progress` was already set"
        );
        let res = f(self);
        assert!(
            self.memory.validation_in_progress.replace(false) == true,
            "`validation_in_progress` was unset by someone else"
        );
        res
    }

    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
    ///
    /// We do this so Miri's allocation access tracking does not show the validation
    /// reads as spurious accesses.
    pub fn run_for_validation_ref<R>(&self, f: impl FnOnce(&Self) -> R) -> R {
        // This deliberately uses `==` on `bool` to follow the pattern
        // `assert!(val.replace(new) == old)`.
        assert!(
            self.memory.validation_in_progress.replace(true) == false,
            "`validation_in_progress` was already set"
        );
        let res = f(self);
        assert!(
            self.memory.validation_in_progress.replace(false) == true,
            "`validation_in_progress` was unset by someone else"
        );
        res
    }

    pub(super) fn validation_in_progress(&self) -> bool {
        self.memory.validation_in_progress.get()
    }
}

#[doc(hidden)]
/// There's no way to use this directly; it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> {
    ecx: &'a InterpCx<'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Prov`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Prov, Extra, Bytes>,
        ) -> std::fmt::Result {
            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
            {
                allocs_to_print.push_back(alloc_id);
            }
            write!(fmt, "{}", display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{id:?}")?;
            match self.ecx.memory.alloc_map.get(id) {
                Some((kind, alloc)) => {
                    // normal alloc
                    write!(fmt, " ({kind}, ")?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        *self.ecx.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.ecx.tcx.try_get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                *self.ecx.tcx,
                                &mut allocs_to_print,
                                alloc.inner(),
                            )?;
                        }
                        Some(GlobalAlloc::Function { instance, .. }) => {
                            write!(fmt, " (fn: {instance})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
                            write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
                        }
                        Some(GlobalAlloc::TypeId { ty }) => {
                            write!(fmt, " (typeid for {ty})")?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}
1240
1241/// Reading and writing.
1242impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
1243    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
1244{
1245    pub fn as_ref<'b>(&'b self) -> AllocRef<'b, 'tcx, Prov, Extra, Bytes> {
1246        AllocRef { alloc: self.alloc, range: self.range, tcx: self.tcx, alloc_id: self.alloc_id }
1247    }
1248
1249    /// `range` is relative to this allocation reference, not the base of the allocation.
1250    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
1251        let range = self.range.subrange(range);
1252        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
1253
1254        self.alloc
1255            .write_scalar(&self.tcx, range, val)
1256            .map_err(|e| e.to_interp_error(self.alloc_id))
1257            .into()
1258    }
1259
1260    /// `offset` is relative to this allocation reference, not the base of the allocation.
1261    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
1262        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
1263    }
1264
1265    /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
1266    pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
1267        let range = self.range.subrange(range);
1268
1269        self.alloc
1270            .write_uninit(&self.tcx, range)
1271            .map_err(|e| e.to_interp_error(self.alloc_id))
1272            .into()
1273    }
1274
1275    /// Mark the entire referenced range as uninitialized.
1276    pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
1277        self.alloc
1278            .write_uninit(&self.tcx, self.range)
1279            .map_err(|e| e.to_interp_error(self.alloc_id))
1280            .into()
1281    }
1282
1283    /// Remove all provenance in the referenced range.
1284    pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
1285        self.alloc
1286            .clear_provenance(&self.tcx, self.range)
1287            .map_err(|e| e.to_interp_error(self.alloc_id))
1288            .into()
1289    }
1290}
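
// A minimal usage sketch (not part of the interpreter itself), assuming an interpreter context
// `ecx`, a `ptr: Pointer<Option<M::Provenance>>`, and a pointer-sized `val: Scalar<_>`; a typical
// pointer-sized store through these helpers could look like:
//
//     let size = ecx.tcx.data_layout().pointer_size();
//     if let Some(mut alloc) = ecx.get_ptr_alloc_mut(ptr, size)? {
//         alloc.write_ptr_sized(Size::ZERO, val)?;
//     }
//
// `get_ptr_alloc_mut` returns `None` only for zero-sized accesses, so with a non-zero `size` the
// `Some` branch is always taken.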
1291
1292impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
1293    /// `range` is relative to this allocation reference, not the base of the allocation.
1294    pub fn read_scalar(
1295        &self,
1296        range: AllocRange,
1297        read_provenance: bool,
1298    ) -> InterpResult<'tcx, Scalar<Prov>> {
1299        let range = self.range.subrange(range);
1300        self.alloc
1301            .read_scalar(&self.tcx, range, read_provenance)
1302            .map_err(|e| e.to_interp_error(self.alloc_id))
1303            .into()
1304    }
1305
1306    /// `range` is relative to this allocation reference, not the base of the allocation.
1307    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
1308        self.read_scalar(range, /*read_provenance*/ false)
1309    }
1310
1311    /// `offset` is relative to this allocation reference, not the base of the allocation.
1312    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
1313        self.read_scalar(
1314            alloc_range(offset, self.tcx.data_layout().pointer_size()),
1315            /*read_provenance*/ true,
1316        )
1317    }
1318
1319    /// Returns the bytes for the entire range of this allocation reference, stripping provenance.
1320    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
1321        self.alloc
1322            .get_bytes_strip_provenance(&self.tcx, self.range)
1323            .map_err(|e| e.to_interp_error(self.alloc_id))
1324            .into()
1325    }
1326
1327    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
1328    pub fn has_provenance(&self) -> bool {
1329        !self.alloc.provenance().range_empty(self.range, &self.tcx)
1330    }
1331}
1332
1333impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
1334    /// Reads the given number of bytes from memory, and strips their provenance if possible.
1335    /// Returns them as a slice.
1336    ///
1337    /// Performs appropriate bounds checks.
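    ///
    /// A zero-sized read returns an empty slice.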
1338    pub fn read_bytes_ptr_strip_provenance(
1339        &self,
1340        ptr: Pointer<Option<M::Provenance>>,
1341        size: Size,
1342    ) -> InterpResult<'tcx, &[u8]> {
1343        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
1344            // zero-sized access
1345            return interp_ok(&[]);
1346        };
1347        // Side-step AllocRef and directly access the underlying bytes more efficiently.
1348        // (We are staying inside the bounds here so all is good.)
1349        interp_ok(
1350            alloc_ref
1351                .alloc
1352                .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
1353                .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
1354        )
1355    }
1356
1357    /// Writes the given stream of bytes into memory.
1358    ///
1359    /// Performs appropriate bounds checks.
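    ///
    /// The iterator must know its exact length up front: `size_hint` must return an upper bound
    /// equal to the lower bound, and exactly that many bytes must be yielded (e.g. a slice
    /// iterator, or `iter::repeat(0u8).take(n)`).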
1360    pub fn write_bytes_ptr(
1361        &mut self,
1362        ptr: Pointer<Option<M::Provenance>>,
1363        src: impl IntoIterator<Item = u8>,
1364    ) -> InterpResult<'tcx> {
1365        let mut src = src.into_iter();
1366        let (lower, upper) = src.size_hint();
1367        let len = upper.expect("can only write bounded iterators");
1368        assert_eq!(lower, len, "can only write iterators with a precise length");
1369
1370        let size = Size::from_bytes(len);
1371        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
1372            // zero-sized access
1373            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
1374            return interp_ok(());
1375        };
1376
1377        // Side-step AllocRef and directly access the underlying bytes more efficiently.
1378        // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
1379        let alloc_id = alloc_ref.alloc_id;
1380        let bytes = alloc_ref
1381            .alloc
1382            .get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range)
1383            .map_err(move |e| e.to_interp_error(alloc_id))?;
1384        // `zip` would stop when the first iterator ends; we want to definitely
1385        // cover all of `bytes`.
1386        for dest in bytes {
1387            *dest = src.next().expect("iterator was shorter than it said it would be");
1388        }
1389        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
1390        interp_ok(())
1391    }
1392
1393    pub fn mem_copy(
1394        &mut self,
1395        src: Pointer<Option<M::Provenance>>,
1396        dest: Pointer<Option<M::Provenance>>,
1397        size: Size,
1398        nonoverlapping: bool,
1399    ) -> InterpResult<'tcx> {
1400        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
1401    }
1402
1403    /// Performs `num_copies` many copies of `size` many bytes from `src` to `dest + i*size` (where
1404    /// `i` is the index of the copy).
1405    ///
1406    /// Either `nonoverlapping` must be true or `num_copies` must be 1; doing repeated copies that
1407    /// may overlap is not supported.
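    ///
    /// For example, with `size = 4` and `num_copies = 3`, the 4 bytes starting at `src` are
    /// written to `dest`, `dest + 4`, and `dest + 8`.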
1408    pub fn mem_copy_repeatedly(
1409        &mut self,
1410        src: Pointer<Option<M::Provenance>>,
1411        dest: Pointer<Option<M::Provenance>>,
1412        size: Size,
1413        num_copies: u64,
1414        nonoverlapping: bool,
1415    ) -> InterpResult<'tcx> {
1416        let tcx = self.tcx;
1417        // We need to do our own bounds-checks.
1418        let src_parts = self.get_ptr_access(src, size)?;
1419        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication
1420
1421        // Similar to `get_ptr_alloc`, we need to call `before_alloc_access` even for zero-sized
1422        // reads. However, just like in `get_ptr_alloc_mut`, the write part is okay to skip for
1423        // zero-sized writes.
1424        if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(src, size.bytes().try_into().unwrap())
1425        {
1426            M::before_alloc_access(tcx, &self.machine, alloc_id)?;
1427        }
1428
1429        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
1430        // and once below to get the underlying `&[mut] Allocation`.
1431
1432        // Source alloc preparations and access hooks.
1433        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
1434            // Zero-sized *source*; that means `dest` is zero-sized as well, and we have nothing to do.
1435            return interp_ok(());
1436        };
1437        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
1438        let src_range = alloc_range(src_offset, size);
1439        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");
1440
1441        // Trigger read hook.
1442        // For the overlapping case, it is crucial that we trigger the read hook
1443        // before the write hook -- the aliasing model cares about the order.
1444        M::before_memory_read(
1445            tcx,
1446            &self.machine,
1447            &src_alloc.extra,
1448            src,
1449            (src_alloc_id, src_prov),
1450            src_range,
1451        )?;
1452        // We need the `dest` ptr for the next operation, so we get it now.
1453        // We already did the source checks and called the hooks so we are good to return early.
1454        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
1455            // Zero-sized *destination*.
1456            return interp_ok(());
1457        };
1458
1459        // Prepare getting source provenance.
1460        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
1461        // First copy the (source) provenance to a temporary buffer, because getting write
1462        // access to the destination bytes below clears the provenance in the overwritten range,
1463        // which is correct, since we don't want to keep any of the old provenance at the target.
1464        // This will also error if copying partial provenance is not supported.
1465        let provenance = src_alloc
1466            .provenance()
1467            .prepare_copy(src_range, dest_offset, num_copies, self)
1468            .map_err(|e| e.to_interp_error(src_alloc_id))?;
1469        // Prepare a copy of the initialization mask.
1470        let init = src_alloc.init_mask().prepare_copy(src_range);
1471
1472        // Destination alloc preparations...
1473        let (dest_alloc, machine) = self.get_alloc_raw_mut(dest_alloc_id)?;
1474        let dest_range = alloc_range(dest_offset, size * num_copies);
1475        // ...and access hooks.
1476        M::before_alloc_access(tcx, machine, dest_alloc_id)?;
1477        M::before_memory_write(
1478            tcx,
1479            machine,
1480            &mut dest_alloc.extra,
1481            dest,
1482            (dest_alloc_id, dest_prov),
1483            dest_range,
1484        )?;
1485        // Yes we do overwrite all bytes in `dest_bytes`.
1486        let dest_bytes = dest_alloc
1487            .get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range)
1488            .map_err(|e| e.to_interp_error(dest_alloc_id))?
1489            .as_mut_ptr();
1490
1491        if init.no_bytes_init() {
1492            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
1493            // is marked as uninitialized but we otherwise omit changing the byte representation which may
1494            // be arbitrary for uninitialized bytes.
1495            // This also avoids writing to the target bytes so that the backing allocation is never
1496            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
1497            // operating systems this can avoid physically allocating the page.
1498            dest_alloc
1499                .write_uninit(&tcx, dest_range)
1500                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
1501            // We can forget about the provenance, this is all not initialized anyway.
1502            return interp_ok(());
1503        }
1504
1505        // SAFETY: The above indexing would have panicked if there weren't at least `size` bytes
1506        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
1507        // `dest` could possibly overlap.
1508        // The pointers above remain valid even if the `HashMap` table is moved around because they
1509        // point into the `Vec` storing the bytes.
1510        unsafe {
1511            if src_alloc_id == dest_alloc_id {
1512                if nonoverlapping {
1513                    // `Size` additions
1514                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
1515                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
1516                    {
1517                        throw_ub_custom!(fluent::const_eval_copy_nonoverlapping_overlapping);
1518                    }
1519                }
1520            }
1521            if num_copies > 1 {
1522                assert!(nonoverlapping, "multi-copy only supported in non-overlapping mode");
1523            }
1524
1525            let size_in_bytes = size.bytes_usize();
1526            // For particularly large arrays (where this is perf-sensitive) it's common that
1527            // we're writing a single byte repeatedly. So, optimize that case to a memset.
1528            if size_in_bytes == 1 {
1529                debug_assert!(num_copies >= 1); // we already handled the zero-sized cases above.
1530                // SAFETY: `src_bytes` would be read from anyway by `copy` below (num_copies >= 1).
1531                let value = *src_bytes;
1532                dest_bytes.write_bytes(value, (size * num_copies).bytes_usize());
1533            } else if src_alloc_id == dest_alloc_id {
1534                let mut dest_ptr = dest_bytes;
1535                for _ in 0..num_copies {
1536                    // Here we rely on `src` and `dest` being non-overlapping if there is more than
1537                    // one copy.
1538                    ptr::copy(src_bytes, dest_ptr, size_in_bytes);
1539                    dest_ptr = dest_ptr.add(size_in_bytes);
1540                }
1541            } else {
1542                let mut dest_ptr = dest_bytes;
1543                for _ in 0..num_copies {
1544                    ptr::copy_nonoverlapping(src_bytes, dest_ptr, size_in_bytes);
1545                    dest_ptr = dest_ptr.add(size_in_bytes);
1546                }
1547            }
1548        }
1549
1550        // now fill in all the "init" data
1551        dest_alloc.init_mask_apply_copy(
1552            init,
1553            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
1554            num_copies,
1555        );
1556        // copy the provenance to the destination
1557        dest_alloc.provenance_apply_copy(provenance);
1558
1559        interp_ok(())
1560    }
1561}
1562
1563/// Machine pointer introspection.
1564impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
1565    /// Test if this value might be null.
1566    /// If the machine does not support ptr-to-int casts, this is conservative.
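    /// That is, it may return `true` ("might be null") for a pointer that can never actually be null.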
1567    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
1568        match scalar.try_to_scalar_int() {
1569            Ok(int) => interp_ok(int.is_null()),
1570            Err(_) => {
1571                // We can't cast this pointer to an integer; that can only happen during CTFE.
1572                let ptr = scalar.to_pointer(self)?;
1573                match self.ptr_try_get_alloc_id(ptr, 0) {
1574                    Ok((alloc_id, offset, _)) => {
1575                        let info = self.get_alloc_info(alloc_id);
1576                        // If the pointer is in-bounds (including "at the end"), it is definitely not null.
1577                        if offset <= info.size {
1578                            return interp_ok(false);
1579                        }
1580                        // If the allocation is N-aligned, and the offset is not divisible by N,
1581                        // then `base + offset` has a non-zero remainder after division by `N`,
1582                        // which means `base + offset` cannot be null.
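                        // For example, with `align = 8` and `offset = 3`: the base address is a
                        // multiple of 8, so `base + 3` has remainder 3 modulo 8 and is never 0.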
1583                        if !offset.bytes().is_multiple_of(info.align.bytes()) {
1584                            return interp_ok(false);
1585                        }
1586                        // We don't know enough, this might be null.
1587                        interp_ok(true)
1588                    }
1589                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
1590                }
1591            }
1592        }
1593    }
1594
1595    /// Turns a "maybe pointer" into a proper pointer (and some information
1596    /// about where it points), or an absolute address.
1597    ///
1598    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
1599    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
1600    /// where a wildcard pointer sits right in between two allocations.
1601    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
1602    /// for handling wildcard pointers.
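    /// (Sketch of the intended disambiguation, not a guarantee: if allocation `A` ends exactly
    /// where allocation `B` begins and a wildcard pointer carries that address, a negative `size`
    /// suggests an access extending towards lower addresses, i.e. into `A`, while a zero or
    /// positive `size` resolves towards `B`.)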
1603    ///
1604    /// The result must be used immediately; it is not allowed to convert
1605    /// the returned data back into a `Pointer` and store that in machine state.
1606    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
1607    /// we don't have an operation to turn it back into `M::Provenance`.)
1608    pub fn ptr_try_get_alloc_id(
1609        &self,
1610        ptr: Pointer<Option<M::Provenance>>,
1611        size: i64,
1612    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
1613        match ptr.into_pointer_or_addr() {
1614            Ok(ptr) => match M::ptr_get_alloc(self, ptr, size) {
1615                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
1616                None => {
1617                    assert!(M::Provenance::OFFSET_IS_ADDR);
1618                    // Offset is absolute, as we just asserted.
1619                    let (_, addr) = ptr.into_raw_parts();
1620                    Err(addr.bytes())
1621                }
1622            },
1623            Err(addr) => Err(addr.bytes()),
1624        }
1625    }
1626
1627    /// Turns a "maybe pointer" into a proper pointer (and some information about where it points).
1628    ///
1629    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
1630    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
1631    /// where a wildcard pointer sits right in between two allocations.
1632    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
1633    /// for handling wildcard pointers.
1634    ///
1635    /// The result must be used immediately; it is not allowed to convert
1636    /// the returned data back into a `Pointer` and store that in machine state.
1637    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
1638    /// we don't have an operation to turn it back into `M::Provenance`.)
1639    #[inline(always)]
1640    pub fn ptr_get_alloc_id(
1641        &self,
1642        ptr: Pointer<Option<M::Provenance>>,
1643        size: i64,
1644    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
1645        self.ptr_try_get_alloc_id(ptr, size)
1646            .map_err(|offset| {
1647                err_ub!(DanglingIntPointer {
1648                    addr: offset,
1649                    inbounds_size: size,
1650                    msg: CheckInAllocMsg::Dereferenceable
1651                })
1652            })
1653            .into()
1654    }
1655}