rustc_const_eval/interpret/memory.rs

//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
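//!
//! A minimal sketch of that pattern (illustrative only; `ecx`, `ptr`, `size`, and
//! `align` are stand-ins, not names defined in this module):
//!
//! ```ignore (illustrative)
//! // Check alignment first, *then* short-circuit the zero-sized case:
//! ecx.check_ptr_align(ptr, align)?;
//! if size.bytes() == 0 {
//!     return interp_ok(());
//! }
//! ```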

use std::assert_matches::assert_matches;
use std::borrow::{Borrow, Cow};
use std::collections::VecDeque;
use std::{fmt, mem, ptr};

use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_middle::bug;
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use tracing::{debug, instrument, trace};

use super::{
    AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg,
    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
    err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
};
use crate::fluent_generated as fluent;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}
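// A machine plugs its own kinds in via `MemoryKind::Machine`. A hypothetical sketch
// (the machine-side names below are illustrative, not defined in this crate):
//
//     #[derive(Debug, PartialEq, Copy, Clone)]
//     enum MyMachineKind { Heap, Ffi }
//     let kind: MemoryKind<MyMachineKind> = MemoryKind::Machine(MyMachineKind::Heap);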

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{m}"),
        }
    }
}

/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum AllocKind {
    /// A regular live data allocation.
    LiveData,
    /// A function allocation (that fn ptrs point to).
    Function,
    /// A (symbolic) vtable allocation.
    VTable,
    /// A dead allocation.
    Dead,
}

/// Metadata about an `AllocId`.
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct AllocInfo {
    pub size: Size,
    pub align: Align,
    pub kind: AllocKind,
    pub mutbl: Mutability,
}

impl AllocInfo {
    fn new(size: Size, align: Align, kind: AllocKind, mutbl: Mutability) -> Self {
        Self { size, align, kind, mutbl }
    }
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => interp_ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'tcx, M: Machine<'tcx>> {
    /// Allocations local to this instance of the interpreter. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,

    /// This stores whether we are currently doing reads purely for the purpose of validation.
    /// Those reads do not trigger the machine's hooks for memory reads.
    /// Needless to say, this must only be set with great care!
    validation_in_progress: bool,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxIndexMap::default(),
            dead_alloc_map: FxIndexMap::default(),
            validation_in_progress: false,
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
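    ///
    /// A hypothetical use (illustrative only; `ecx` and `alloc_id` are stand-ins):
    ///
    /// ```ignore (illustrative)
    /// // Turn an untagged `tcx` pointer into a machine pointer:
    /// let machine_ptr = ecx.global_root_pointer(Pointer::from(alloc_id))?;
    /// ```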
    #[inline]
    pub fn global_root_pointer(
        &self,
        ptr: Pointer<CtfeProvenance>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc_id = ptr.provenance.alloc_id();
        // We need to handle `extern static`.
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_pointer(self, def_id);
            }
            None => {
                assert!(
                    self.memory.extra_fn_ptr_map.contains_key(&alloc_id),
                    "{alloc_id:?} is neither global nor a function pointer"
                );
            }
            _ => {}
        }
        // And we need to get the provenance.
        M::adjust_alloc_root_pointer(self, ptr, M::GLOBAL_KIND.map(MemoryKind::Machine))
    }

    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
        let id = match fn_val {
            FnVal::Instance(instance) => {
                let salt = M::get_global_alloc_salt(self, Some(instance));
                self.tcx.reserve_and_set_fn_alloc(instance, salt)
            }
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right root pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_root_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc = if M::PANIC_ON_ALLOC_FAIL {
            Allocation::new(size, align, init)
        } else {
            Allocation::try_new(size, align, init)?
        };
        self.insert_allocation(alloc, kind)
    }

    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc = Allocation::from_bytes(bytes, align, mutability);
        self.insert_allocation(alloc, kind)
    }

    pub fn insert_allocation(
        &mut self,
        alloc: Allocation<M::Provenance, (), M::Bytes>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        assert!(alloc.size() <= self.max_size_of_val());
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        // This cannot be merged with the `adjust_global_allocation` code path
        // since here we have an allocation that already uses `M::Bytes`.
        let extra = M::init_local_allocation(self, id, kind, alloc.size(), alloc.align)?;
        let alloc = alloc.with_extra(extra);
        self.memory.alloc_map.insert(id, (kind, alloc));
        M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind))
    }

    /// If this grows the allocation, `init_growth` determines
    /// whether the additional space will be initialized.
    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init_growth: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub_custom!(
                fluent::const_eval_realloc_or_alloc_with_offset,
                ptr = format!("{ptr:?}"),
                kind = "realloc"
            );
        }

        // For simplicity's sake, we implement `reallocate` as "alloc, copy, dealloc".
        // This happens so rarely that the perf advantage is outweighed by the maintenance cost.
        // If requested, we zero-init the entire allocation, to ensure that a growing
        // allocation has its new bytes properly set. For the part that is copied,
        // `mem_copy` below will de-initialize things as necessary.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind, init_growth)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        interp_ok(new_ptr)
    }

    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr, 0)?;
        trace!("deallocating: {alloc_id:?}");

        if offset.bytes() != 0 {
            throw_ub_custom!(
                fluent::const_eval_realloc_or_alloc_with_offset,
                ptr = format!("{ptr:?}"),
                kind = "dealloc",
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function { .. }) => {
                    err_ub_custom!(
                        fluent::const_eval_invalid_dealloc,
                        alloc_id = alloc_id,
                        kind = "fn",
                    )
                }
                Some(GlobalAlloc::VTable(..)) => {
                    err_ub_custom!(
                        fluent::const_eval_invalid_dealloc,
                        alloc_id = alloc_id,
                        kind = "vtable",
                    )
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_custom!(
                        fluent::const_eval_invalid_dealloc,
                        alloc_id = alloc_id,
                        kind = "static_mem"
                    )
                }
                None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccessTest)),
            })
            .into();
        };

        if alloc.mutability.is_not() {
            throw_ub_custom!(fluent::const_eval_dealloc_immutable, alloc = alloc_id,);
        }
        if alloc_kind != kind {
            throw_ub_custom!(
                fluent::const_eval_dealloc_kind_mismatch,
                alloc = alloc_id,
                alloc_kind = format!("{alloc_kind}"),
                kind = format!("{kind}"),
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_custom!(
                    fluent::const_eval_dealloc_incorrect_layout,
                    alloc = alloc_id,
                    size = alloc.size().bytes(),
                    align = alloc.align.bytes(),
                    size_found = size.bytes(),
                    align_found = align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::before_memory_deallocation(
            self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            ptr,
            (alloc_id, prov),
            size,
            alloc.align,
            kind,
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        interp_ok(())
    }

    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    #[inline(always)]
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(
            self,
            ptr,
            size,
            CheckInAllocMsg::MemoryAccessTest,
            |this, alloc_id, offset, prov| {
                let (size, align) = this
                    .get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccessTest)?;
                interp_ok((size, align, (alloc_id, offset, prov)))
            },
        )
    }

    /// Check if the given pointer points to live memory of the given `size`.
    /// The caller can control the error message for the out-of-bounds case.
    #[inline(always)]
    pub fn check_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Check whether the given pointer points to live memory for a signed amount of bytes.
    /// A negative amount means that the given range of memory to the left of the pointer
    /// needs to be dereferenceable.
    pub fn check_ptr_access_signed(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. Supports signed sizes for checks "to the left" of
    /// a pointer.
    ///
    /// `alloc_size` will only get called for non-zero-sized accesses.
    ///
    /// Returns `None` if and only if the size is 0.
    fn check_and_deref_ptr<T, R: Borrow<Self>>(
        this: R,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(
            R,
            AllocId,
            Size,
            M::ProvenanceExtra,
        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        // Everything is okay with size 0.
        if size == 0 {
            return interp_ok(None);
        }

        interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
            Err(addr) => {
                // We couldn't get a proper allocation.
                throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
            }
            Ok((alloc_id, offset, prov)) => {
                let tcx = this.borrow().tcx;
                let (alloc_size, _alloc_align, ret_val) = alloc_size(this, alloc_id, offset, prov)?;
                let offset = offset.bytes();
                // Compute absolute begin and end of the range.
                let (begin, end) = if size >= 0 {
                    (Some(offset), offset.checked_add(size as u64))
                } else {
                    (offset.checked_sub(size.unsigned_abs()), Some(offset))
                };
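                // (For example, `offset == 16, size == -4` yields `begin == Some(12)`,
                // `end == Some(16)`; an overflowing `offset + size` makes `end == None`,
                // which fails the bounds check below.)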
                // Ensure both are within bounds.
                let in_bounds = begin.is_some() && end.is_some_and(|e| e <= alloc_size.bytes());
                if !in_bounds {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: tcx.sign_extend_to_target_isize(offset),
                        inbounds_size: size,
                        msg,
                    })
                }

                Some(ret_val)
            }
        })
    }

    pub(super) fn check_misalign(
        &self,
        misaligned: Option<Misalignment>,
        msg: CheckAlignMsg,
    ) -> InterpResult<'tcx> {
        if let Some(misaligned) = misaligned {
            throw_ub!(AlignmentCheckFailed(misaligned, msg))
        }
        interp_ok(())
    }

    pub(super) fn is_ptr_misaligned(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> Option<Misalignment> {
        if !M::enforce_alignment(self) || align.bytes() == 1 {
            return None;
        }

        #[inline]
        fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
            if offset % align.bytes() == 0 {
                None
            } else {
                // The largest power of two that divides `offset`.
                let offset_pow2 = 1 << offset.trailing_zeros();
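                // (e.g. `offset == 12 == 0b1100` has two trailing zeros, so
                // `offset_pow2 == 4`: the pointer is only guaranteed 4-aligned.)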
                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
            }
        }

        match self.ptr_try_get_alloc_id(ptr, 0) {
            Err(addr) => is_offset_misaligned(addr, align),
            Ok((alloc_id, offset, _prov)) => {
                let alloc_info = self.get_alloc_info(alloc_id);
                if let Some(misalign) = M::alignment_check(
                    self,
                    alloc_id,
                    alloc_info.align,
                    alloc_info.kind,
                    offset,
                    align,
                ) {
                    Some(misalign)
                } else if M::Provenance::OFFSET_IS_ADDR {
                    is_offset_misaligned(ptr.addr().bytes(), align)
                } else {
                    // Check allocation alignment and offset alignment.
                    if alloc_info.align.bytes() < align.bytes() {
                        Some(Misalignment { has: alloc_info.align, required: align })
                    } else {
                        is_offset_misaligned(offset.bytes(), align)
                    }
                }
            }
        }
    }

    /// Checks a pointer for misalignment.
    ///
    /// The error assumes this is checking the pointer used directly for an access.
    pub fn check_ptr_align(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> InterpResult<'tcx> {
        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
        // Unlike all the other GC helpers where we check if an `AllocId` is found in the interpreter or
        // is live, here all the IDs in the map are for dead allocations so we don't
        // need to check for liveness.
        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
    }
}

/// Allocation accessors
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer provenance, so it is indirected through
    /// `M::adjust_allocation`.
    fn get_global_alloc(
        &self,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
            None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccessTest)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId`s that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if self.tcx.is_foreign_item(def_id) {
                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
                    // referencing arbitrary (declared) extern statics.
                    throw_unsup!(ExternStatic(def_id));
                }

                // We don't give a span -- statics don't need that, they cannot be generic or associated.
                let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
                (val, Some(def_id))
            }
        };
        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        M::adjust_global_allocation(
            self,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc.inner(),
        )
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    fn get_alloc_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
        let a = self.memory.alloc_map.get_or(id, || {
            // We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
            // so we use `report_err` for that.
            let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                            not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => interp_ok(&a.1),
            Err(a) => a.into(),
        }
    }

    /// Gives raw, immutable access to the `Allocation` address, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
        let alloc = self.get_alloc_raw(id)?;
        interp_ok(alloc.get_bytes_unchecked_raw())
    }

    /// Bounds-checked *but not align-checked* allocation access.
    pub fn get_ptr_alloc<'a>(
        &'a self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        let ptr_and_alloc = Self::check_and_deref_ptr(
            self,
            ptr,
            size_i64,
            CheckInAllocMsg::MemoryAccessTest,
            |this, alloc_id, offset, prov| {
                let alloc = this.get_alloc_raw(alloc_id)?;
                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
            },
        )?;
        // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
        // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
        // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
        if !self.memory.validation_in_progress {
            if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
                M::before_alloc_read(self, alloc_id)?;
            }
        }

        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            if !self.memory.validation_in_progress {
                M::before_memory_read(
                    self.tcx,
                    &self.machine,
                    &alloc.extra,
                    ptr,
                    (alloc_id, prov),
                    range,
                )?;
            }
            interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
        } else {
            interp_ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
        interp_ok(&self.get_alloc_raw(id)?.extra)
    }

    /// Return the `mutability` field of the given allocation.
    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
        interp_ok(self.get_alloc_raw(id)?.mutability)
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
    /// allocation.
    fn get_alloc_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
        // We have "NLL problem case #3" here, which cannot be worked around without loss of
        // efficiency even for the common case where the key is in the map.
        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`, and that boils down to
        // Miri's `adjust_alloc_root_pointer` needing to look up the size of the allocation.
        // It could be avoided with a totally separate codepath in Miri for handling the absolute address
        // of global allocations, but that's not worth it.)
        if self.memory.alloc_map.get_mut(id).is_none() {
            // Slow path.
            // Allocation not found locally; look it up globally.
            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                    not expect that to happen",
            );
            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
        }

        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
        if alloc.mutability.is_not() {
            throw_ub!(WriteToReadOnly(id))
        }
        interp_ok((alloc, &mut self.machine))
    }

    /// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    pub fn get_alloc_bytes_unchecked_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, *mut u8> {
        let alloc = self.get_alloc_raw_mut(id)?.0;
        interp_ok(alloc.get_bytes_unchecked_raw_mut())
    }

    /// Bounds-checked *but not align-checked* allocation access.
    pub fn get_ptr_alloc_mut<'a>(
        &'a mut self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let tcx = self.tcx;
        let validation_in_progress = self.memory.validation_in_progress;

        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        let ptr_and_alloc = Self::check_and_deref_ptr(
            self,
            ptr,
            size_i64,
            CheckInAllocMsg::MemoryAccessTest,
            |this, alloc_id, offset, prov| {
                let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
            },
        )?;

        if let Some((alloc_id, offset, prov, alloc, machine)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            if !validation_in_progress {
                M::before_memory_write(
                    tcx,
                    machine,
                    &mut alloc.extra,
                    ptr,
                    (alloc_id, prov),
                    range,
                )?;
            }
            interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
        } else {
            interp_ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra_mut<'a>(
        &'a mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
        interp_ok((&mut alloc.extra, machine))
    }

    /// Check whether an allocation is live. This is faster than calling
    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
    pub fn is_alloc_live(&self, id: AllocId) -> bool {
        self.memory.alloc_map.contains_key_ref(&id)
            || self.memory.extra_fn_ptr_map.contains_key(&id)
            // We check `tcx` last as that has to acquire a lock in `many-seeds` mode.
            // This also matches the order in `get_alloc_info`.
            || self.tcx.try_get_global_alloc(id).is_some()
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
            return AllocInfo::new(
                alloc.size(),
                alloc.align,
                AllocKind::LiveData,
                alloc.mutability,
            );
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return AllocInfo::new(Size::ZERO, Align::ONE, AllocKind::Function, Mutability::Not);
        }

        // # Global allocations
        if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
            let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
            let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
            let kind = match global_alloc {
                GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
                GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
                GlobalAlloc::VTable { .. } => AllocKind::VTable,
            };
            return AllocInfo::new(size, align, kind, mutbl);
        }

        // # Dead pointers
        let (size, align) = *self
            .memory
            .dead_alloc_map
            .get(&id)
            .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
        AllocInfo::new(size, align, AllocKind::Dead, Mutability::Not)
    }

    /// Obtain the size and alignment of a *live* allocation.
    fn get_live_alloc_size_and_align(
        &self,
        id: AllocId,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, (Size, Align)> {
        let info = self.get_alloc_info(id);
        if matches!(info.kind, AllocKind::Dead) {
            throw_ub!(PointerUseAfterFree(id, msg))
        }
        interp_ok((info.size, info.align))
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.try_get_global_alloc(id) {
                Some(GlobalAlloc::Function { instance, .. }) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_ptr_fn(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("get_ptr_fn({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
        }
        self.get_fn_alloc(alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
            .into()
    }

    /// Get the dynamic type of the given vtable pointer.
    /// If `expected_trait` is `Some`, it must be a vtable for the given trait.
    pub fn get_ptr_vtable_ty(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        expected_trait: Option<&'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>>,
    ) -> InterpResult<'tcx, Ty<'tcx>> {
        trace!("get_ptr_vtable({:?})", ptr);
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        }
        let Some(GlobalAlloc::VTable(ty, vtable_dyn_type)) =
            self.tcx.try_get_global_alloc(alloc_id)
        else {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        };
        if let Some(expected_dyn_type) = expected_trait {
            self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
        }
        interp_ok(ty)
    }

    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
        interp_ok(())
    }

    /// Handle the effect an FFI call might have on the state of allocations.
    /// This overapproximates the modifications which external code might make to memory:
    /// we mark all reachable allocations as initialized, mark all reachable provenances as
    /// exposed, and overwrite them with `Provenance::WILDCARD`.
    ///
    /// The allocations in `ids` are assumed to be already exposed.
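    ///
    /// A hypothetical call site (illustrative only; `arg_alloc_id` is a stand-in):
    ///
    /// ```ignore (illustrative)
    /// // Before jumping into native code, expose everything reachable from the arguments:
    /// ecx.prepare_for_native_call(vec![arg_alloc_id])?;
    /// ```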
    pub fn prepare_for_native_call(&mut self, ids: Vec<AllocId>) -> InterpResult<'tcx> {
        let mut done = FxHashSet::default();
        let mut todo = ids;
        while let Some(id) = todo.pop() {
            if !done.insert(id) {
                // We already saw this allocation before, don't process it again.
                continue;
            }
            let info = self.get_alloc_info(id);

            // If there is no data behind this pointer, skip this.
            if !matches!(info.kind, AllocKind::LiveData) {
                continue;
            }

            // Expose all provenances in this allocation, and add them to `todo`.
            let alloc = self.get_alloc_raw(id)?;
            for prov in alloc.provenance().provenances() {
                M::expose_provenance(self, prov)?;
                if let Some(id) = prov.get_alloc_id() {
                    todo.push(id);
                }
            }
            // Also expose the provenance of the interpreter-level allocation, so it can
            // be read by FFI. The `black_box` is defensive programming as LLVM likes
            // to (incorrectly) optimize away ptr2int casts whose result is unused.
            std::hint::black_box(alloc.get_bytes_unchecked_raw().expose_provenance());

            // Prepare for possible write from native code if mutable.
            if info.mutbl.is_mut() {
                self.get_alloc_raw_mut(id)?
                    .0
                    .prepare_for_native_write()
                    .map_err(|e| e.to_interp_error(id))?;
            }
        }
        interp_ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { ecx: self, allocs }
    }

    /// Print the allocation's bytes, without any nested allocations.
    pub fn print_alloc_bytes_for_diagnostics(&self, id: AllocId) -> String {
        // Using the "raw" access to avoid the `before_alloc_read` hook, we specifically
        // want to be able to read all memory for diagnostics, even if that is cyclic.
        let alloc = self.get_alloc_raw(id).unwrap();
        let mut bytes = String::new();
        if alloc.size() != Size::ZERO {
            bytes = "\n".into();
            // FIXME(translation) there might be pieces that are translatable.
            rustc_middle::mir::pretty::write_allocation_bytes(*self.tcx, alloc, &mut bytes, "    ")
                .unwrap();
        }
        bytes
    }

    /// Find leaked allocations, remove them from memory, and return them. Allocations reachable
    /// from `static_roots` or a `Global` allocation are not considered leaked, and neither are
    /// allocations whose kind's `may_leak()` returns true.
    ///
    /// This is highly destructive, no more execution can happen after this!
    pub fn take_leaked_allocations(
        &mut self,
        static_roots: impl FnOnce(&Self) -> &[AllocId],
    ) -> Vec<(AllocId, MemoryKind<M::MemoryKind>, Allocation<M::Provenance, M::AllocExtra, M::Bytes>)>
    {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> =
                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                    if Some(kind) == global_kind { Some(id) } else { None }
                });
            todo.extend(static_roots(self));
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation; add the allocations it points to to `todo`.
                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                        todo.extend(
                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
                        );
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaked: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let mut result = Vec::new();
        for &id in leaked.iter() {
            let (kind, alloc) = self.memory.alloc_map.remove(&id).unwrap();
            result.push((id, kind, alloc));
        }
        result
    }

    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
    ///
    /// We do this so Miri's allocation access tracking does not show the validation
    /// reads as spurious accesses.
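    ///
    /// A hypothetical use (illustrative only; `validate_some_place` is a stand-in):
    ///
    /// ```ignore (illustrative)
    /// // Validation reads inside the closure bypass the machine's read hooks:
    /// let result = ecx.run_for_validation(|ecx| validate_some_place(ecx));
    /// ```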
    pub fn run_for_validation<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
        // This deliberately uses `==` on `bool` to follow the pattern
        // `assert!(val.replace(new) == old)`.
        assert!(
            mem::replace(&mut self.memory.validation_in_progress, true) == false,
            "`validation_in_progress` was already set"
        );
        let res = f(self);
        assert!(
            mem::replace(&mut self.memory.validation_in_progress, false) == true,
            "`validation_in_progress` was unset by someone else"
        );
        res
    }

    pub(super) fn validation_in_progress(&self) -> bool {
        self.memory.validation_in_progress
    }
}

#[doc(hidden)]
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> {
    ecx: &'a InterpCx<'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Prov`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Prov, Extra, Bytes>,
        ) -> std::fmt::Result {
            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
            {
                allocs_to_print.push_back(alloc_id);
            }
            write!(fmt, "{}", display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{id:?}")?;
            match self.ecx.memory.alloc_map.get(id) {
                Some((kind, alloc)) => {
                    // normal alloc
                    write!(fmt, " ({kind}, ")?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        *self.ecx.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.ecx.tcx.try_get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                *self.ecx.tcx,
                                &mut allocs_to_print,
                                alloc.inner(),
                            )?;
                        }
                        Some(GlobalAlloc::Function { instance, .. }) => {
                            write!(fmt, " (fn: {instance})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
                            write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}

/// Reading and writing.
impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
{
    pub fn as_ref<'b>(&'b self) -> AllocRef<'b, 'tcx, Prov, Extra, Bytes> {
        AllocRef { alloc: self.alloc, range: self.range, tcx: self.tcx, alloc_id: self.alloc_id }
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
        let range = self.range.subrange(range);
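        // (e.g. if `self.range` starts at offset 8 within the allocation and `range`
        // starts at 4, the resulting absolute range starts at offset 12.)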
        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);

        self.alloc
            .write_scalar(&self.tcx, range, val)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
    }

    /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
    pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
        let range = self.range.subrange(range);

        self.alloc
            .write_uninit(&self.tcx, range)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// Mark the entire referenced range as uninitialized.
    pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
        self.alloc
            .write_uninit(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// Remove all provenance in the referenced range.
1231    pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
1232        self.alloc
1233            .clear_provenance(&self.tcx, self.range)
1234            .map_err(|e| e.to_interp_error(self.alloc_id))
1235            .into()
1236    }
1237}
1238
1239impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
1240    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_scalar(
        &self,
        range: AllocRange,
        read_provenance: bool,
    ) -> InterpResult<'tcx, Scalar<Prov>> {
        let range = self.range.subrange(range);
        self.alloc
            .read_scalar(&self.tcx, range, read_provenance)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(range, /*read_provenance*/ false)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(
            alloc_range(offset, self.tcx.data_layout().pointer_size),
            /*read_provenance*/ true,
        )
    }

    /// Returns the bytes of the entire referenced range, with provenance stripped.
    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
        self.alloc
            .get_bytes_strip_provenance(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
    pub fn has_provenance(&self) -> bool {
        !self.alloc.provenance().range_empty(self.range, &self.tcx)
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Reads the given number of bytes from memory, and strips their provenance if possible.
    /// Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
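    ///
    /// Sketch (hypothetical `ecx` and `ptr`):
    ///
    /// ```ignore (illustrative)
    /// // Read 16 bytes behind `ptr`; fails if any byte carries provenance
    /// // that cannot be stripped, or if the access is out of bounds.
    /// let bytes: &[u8] = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(16))?;
    /// ```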
    pub fn read_bytes_ptr_strip_provenance(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
            // zero-sized access
            return interp_ok(&[]);
        };
        // Side-step `AllocRef` and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        interp_ok(
            alloc_ref
                .alloc
                .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
                .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
        )
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
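    ///
    /// The iterator must report its exact length via `size_hint`. Sketch
    /// (hypothetical `ecx` and `ptr`):
    ///
    /// ```ignore (illustrative)
    /// // Arrays yield exact-size iterators, so this writes exactly 4 bytes.
    /// ecx.write_bytes_ptr(ptr, [0u8, 1, 2, 3])?;
    /// ```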
    pub fn write_bytes_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(len);
        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
            // zero-sized access
            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
            return interp_ok(());
        };

        // Side-step `AllocRef` and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
        let alloc_id = alloc_ref.alloc_id;
        let bytes = alloc_ref
            .alloc
            .get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range)
            .map_err(move |e| e.to_interp_error(alloc_id))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
        interp_ok(())
    }

    pub fn mem_copy(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        dest: Pointer<Option<M::Provenance>>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Performs `num_copies` many copies of `size` many bytes from `src` to `dest + i*size` (where
    /// `i` is the index of the copy).
    ///
    /// Either `nonoverlapping` must be true or `num_copies` must be 1; doing repeated copies that
    /// may overlap is not supported.
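    ///
    /// Sketch (hypothetical `ecx`, `src`, `dest`; `dest` must have room for
    /// `4 * size` bytes):
    ///
    /// ```ignore (illustrative)
    /// // Copy the `size` bytes at `src` to `dest`, `dest + size`, ..., `dest + 3*size`.
    /// ecx.mem_copy_repeatedly(src, dest, size, 4, /*nonoverlapping*/ true)?;
    /// ```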
    pub fn mem_copy_repeatedly(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        dest: Pointer<Option<M::Provenance>>,
        size: Size,
        num_copies: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let tcx = self.tcx;
        // We need to do our own bounds-checks.
        let src_parts = self.get_ptr_access(src, size)?;
        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication

        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
        // and once below to get the underlying `&[mut] Allocation`.

        // Source alloc preparations and access hooks.
        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
            // Zero-sized *source*: that means `dest` is also zero-sized and we have nothing to do.
            return interp_ok(());
        };
        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
        let src_range = alloc_range(src_offset, size);
        assert!(!self.memory.validation_in_progress, "we can't be copying during validation");
        // For the overlapping case, it is crucial that we trigger the read hook
        // before the write hook -- the aliasing model cares about the order.
        M::before_memory_read(
            tcx,
            &self.machine,
            &src_alloc.extra,
            src,
            (src_alloc_id, src_prov),
            src_range,
        )?;
        // We need the `dest` ptr for the next operation, so we get it now.
        // We already did the source checks and called the hooks, so we are good to return early.
        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
            // Zero-sized *destination*.
            return interp_ok(());
        };

        // Prepare getting source provenance.
        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
        // First copy the provenance to a temporary buffer, because
        // `get_bytes_mut` will clear the provenance, which is correct,
        // since we don't want to keep any provenance at the target.
        // This will also error if copying partial provenance is not supported.
        let provenance = src_alloc
            .provenance()
            .prepare_copy(src_range, dest_offset, num_copies, self)
            .map_err(|e| e.to_interp_error(dest_alloc_id))?;
        // Prepare a copy of the initialization mask.
        let init = src_alloc.init_mask().prepare_copy(src_range);

        // Destination alloc preparations and access hooks.
        let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
        let dest_range = alloc_range(dest_offset, size * num_copies);
        M::before_memory_write(
            tcx,
            extra,
            &mut dest_alloc.extra,
            dest,
            (dest_alloc_id, dest_prov),
            dest_range,
        )?;
        // Yes, we do overwrite all bytes in `dest_bytes`.
        let dest_bytes = dest_alloc
            .get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range)
            .map_err(|e| e.to_interp_error(dest_alloc_id))?
            .as_mut_ptr();

        if init.no_bytes_init() {
            // Fast path: if all bytes are `uninit`, then there is nothing to copy. The target range
            // is marked as uninitialized, but we otherwise leave the byte representation unchanged,
            // since it may be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes, so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On
            // contemporary operating systems, this can avoid physically allocating the page.
            dest_alloc
                .write_uninit(&tcx, dest_range)
                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
            // We can forget about the provenance; this is all not initialized anyway.
            return interp_ok(());
        }

        // SAFETY: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src_alloc_id == dest_alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
                    {
                        throw_ub_custom!(fluent::const_eval_copy_nonoverlapping_overlapping);
                    }
                }
            }
            if num_copies > 1 {
                assert!(nonoverlapping, "multi-copy only supported in non-overlapping mode");
            }

            let size_in_bytes = size.bytes_usize();
            // For particularly large arrays (where this is perf-sensitive) it's common that
            // we're writing a single byte repeatedly. So, optimize that case to a memset.
            if size_in_bytes == 1 {
                debug_assert!(num_copies >= 1); // we already handled the zero-sized cases above
                // SAFETY: `src_bytes` would be read from anyway by `copy` below (num_copies >= 1).
                let value = *src_bytes;
                dest_bytes.write_bytes(value, (size * num_copies).bytes_usize());
            } else if src_alloc_id == dest_alloc_id {
                let mut dest_ptr = dest_bytes;
                for _ in 0..num_copies {
                    // Here we rely on `src` and `dest` being non-overlapping if there is more than
                    // one copy.
                    ptr::copy(src_bytes, dest_ptr, size_in_bytes);
                    dest_ptr = dest_ptr.add(size_in_bytes);
                }
            } else {
                let mut dest_ptr = dest_bytes;
                for _ in 0..num_copies {
                    ptr::copy_nonoverlapping(src_bytes, dest_ptr, size_in_bytes);
                    dest_ptr = dest_ptr.add(size_in_bytes);
                }
            }
        }

        // Now fill in all the "init" data.
        dest_alloc.init_mask_apply_copy(
            init,
            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
            num_copies,
        );
        // Copy the provenance to the destination.
        dest_alloc.provenance_apply_copy(provenance);

        interp_ok(())
    }
}

/// Machine pointer introspection.
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Test if this value might be null.
    /// If the machine does not support ptr-to-int casts, this is conservative.
    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
        match scalar.try_to_scalar_int() {
            Ok(int) => interp_ok(int.is_null()),
            Err(_) => {
                // We can't cast this pointer to an integer. Can only happen during CTFE.
                let ptr = scalar.to_pointer(self)?;
                match self.ptr_try_get_alloc_id(ptr, 0) {
                    Ok((alloc_id, offset, _)) => {
                        let info = self.get_alloc_info(alloc_id);
                        // If the pointer is in-bounds (including "at the end"), it is definitely not null.
                        if offset <= info.size {
                            return interp_ok(false);
                        }
                        // If the allocation is N-aligned, and the offset is not divisible by N,
                        // then `base + offset` has a non-zero remainder after division by `N`,
                        // which means `base + offset` cannot be null.
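                        // (For example, with `align == 4` and `offset == 6`: `base % 4 == 0`,
                        // so `(base + 6) % 4 == 2 != 0`, and since null is `0`, which is divisible
                        // by 4, `base + 6` cannot be null.)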
                        if offset.bytes() % info.align.bytes() != 0 {
                            return interp_ok(false);
                        }
                        // We don't know enough; this might be null.
                        interp_ok(true)
                    }
                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
                }
            }
        }
    }

    /// Turns a "maybe pointer" into a proper pointer (and some information
    /// about where it points), or an absolute address.
    ///
    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
    /// where a wildcard pointer sits right in between two allocations.
    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
    /// for handling wildcard pointers.
    ///
    /// The result must be used immediately; it is not allowed to convert
    /// the returned data back into a `Pointer` and store that in machine state.
    /// (In fact that's not even possible, since `M::ProvenanceExtra` is generic and
    /// we don't have an operation to turn it back into `M::Provenance`.)
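    ///
    /// Sketch of the two outcomes (hypothetical `ecx` and `ptr`):
    ///
    /// ```ignore (illustrative)
    /// match ecx.ptr_try_get_alloc_id(ptr, 0) {
    ///     Ok((alloc_id, offset, _extra)) => { /* points `offset` bytes into `alloc_id` */ }
    ///     Err(addr) => { /* no provenance: just the absolute address `addr` */ }
    /// }
    /// ```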
    pub fn ptr_try_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
        match ptr.into_pointer_or_addr() {
            Ok(ptr) => match M::ptr_get_alloc(self, ptr, size) {
                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
                None => {
                    assert!(M::Provenance::OFFSET_IS_ADDR);
                    let (_, addr) = ptr.into_parts();
                    Err(addr.bytes())
                }
            },
            Err(addr) => Err(addr.bytes()),
        }
    }

    /// Turns a "maybe pointer" into a proper pointer (and some information about where it points).
    ///
    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
    /// where a wildcard pointer sits right in between two allocations.
    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
    /// for handling wildcard pointers.
    ///
    /// The result must be used immediately; it is not allowed to convert
    /// the returned data back into a `Pointer` and store that in machine state.
    /// (In fact that's not even possible, since `M::ProvenanceExtra` is generic and
    /// we don't have an operation to turn it back into `M::Provenance`.)
    #[inline(always)]
    pub fn ptr_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
        self.ptr_try_get_alloc_id(ptr, size)
            .map_err(|offset| {
                err_ub!(DanglingIntPointer {
                    addr: offset,
                    inbounds_size: size,
                    msg: CheckInAllocMsg::InboundsTest
                })
            })
            .into()
    }
}