
rustc_const_eval/interpret/memory.rs

//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
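//!
//! Illustrative sketch only (hypothetical caller, not part of this module) of
//! the required ordering for a possibly-empty operation:
//!
//! ```ignore (illustrative sketch)
//! // Alignment must be validated even when `size == 0`: a zero-sized access
//! // through a misaligned pointer is still UB.
//! ecx.check_ptr_align(ptr, align)?;
//! if size.bytes() == 0 {
//!     return interp_ok(()); // only now may we short-circuit the empty case
//! }
//! ```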

use std::borrow::{Borrow, Cow};
use std::cell::Cell;
use std::collections::VecDeque;
use std::{fmt, ptr};

use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::assert_matches;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::inline_fluent;
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, throw_ub_format};
use tracing::{debug, instrument, trace};

use super::{
    AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg,
    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
    err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
};
use crate::const_eval::ConstEvalErrKind;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{m}"),
        }
    }
}
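
// For illustration only: an interpreter backend ("machine") distinguishes its own
// allocations via the `Machine(T)` variant. A hypothetical machine kind could look
// like this (names are examples, not part of this crate):
//
//     #[derive(Debug, PartialEq, Copy, Clone)]
//     enum MyMachineKind { Heap, Mmap }
//
// Such a machine then uses `MemoryKind<MyMachineKind>` throughout, and
// `MemoryKind::may_leak` defers to `MyMachineKind`'s `MayLeak` impl for its kinds.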

/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum AllocKind {
    /// A regular live data allocation.
    LiveData,
    /// A function allocation (that fn ptrs point to).
    Function,
    /// A vtable allocation.
    VTable,
    /// A TypeId allocation.
    TypeId,
    /// A dead allocation.
    Dead,
}

/// Metadata about an `AllocId`.
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct AllocInfo {
    pub size: Size,
    pub align: Align,
    pub kind: AllocKind,
    pub mutbl: Mutability,
}

impl AllocInfo {
    fn new(size: Size, align: Align, kind: AllocKind, mutbl: Mutability) -> Self {
        Self { size, align, kind, mutbl }
    }
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => interp_ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'tcx, M: Machine<'tcx>> {
    /// Allocations local to this instance of the interpreter. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,

    /// This stores whether we are currently doing reads purely for the purpose of validation.
    /// Those reads do not trigger the machine's hooks for memory reads.
    /// Needless to say, this must only be set with great care!
    validation_in_progress: Cell<bool>,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxIndexMap::default(),
            dead_alloc_map: FxIndexMap::default(),
            validation_in_progress: Cell::new(false),
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
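    ///
    /// Illustrative sketch only (hypothetical caller; `ecx` is an interpreter
    /// context and `alloc_id` an `AllocId` for a global):
    ///
    /// ```ignore (requires a full interpreter context)
    /// let root = ecx.global_root_pointer(Pointer::from(alloc_id))?;
    /// ```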
    #[inline]
    pub fn global_root_pointer(
        &self,
        ptr: Pointer<CtfeProvenance>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc_id = ptr.provenance.alloc_id();
        // We need to handle `extern static`.
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_pointer(self, def_id);
            }
            None => {
                assert!(
                    self.memory.extra_fn_ptr_map.contains_key(&alloc_id),
                    "{alloc_id:?} is neither global nor a function pointer"
                );
            }
            _ => {}
        }
        // And we need to get the provenance.
        M::adjust_alloc_root_pointer(self, ptr, M::GLOBAL_KIND.map(MemoryKind::Machine))
    }

    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
        let id = match fn_val {
            FnVal::Instance(instance) => {
                let salt = M::get_global_alloc_salt(self, Some(instance));
                self.tcx.reserve_and_set_fn_alloc(instance, salt)
            }
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right root pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_root_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let params = self.machine.get_default_alloc_params();
        let alloc = if M::PANIC_ON_ALLOC_FAIL {
            Allocation::new(size, align, init, params)
        } else {
            Allocation::try_new(size, align, init, params)?
        };
        self.insert_allocation(alloc, kind)
    }
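
    // Illustrative sketch only (hypothetical caller and machine kind): callers
    // pair `allocate_ptr` with a `deallocate_ptr` that passes the *same* kind:
    //
    //     let ptr = ecx.allocate_ptr(size, align, MemoryKind::Machine(kind), AllocInit::Uninit)?;
    //     // ... use the allocation ...
    //     ecx.deallocate_ptr(ptr.into(), None, MemoryKind::Machine(kind))?;
    //
    // A kind mismatch is reported as undefined behavior by `deallocate_ptr` below.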

    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let params = self.machine.get_default_alloc_params();
        let alloc = Allocation::from_bytes(bytes, align, mutability, params);
        self.insert_allocation(alloc, kind)
    }

    pub fn insert_allocation(
        &mut self,
        alloc: Allocation<M::Provenance, (), M::Bytes>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        assert!(alloc.size() <= self.max_size_of_val());
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        // This cannot be merged with the `adjust_global_allocation` code path
        // since here we have an allocation that already uses `M::Bytes`.
        let extra = M::init_local_allocation(self, id, kind, alloc.size(), alloc.align)?;
        let alloc = alloc.with_extra(extra);
        self.memory.alloc_map.insert(id, (kind, alloc));
        M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind))
    }

    /// If this grows the allocation, `init_growth` determines
    /// whether the additional space will be initialized.
    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init_growth: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub_custom!(
                inline_fluent!(
                    "{$kind ->
    [dealloc] deallocating
    [realloc] reallocating
    *[other] {\"\"}
} {$ptr} which does not point to the beginning of an object"
                ),
                ptr = format!("{ptr:?}"),
                kind = "realloc"
            );
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely that the perf advantage is outweighed by the maintenance cost.
        // If requested, we zero-init the entire allocation, to ensure that a growing
        // allocation has its new bytes properly set. For the part that is copied,
        // `mem_copy` below will de-initialize things as necessary.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind, init_growth)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        interp_ok(new_ptr)
    }

    /// Mark the `const_allocate`d allocation `ptr` points to as immutable so we can intern it.
    pub fn make_const_heap_ptr_global(
        &mut self,
        ptr: Pointer<Option<CtfeProvenance>>,
    ) -> InterpResult<'tcx>
    where
        M: Machine<'tcx, MemoryKind = crate::const_eval::MemoryKind, Provenance = CtfeProvenance>,
    {
        let (alloc_id, offset, _) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            return Err(ConstEvalErrKind::ConstMakeGlobalWithOffset(ptr)).into();
        }

        if self.tcx.try_get_global_alloc(alloc_id).is_some() {
            // This points to something outside the current interpreter.
            return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
        }

        // If we can't find it in `alloc_map` it must be dangling (because we don't use
        // `extra_fn_ptr_map` in const-eval).
        let (kind, alloc) = self
            .memory
            .alloc_map
            .get_mut_or(alloc_id, || Err(ConstEvalErrKind::ConstMakeGlobalWithDanglingPtr(ptr)))?;

        // Ensure this is actually a *heap* allocation, and record it as made-global.
        match kind {
            MemoryKind::Stack | MemoryKind::CallerLocation => {
                return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
            }
            MemoryKind::Machine(crate::const_eval::MemoryKind::Heap { was_made_global }) => {
                if *was_made_global {
                    return Err(ConstEvalErrKind::ConstMakeGlobalPtrAlreadyMadeGlobal(alloc_id))
                        .into();
                }
                *was_made_global = true;
            }
        }

        // Prevent further mutation, this is now an immutable global.
        alloc.mutability = Mutability::Not;

        interp_ok(())
    }

    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr, 0)?;
        trace!("deallocating: {alloc_id:?}");

        if offset.bytes() != 0 {
            throw_ub_custom!(
                inline_fluent!(
                    "{$kind ->
    [dealloc] deallocating
    [realloc] reallocating
    *[other] {\"\"}
} {$ptr} which does not point to the beginning of an object"
                ),
                ptr = format!("{ptr:?}"),
                kind = "dealloc",
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function { .. }) => {
                    err_ub_custom!(
                        inline_fluent!(
                            "deallocating {$alloc_id}, which is {$kind ->
    [fn] a function
    [vtable] a vtable
    [static_mem] static memory
    *[other] {\"\"}
}"
                        ),
                        alloc_id = alloc_id,
                        kind = "fn",
                    )
                }
                Some(GlobalAlloc::VTable(..)) => {
                    err_ub_custom!(
                        inline_fluent!(
                            "deallocating {$alloc_id}, which is {$kind ->
    [fn] a function
    [vtable] a vtable
    [static_mem] static memory
    *[other] {\"\"}
}"
                        ),
                        alloc_id = alloc_id,
                        kind = "vtable",
                    )
                }
                Some(GlobalAlloc::TypeId { .. }) => {
                    err_ub_custom!(
                        inline_fluent!(
                            "deallocating {$alloc_id}, which is {$kind ->
    [fn] a function
    [vtable] a vtable
    [static_mem] static memory
    *[other] {\"\"}
}"
                        ),
                        alloc_id = alloc_id,
                        kind = "typeid",
                    )
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_custom!(
                        inline_fluent!(
                            "deallocating {$alloc_id}, which is {$kind ->
    [fn] a function
    [vtable] a vtable
    [static_mem] static memory
    *[other] {\"\"}
}"
                        ),
                        alloc_id = alloc_id,
                        kind = "static_mem"
                    )
                }
                None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccess)),
            })
            .into();
        };

        if alloc.mutability.is_not() {
            throw_ub_custom!(
                inline_fluent!("deallocating immutable allocation {$alloc}"),
                alloc = alloc_id,
            );
        }
        if alloc_kind != kind {
            throw_ub_custom!(
                inline_fluent!(
                    "deallocating {$alloc}, which is {$alloc_kind} memory, using {$kind} deallocation operation"
                ),
                alloc = alloc_id,
                alloc_kind = format!("{alloc_kind}"),
                kind = format!("{kind}"),
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_custom!(
                    inline_fluent!(
                        "incorrect layout on deallocation: {$alloc} has size {$size} and alignment {$align}, but gave size {$size_found} and alignment {$align_found}"
                    ),
                    alloc = alloc_id,
                    size = alloc.size().bytes(),
                    align = alloc.align.bytes(),
                    size_found = size.bytes(),
                    align_found = align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::before_memory_deallocation(
            self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            ptr,
            (alloc_id, prov),
            size,
            alloc.align,
            kind,
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        interp_ok(())
    }

    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    #[inline(always)]
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(
            self,
            ptr,
            size,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let (size, align) =
                    this.get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccess)?;
                interp_ok((size, align, (alloc_id, offset, prov)))
            },
        )
    }

    /// Check if the given pointer points to live memory of the given `size`.
    /// The caller can control the error message for the out-of-bounds case.
    #[inline(always)]
    pub fn check_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Check whether the given pointer points to live memory for a signed amount of bytes.
    /// A negative amount means that the given range of memory to the left of the pointer
    /// needs to be dereferenceable.
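    ///
    /// For example, `size = -4` requires the four bytes in `[ptr - 4, ptr)`, i.e.
    /// just *before* the pointer, to be dereferenceable.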
    pub fn check_ptr_access_signed(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. Supports signed sizes for checks "to the left" of
    /// a pointer.
    ///
    /// `alloc_size` will only get called for non-zero-sized accesses.
    ///
    /// Returns `None` if and only if the size is 0.
    fn check_and_deref_ptr<T, R: Borrow<Self>>(
        this: R,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(
            R,
            AllocId,
            Size,
            M::ProvenanceExtra,
        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        // Everything is okay with size 0.
        if size == 0 {
            return interp_ok(None);
        }

        interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
            Err(addr) => {
                // We couldn't get a proper allocation.
                throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
            }
            Ok((alloc_id, offset, prov)) => {
                let tcx = this.borrow().tcx;
                let (alloc_size, _alloc_align, ret_val) = alloc_size(this, alloc_id, offset, prov)?;
                let offset = offset.bytes();
                // Compute absolute begin and end of the range.
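                // Worked example: `offset = 16, size = 4` yields `[16, 20)`;
                // `offset = 16, size = -4` yields `[12, 16)`.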
                let (begin, end) = if size >= 0 {
                    (Some(offset), offset.checked_add(size as u64))
                } else {
                    (offset.checked_sub(size.unsigned_abs()), Some(offset))
                };
                // Ensure both are within bounds.
                let in_bounds = begin.is_some() && end.is_some_and(|e| e <= alloc_size.bytes());
                if !in_bounds {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: tcx.sign_extend_to_target_isize(offset),
                        inbounds_size: size,
                        msg,
                    })
                }

                Some(ret_val)
            }
        })
    }

    pub(super) fn check_misalign(
        &self,
        misaligned: Option<Misalignment>,
        msg: CheckAlignMsg,
    ) -> InterpResult<'tcx> {
        if let Some(misaligned) = misaligned {
            throw_ub!(AlignmentCheckFailed(misaligned, msg))
        }
        interp_ok(())
    }

    pub(super) fn is_ptr_misaligned(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> Option<Misalignment> {
        if !M::enforce_alignment(self) || align.bytes() == 1 {
            return None;
        }

        #[inline]
        fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
            if offset.is_multiple_of(align.bytes()) {
                None
            } else {
                // The biggest power of two through which `offset` is divisible.
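                // E.g. `offset = 24` (binary `11000`) has 3 trailing zeros, so the
                // largest alignment we can guarantee is `1 << 3 = 8` bytes.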
646                let offset_pow2 = 1 << offset.trailing_zeros();
647                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
648            }
649        }
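        // Illustrative: for `offset = 12` and `align = 8`, this reports `has = 4`
        // (the largest power of two dividing 12) and `required = 8`.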
650
651        match self.ptr_try_get_alloc_id(ptr, 0) {
652            Err(addr) => is_offset_misaligned(addr, align),
653            Ok((alloc_id, offset, _prov)) => {
654                let alloc_info = self.get_alloc_info(alloc_id);
655                if let Some(misalign) = M::alignment_check(
656                    self,
657                    alloc_id,
658                    alloc_info.align,
659                    alloc_info.kind,
660                    offset,
661                    align,
662                ) {
663                    Some(misalign)
664                } else if M::Provenance::OFFSET_IS_ADDR {
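                    // `OFFSET_IS_ADDR` means the pointer's offset *is* its absolute address,
                    // so we can check that address value directly for misalignment.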
665                    is_offset_misaligned(ptr.addr().bytes(), align)
666                } else {
667                    // Check allocation alignment and offset alignment.
668                    if alloc_info.align.bytes() < align.bytes() {
669                        Some(Misalignment { has: alloc_info.align, required: align })
670                    } else {
671                        is_offset_misaligned(offset.bytes(), align)
672                    }
673                }
674            }
675        }
676    }
677
678    /// Checks a pointer for misalignment.
679    ///
680    /// The error assumes this is checking the pointer used directly for an access.
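    ///
    /// A hedged usage sketch (`ecx` and `ptr` are assumed to be in scope; illustrative only):
    ///
    /// ```rust,ignore (illustrative)
    /// // Require 4-byte alignment before performing a 4-byte access.
    /// ecx.check_ptr_align(ptr, Align::from_bytes(4).unwrap())?;
    /// ```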
681    pub fn check_ptr_align(
682        &self,
683        ptr: Pointer<Option<M::Provenance>>,
684        align: Align,
685    ) -> InterpResult<'tcx> {
686        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
687    }
688}
689
690impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
691    /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
692    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
693        // Unlike all the other GC helpers where we check if an `AllocId` is found in the interpreter or
694        // is live, here all the IDs in the map are for dead allocations, so we don't
695        // need to check for liveness.
696        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
697        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
698    }
699}
700
701/// Allocation accessors
702impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
703    /// Helper function to obtain a global (tcx) allocation.
704    /// This attempts to return a reference to an existing allocation if
705    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
706    /// this machine use the same pointer provenance, so it is indirected through
707    /// `M::adjust_global_allocation`.
708    fn get_global_alloc(
709        &self,
710        id: AllocId,
711        is_write: bool,
712    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
713        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
714            Some(GlobalAlloc::Memory(mem)) => {
715                // Memory of a constant or promoted or anonymous memory referenced by a static.
716                (mem, None)
717            }
718            Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
719            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
720            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
721            None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
722            Some(GlobalAlloc::Static(def_id)) => {
723                assert!(self.tcx.is_static(def_id));
724                // Thread-local statics do not have a constant address. They *must* be accessed via
725                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
726                assert!(!self.tcx.is_thread_local_static(def_id));
727                // Notice that every static has two `AllocId`s that will resolve to the same
728                // thing here: one maps to `GlobalAlloc::Static` (the "lazy" ID), and the other
729                // maps to `GlobalAlloc::Memory`, which is returned by `eval_static_initializer`
730                // (the "resolved" ID).
731                // The resolved ID is never used by the interpreted program; it is hidden.
732                // This is relied upon for soundness of const-patterns; a pointer to the resolved
733                // ID would "sidestep" the checks that make sure consts do not point to statics!
734                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
735                // contains a reference to memory that was created during its evaluation (i.e., not
736                // to another static), those inner references only exist in "resolved" form.
737                if self.tcx.is_foreign_item(def_id) {
738                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
739                    // referencing arbitrary (declared) extern statics.
740                    throw_unsup!(ExternStatic(def_id));
741                }
742
743                // We don't give a span -- statics don't need that, they cannot be generic or associated.
744                let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
745                (val, Some(def_id))
746            }
747        };
748        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
749        // We got tcx memory. Let the machine initialize its "extra" stuff.
750        M::adjust_global_allocation(
751            self,
752            id, // always use the ID we got as input, not the "hidden" one.
753            alloc.inner(),
754        )
755    }
756
757    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
758    /// The caller is responsible for calling the access hooks!
759    ///
760    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
761    pub fn get_alloc_raw(
762        &self,
763        id: AllocId,
764    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
765        // The error type of the inner closure here is somewhat funny. We have two
766        // ways of "erroring": An actual error, or because we got a reference from
767        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
768        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
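        // Spelled out (a restatement, not new behavior): the closure passed to `get_or` returns
        // `Result<(MemoryKind<_>, Allocation<..>), Result<&Allocation<..>, InterpErrorInfo<'tcx>>>`,
        // where the outer `Err` carries either a borrowed allocation (`Ok`) or a real error (`Err`).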
769        let a = self.memory.alloc_map.get_or(id, || {
770            // We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
771            // so we use `report_err` for that.
772            let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
773            match alloc {
774                Cow::Borrowed(alloc) => {
775                    // We got a ref, cheaply return that as an "error" so that the
776                    // map does not get mutated.
777                    Err(Ok(alloc))
778                }
779                Cow::Owned(alloc) => {
780                    // Need to put it into the map and return a ref to that
781                    let kind = M::GLOBAL_KIND.expect(
782                        "I got a global allocation that I have to copy but the machine does \
783                            not expect that to happen",
784                    );
785                    Ok((MemoryKind::Machine(kind), alloc))
786                }
787            }
788        });
789        // Now unpack that funny error type
790        match a {
791            Ok(a) => interp_ok(&a.1),
792            Err(a) => a.into(),
793        }
794    }
795
796    /// Gives raw, immutable access to the `Allocation` address, without bounds or alignment checks.
797    /// The caller is responsible for calling the access hooks!
798    pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
799        let alloc = self.get_alloc_raw(id)?;
800        interp_ok(alloc.get_bytes_unchecked_raw())
801    }
802
803    /// Bounds-checked *but not align-checked* allocation access.
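    ///
    /// A hedged usage sketch (`ecx` and `ptr` assumed to be in scope; illustrative only):
    ///
    /// ```rust,ignore (illustrative)
    /// if let Some(alloc_ref) = ecx.get_ptr_alloc(ptr, Size::from_bytes(8))? {
    ///     // Read the accessed range as an integer scalar.
    ///     let val = alloc_ref.read_integer(alloc_range(Size::ZERO, Size::from_bytes(8)))?;
    /// }
    /// ```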
804    pub fn get_ptr_alloc<'a>(
805        &'a self,
806        ptr: Pointer<Option<M::Provenance>>,
807        size: Size,
808    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
809    {
810        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
811        let ptr_and_alloc = Self::check_and_deref_ptr(
812            self,
813            ptr,
814            size_i64,
815            CheckInAllocMsg::MemoryAccess,
816            |this, alloc_id, offset, prov| {
817                let alloc = this.get_alloc_raw(alloc_id)?;
818                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
819            },
820        )?;
821        // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
822        // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
823        // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
824        if !self.memory.validation_in_progress.get() {
825            if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
826                M::before_alloc_access(self.tcx, &self.machine, alloc_id)?;
827            }
828        }
829
830        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
831            let range = alloc_range(offset, size);
832            if !self.memory.validation_in_progress.get() {
833                M::before_memory_read(
834                    self.tcx,
835                    &self.machine,
836                    &alloc.extra,
837                    ptr,
838                    (alloc_id, prov),
839                    range,
840                )?;
841            }
842            interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
843        } else {
844            interp_ok(None)
845        }
846    }
847
848    /// Return the `extra` field of the given allocation.
849    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
850        interp_ok(&self.get_alloc_raw(id)?.extra)
851    }
852
853    /// Return the `mutability` field of the given allocation.
854    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
855        interp_ok(self.get_alloc_raw(id)?.mutability)
856    }
857
858    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
859    /// The caller is responsible for calling the access hooks!
860    ///
861    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
862    /// allocation.
863    ///
864    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
865    pub fn get_alloc_raw_mut(
866        &mut self,
867        id: AllocId,
868    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
869        // We have "NLL problem case #3" here, which cannot be worked around without loss of
870        // efficiency even for the common case where the key is in the map.
871        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
872        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`, and that boils down to
873        // Miri's `adjust_alloc_root_pointer` needing to look up the size of the allocation.
874        // It could be avoided with a totally separate codepath in Miri for handling the absolute address
875        // of global allocations, but that's not worth it.)
876        if self.memory.alloc_map.get_mut(id).is_none() {
877            // Slow path.
878            // Allocation not found locally; look it up globally.
879            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
880            let kind = M::GLOBAL_KIND.expect(
881                "I got a global allocation that I have to copy but the machine does \
882                    not expect that to happen",
883            );
884            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
885        }
886
887        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
888        if alloc.mutability.is_not() {
889            throw_ub!(WriteToReadOnly(id))
890        }
891        interp_ok((alloc, &mut self.machine))
892    }
893
894    /// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
895    /// The caller is responsible for calling the access hooks!
896    pub fn get_alloc_bytes_unchecked_raw_mut(
897        &mut self,
898        id: AllocId,
899    ) -> InterpResult<'tcx, *mut u8> {
900        let alloc = self.get_alloc_raw_mut(id)?.0;
901        interp_ok(alloc.get_bytes_unchecked_raw_mut())
902    }
903
904    /// Bounds-checked *but not align-checked* allocation access.
905    pub fn get_ptr_alloc_mut<'a>(
906        &'a mut self,
907        ptr: Pointer<Option<M::Provenance>>,
908        size: Size,
909    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
910    {
911        let tcx = self.tcx;
912        let validation_in_progress = self.memory.validation_in_progress.get();
913
914        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
915        let ptr_and_alloc = Self::check_and_deref_ptr(
916            self,
917            ptr,
918            size_i64,
919            CheckInAllocMsg::MemoryAccess,
920            |this, alloc_id, offset, prov| {
921                let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
922                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
923            },
924        )?;
925
926        if let Some((alloc_id, offset, prov, alloc, machine)) = ptr_and_alloc {
927            let range = alloc_range(offset, size);
928            if !validation_in_progress {
929                // For writes, it's okay to only call those when there actually is a non-zero
930                // amount of bytes to be written: a zero-sized write doesn't manifest anything.
931                M::before_alloc_access(tcx, machine, alloc_id)?;
932                M::before_memory_write(
933                    tcx,
934                    machine,
935                    &mut alloc.extra,
936                    ptr,
937                    (alloc_id, prov),
938                    range,
939                )?;
940            }
941            interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
942        } else {
943            interp_ok(None)
944        }
945    }
946
947    /// Return the `extra` field of the given allocation.
948    pub fn get_alloc_extra_mut<'a>(
949        &'a mut self,
950        id: AllocId,
951    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
952        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
953        interp_ok((&mut alloc.extra, machine))
954    }
955
956    /// Check whether an allocation is live. This is faster than calling
957    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
958    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
959    pub fn is_alloc_live(&self, id: AllocId) -> bool {
960        self.memory.alloc_map.contains_key_ref(&id)
961            || self.memory.extra_fn_ptr_map.contains_key(&id)
962            // We check `tcx` last as that has to acquire a lock in `many-seeds` mode.
963            // This also matches the order in `get_alloc_info`.
964            || self.tcx.try_get_global_alloc(id).is_some()
965    }
966
967    /// Obtain the size and alignment of an allocation, even if that allocation has
968    /// been deallocated.
969    pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
970        // # Regular allocations
971        // Don't use `self.get_raw` here as that will
972        // a) cause cycles in case `id` refers to a static
973        // b) duplicate a global's allocation in Miri
974        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
975            return AllocInfo::new(
976                alloc.size(),
977                alloc.align,
978                AllocKind::LiveData,
979                alloc.mutability,
980            );
981        }
982
983        // # Function pointers
984        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
985        if let Some(fn_val) = self.get_fn_alloc(id) {
986            let align = match fn_val {
987                FnVal::Instance(_instance) => {
988                    // FIXME: Until we have a clear design for the effects of align(N) functions
989                    // on the address of function pointers, we don't consider the align(N)
990                    // attribute on functions in the interpreter.
991                    // See <https://github.com/rust-lang/rust/issues/144661> for more context.
992                    Align::ONE
993                }
994                // Machine-specific extra functions currently do not support alignment restrictions.
995                FnVal::Other(_) => Align::ONE,
996            };
997
998            return AllocInfo::new(Size::ZERO, align, AllocKind::Function, Mutability::Not);
999        }
1000
1001        // # Global allocations
1002        if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
1003            // NOTE: `static` alignment from attributes has already been applied to the allocation.
1004            let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
1005            let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
1006            let kind = match global_alloc {
1007                GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
1008                GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
1009                GlobalAlloc::VTable { .. } => AllocKind::VTable,
1010                GlobalAlloc::TypeId { .. } => AllocKind::TypeId,
1011            };
1012            return AllocInfo::new(size, align, kind, mutbl);
1013        }
1014
1015        // # Dead pointers
1016        let (size, align) = *self
1017            .memory
1018            .dead_alloc_map
1019            .get(&id)
1020            .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
1021        AllocInfo::new(size, align, AllocKind::Dead, Mutability::Not)
1022    }
1023
1024    /// Obtain the size and alignment of a *live* allocation.
1025    fn get_live_alloc_size_and_align(
1026        &self,
1027        id: AllocId,
1028        msg: CheckInAllocMsg,
1029    ) -> InterpResult<'tcx, (Size, Align)> {
1030        let info = self.get_alloc_info(id);
1031        if info.kind == AllocKind::Dead {
1032            throw_ub!(PointerUseAfterFree(id, msg))
1033        }
1034        interp_ok((info.size, info.align))
1035    }
1036
1037    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
1038        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
1039            Some(FnVal::Other(*extra))
1040        } else {
1041            match self.tcx.try_get_global_alloc(id) {
1042                Some(GlobalAlloc::Function { instance, .. }) => Some(FnVal::Instance(instance)),
1043                _ => None,
1044            }
1045        }
1046    }
1047
1048    /// Takes a pointer that is the first chunk of a `TypeId` and returns the type that its
1049    /// provenance refers to, as well as the segment of the hash that this pointer covers.
1050    pub fn get_ptr_type_id(
1051        &self,
1052        ptr: Pointer<Option<M::Provenance>>,
1053    ) -> InterpResult<'tcx, (Ty<'tcx>, u64)> {
1054        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
1055        let Some(GlobalAlloc::TypeId { ty }) = self.tcx.try_get_global_alloc(alloc_id) else {
1056            throw_ub_format!("invalid `TypeId` value: not all bytes carry type id metadata")
1057        };
1058        interp_ok((ty, offset.bytes()))
1059    }
1060
1061    pub fn get_ptr_fn(
1062        &self,
1063        ptr: Pointer<Option<M::Provenance>>,
1064    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
1065        trace!("get_ptr_fn({:?})", ptr);
1066        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
1067        if offset.bytes() != 0 {
1068            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
1069        }
1070        self.get_fn_alloc(alloc_id)
1071            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
1072            .into()
1073    }
1074
1075    /// Get the dynamic type of the given vtable pointer.
1076    /// If `expected_trait` is `Some`, it must be a vtable for the given trait.
1077    pub fn get_ptr_vtable_ty(
1078        &self,
1079        ptr: Pointer<Option<M::Provenance>>,
1080        expected_trait: Option<&'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>>,
1081    ) -> InterpResult<'tcx, Ty<'tcx>> {
1082        trace!("get_ptr_vtable({:?})", ptr);
1083        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr, 0)?;
1084        if offset.bytes() != 0 {
1085            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
1086        }
1087        let Some(GlobalAlloc::VTable(ty, vtable_dyn_type)) =
1088            self.tcx.try_get_global_alloc(alloc_id)
1089        else {
1090            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
1091        };
1092        if let Some(expected_dyn_type) = expected_trait {
1093            self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
1094        }
1095        interp_ok(ty)
1096    }
1097
1098    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
1099        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
1100        interp_ok(())
1101    }
1102
1103    /// Visit all allocations reachable from the given start set, by recursively traversing the
1104    /// provenance information of those allocations.
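    ///
    /// A hedged usage sketch (`ecx` and `root_id` assumed to be in scope; illustrative only):
    ///
    /// ```rust,ignore (illustrative)
    /// ecx.visit_reachable_allocs(vec![root_id], |_ecx, id, info| {
    ///     trace!("reachable: {id:?} (kind: {:?})", info.kind);
    ///     interp_ok(())
    /// })?;
    /// ```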
1105    pub fn visit_reachable_allocs(
1106        &mut self,
1107        start: Vec<AllocId>,
1108        mut visit: impl FnMut(&mut Self, AllocId, &AllocInfo) -> InterpResult<'tcx>,
1109    ) -> InterpResult<'tcx> {
1110        let mut done = FxHashSet::default();
1111        let mut todo = start;
1112        while let Some(id) = todo.pop() {
1113            if !done.insert(id) {
1114                // We already saw this allocation before, don't process it again.
1115                continue;
1116            }
1117            let info = self.get_alloc_info(id);
1118
1119            // Recurse, if there is data here.
1120            // Do this *before* invoking the callback, as the callback might mutate the
1121            // allocation and e.g. replace all provenance by wildcards!
1122            if info.kind == AllocKind::LiveData {
1123                let alloc = self.get_alloc_raw(id)?;
1124                for prov in alloc.provenance().provenances() {
1125                    if let Some(id) = prov.get_alloc_id() {
1126                        todo.push(id);
1127                    }
1128                }
1129            }
1130
1131            // Call the callback.
1132            visit(self, id, &info)?;
1133        }
1134        interp_ok(())
1135    }
1136
1137    /// Create a lazy debug printer that prints the given allocation and all allocations it points
1138    /// to, recursively.
1139    #[must_use]
1140    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> {
1141        self.dump_allocs(vec![id])
1142    }
1143
1144    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
1145    /// recursively.
1146    #[must_use]
1147    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> {
1148        allocs.sort();
1149        allocs.dedup();
1150        DumpAllocs { ecx: self, allocs }
1151    }
1152
1153    /// Print the allocation's bytes, without any nested allocations.
1154    pub fn print_alloc_bytes_for_diagnostics(&self, id: AllocId) -> String {
1155        // Using the "raw" access to avoid the `before_alloc_read` hook, we specifically
1156        // want to be able to read all memory for diagnostics, even if that is cyclic.
1157        let alloc = self.get_alloc_raw(id).unwrap();
1158        let mut bytes = String::new();
1159        if alloc.size() != Size::ZERO {
1160            bytes = "\n".into();
1161            // FIXME(translation) there might be pieces that are translatable.
1162            rustc_middle::mir::pretty::write_allocation_bytes(*self.tcx, alloc, &mut bytes, "    ")
1163                .unwrap();
1164        }
1165        bytes
1166    }
1167
1168    /// Find leaked allocations, remove them from memory, and return them. Allocations reachable
1169    /// from `static_roots` or a `Global` allocation are not considered leaked, nor are
1170    /// allocations whose kind's `may_leak()` returns true.
1171    ///
1172    /// This is highly destructive, no more execution can happen after this!
1173    pub fn take_leaked_allocations(
1174        &mut self,
1175        static_roots: impl FnOnce(&Self) -> &[AllocId],
1176    ) -> Vec<(AllocId, MemoryKind<M::MemoryKind>, Allocation<M::Provenance, M::AllocExtra, M::Bytes>)>
1177    {
1178        // Collect the set of allocations that are *reachable* from `Global` allocations.
1179        let reachable = {
1180            let mut reachable = FxHashSet::default();
1181            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
1182            let mut todo: Vec<_> =
1183                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
1184                    if Some(kind) == global_kind { Some(id) } else { None }
1185                });
1186            todo.extend(static_roots(self));
1187            while let Some(id) = todo.pop() {
1188                if reachable.insert(id) {
1189                    // This is a new allocation; add the allocations it points to into `todo`.
1190                    // We only need to care about `alloc_map` memory here, as entirely unchanged
1191                    // global memory cannot point to memory relevant for the leak check.
1192                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
1193                        todo.extend(
1194                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
1195                        );
1196                    }
1197                }
1198            }
1199            reachable
1200        };
1201
1202        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
1203        let leaked: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
1204            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
1205        });
1206        let mut result = Vec::new();
1207        for &id in leaked.iter() {
1208            let (kind, alloc) = self.memory.alloc_map.remove(&id).unwrap();
1209            result.push((id, kind, alloc));
1210        }
1211        result
1212    }
1213
1214    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
1215    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
1216    ///
1217    /// We do this so Miri's allocation access tracking does not show the validation
1218    /// reads as spurious accesses.
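    ///
    /// A hedged usage sketch (the closure body is assumed, not taken from this file):
    ///
    /// ```rust,ignore (illustrative)
    /// let res = ecx.run_for_validation_mut(|ecx| {
    ///     // ... perform validation reads; the read hooks stay suppressed ...
    /// });
    /// ```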
1219    pub fn run_for_validation_mut<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
1220        // This deliberately uses `==` on `bool` to follow the pattern
1221        // `assert!(val.replace(new) == old)`.
1222        assert!(
1223            self.memory.validation_in_progress.replace(true) == false,
1224            "`validation_in_progress` was already set"
1225        );
1226        let res = f(self);
1227        assert!(
1228            self.memory.validation_in_progress.replace(false) == true,
1229            "`validation_in_progress` was unset by someone else"
1230        );
1231        res
1232    }
1233
1234    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
1235    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
1236    ///
1237    /// We do this so Miri's allocation access tracking does not show the validation
1238    /// reads as spurious accesses.
1239    pub fn run_for_validation_ref<R>(&self, f: impl FnOnce(&Self) -> R) -> R {
1240        // This deliberately uses `==` on `bool` to follow the pattern
1241        // `assert!(val.replace(new) == old)`.
1242        assert!(
1243            self.memory.validation_in_progress.replace(true) == false,
1244            "`validation_in_progress` was already set"
1245        );
1246        let res = f(self);
1247        assert!(
1248            self.memory.validation_in_progress.replace(false) == true,
1249            "`validation_in_progress` was unset by someone else"
1250        );
1251        res
1252    }
1253
1254    pub(super) fn validation_in_progress(&self) -> bool {
1255        self.memory.validation_in_progress.get()
1256    }
1257}
1258
1259#[doc(hidden)]
1260/// There's no way to use this directly; it's just a helper struct for the `dump_alloc(s)` methods.
1261pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> {
1262    ecx: &'a InterpCx<'tcx, M>,
1263    allocs: Vec<AllocId>,
1264}
1265
1266impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
1267    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1268        // Cannot be a closure because it is generic in `Prov`, `Extra`.
1269        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
1270            fmt: &mut std::fmt::Formatter<'_>,
1271            tcx: TyCtxt<'tcx>,
1272            allocs_to_print: &mut VecDeque<AllocId>,
1273            alloc: &Allocation<Prov, Extra, Bytes>,
1274        ) -> std::fmt::Result {
1275            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
1276            {
1277                allocs_to_print.push_back(alloc_id);
1278            }
1279            write!(fmt, "{}", display_allocation(tcx, alloc))
1280        }
1281
1282        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
1283        // `allocs_printed` contains all allocations that we have already printed.
1284        let mut allocs_printed = FxHashSet::default();
1285
1286        while let Some(id) = allocs_to_print.pop_front() {
1287            if !allocs_printed.insert(id) {
1288                // Already printed, so skip this.
1289                continue;
1290            }
1291
1292            write!(fmt, "{id:?}")?;
1293            match self.ecx.memory.alloc_map.get(id) {
1294                Some((kind, alloc)) => {
1295                    // normal alloc
1296                    write!(fmt, " ({kind}, ")?;
1297                    write_allocation_track_relocs(
1298                        &mut *fmt,
1299                        *self.ecx.tcx,
1300                        &mut allocs_to_print,
1301                        alloc,
1302                    )?;
1303                }
1304                None => {
1305                    // global alloc
1306                    match self.ecx.tcx.try_get_global_alloc(id) {
1307                        Some(GlobalAlloc::Memory(alloc)) => {
1308                            write!(fmt, " (unchanged global, ")?;
1309                            write_allocation_track_relocs(
1310                                &mut *fmt,
1311                                *self.ecx.tcx,
1312                                &mut allocs_to_print,
1313                                alloc.inner(),
1314                            )?;
1315                        }
1316                        Some(GlobalAlloc::Function { instance, .. }) => {
1317                            write!(fmt, " (fn: {instance})")?;
1318                        }
1319                        Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
1320                            write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
1321                        }
1322                        Some(GlobalAlloc::TypeId { ty }) => {
1323                            write!(fmt, " (typeid for {ty})")?;
1324                        }
1325                        Some(GlobalAlloc::Static(did)) => {
1326                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
1327                        }
1328                        None => {
1329                            write!(fmt, " (deallocated)")?;
1330                        }
1331                    }
1332                }
1333            }
1334            writeln!(fmt)?;
1335        }
1336        Ok(())
1337    }
1338}
1339
1340/// Reading and writing.
1341impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
1342    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
1343{
1344    pub fn as_ref<'b>(&'b self) -> AllocRef<'b, 'tcx, Prov, Extra, Bytes> {
1345        AllocRef { alloc: self.alloc, range: self.range, tcx: self.tcx, alloc_id: self.alloc_id }
1346    }
1347
1348    /// `range` is relative to this allocation reference, not the base of the allocation.
1349    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
1350        let range = self.range.subrange(range);
1351        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
1352
1353        self.alloc
1354            .write_scalar(&self.tcx, range, val)
1355            .map_err(|e| e.to_interp_error(self.alloc_id))
1356            .into()
1357    }
1358
1359    /// `offset` is relative to this allocation reference, not the base of the allocation.
1360    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
1361        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
1362    }
1363
1364    /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
1365    pub fn write_uninit(&mut self, range: AllocRange) {
1366        let range = self.range.subrange(range);
1367
1368        self.alloc.write_uninit(&self.tcx, range);
1369    }
1370
1371    /// Mark the entire referenced range as uninitialized.
1372    pub fn write_uninit_full(&mut self) {
1373        self.alloc.write_uninit(&self.tcx, self.range);
1374    }
1375
1376    /// Remove all provenance in the reference range.
1377    pub fn clear_provenance(&mut self) {
1378        self.alloc.clear_provenance(&self.tcx, self.range);
1379    }
1380}
1381
1382impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
1383    /// `range` is relative to this allocation reference, not the base of the allocation.
1384    pub fn read_scalar(
1385        &self,
1386        range: AllocRange,
1387        read_provenance: bool,
1388    ) -> InterpResult<'tcx, Scalar<Prov>> {
1389        let range = self.range.subrange(range);
1390        self.alloc
1391            .read_scalar(&self.tcx, range, read_provenance)
1392            .map_err(|e| e.to_interp_error(self.alloc_id))
1393            .into()
1394    }
1395
1396    /// `range` is relative to this allocation reference, not the base of the allocation.
1397    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
1398        self.read_scalar(range, /*read_provenance*/ false)
1399    }
1400
1401    /// `offset` is relative to this allocation reference, not the base of the allocation.
1402    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
1403        self.read_scalar(
1404            alloc_range(offset, self.tcx.data_layout().pointer_size()),
1405            /*read_provenance*/ true,
1406        )
1407    }
1408
1409    /// `range` is relative to this allocation reference, not the base of the allocation.
1410    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
1411        self.alloc
1412            .get_bytes_strip_provenance(&self.tcx, self.range)
1413            .map_err(|e| e.to_interp_error(self.alloc_id))
1414            .into()
1415    }
1416
1417    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
1418    pub fn has_provenance(&self) -> bool {
1419        !self.alloc.provenance().range_empty(self.range, &self.tcx)
1420    }
1421}
1422
1423impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
1424    /// Reads the given number of bytes from memory, and strips their provenance if possible.
1425    /// Returns them as a slice.
1426    ///
1427    /// Performs appropriate bounds checks.
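    ///
    /// A hedged usage sketch (`ecx` and `ptr` assumed to be in scope; illustrative only):
    ///
    /// ```rust,ignore (illustrative)
    /// let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(16))?;
    /// assert_eq!(bytes.len(), 16);
    /// ```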
1428    pub fn read_bytes_ptr_strip_provenance(
1429        &self,
1430        ptr: Pointer<Option<M::Provenance>>,
1431        size: Size,
1432    ) -> InterpResult<'tcx, &[u8]> {
1433        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
1434            // zero-sized access
1435            return interp_ok(&[]);
1436        };
1437        // Side-step AllocRef and directly access the underlying bytes more efficiently.
1438        // (We are staying inside the bounds here so all is good.)
1439        interp_ok(
1440            alloc_ref
1441                .alloc
1442                .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
1443                .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
1444        )
1445    }
1446
1447    /// Writes the given stream of bytes into memory.
1448    ///
1449    /// Performs appropriate bounds checks.
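    ///
    /// A hedged usage sketch (`ecx` and `ptr` assumed to be in scope; illustrative only). The
    /// iterator must report an exact `size_hint`, as arrays and slices do:
    ///
    /// ```rust,ignore (illustrative)
    /// ecx.write_bytes_ptr(ptr, [0xAAu8; 4])?;
    /// ```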
1450    pub fn write_bytes_ptr(
1451        &mut self,
1452        ptr: Pointer<Option<M::Provenance>>,
1453        src: impl IntoIterator<Item = u8>,
1454    ) -> InterpResult<'tcx> {
1455        let mut src = src.into_iter();
1456        let (lower, upper) = src.size_hint();
1457        let len = upper.expect("can only write bounded iterators");
1458        assert_eq!(lower, len, "can only write iterators with a precise length");
1459
1460        let size = Size::from_bytes(len);
1461        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
1462            // zero-sized access
1463            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
1464            return interp_ok(());
1465        };
1466
1467        // Side-step AllocRef and directly access the underlying bytes more efficiently.
1468        // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
1469        let bytes =
1470            alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range);
1471        // `zip` would stop when the first iterator ends; we want to definitely
1472        // cover all of `bytes`.
1473        for dest in bytes {
1474            *dest = src.next().expect("iterator was shorter than it said it would be");
1475        }
1476        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
1477        interp_ok(())
1478    }
1479
1480    pub fn mem_copy(
1481        &mut self,
1482        src: Pointer<Option<M::Provenance>>,
1483        dest: Pointer<Option<M::Provenance>>,
1484        size: Size,
1485        nonoverlapping: bool,
1486    ) -> InterpResult<'tcx> {
1487        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
1488    }
1489
1490    /// Performs `num_copies` many copies of `size` many bytes from `src` to `dest + i*size` (where
1491    /// `i` is the index of the copy).
1492    ///
1493    /// Either `nonoverlapping` must be true or `num_copies` must be 1; doing repeated copies that
1494    /// may overlap is not supported.
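    ///
    /// A hedged usage sketch (`ecx`, `src`, and `dest` assumed to be in scope; illustrative only):
    ///
    /// ```rust,ignore (illustrative)
    /// // Replicate the 8 bytes at `src` into 4 consecutive 8-byte slots starting at `dest`.
    /// ecx.mem_copy_repeatedly(src, dest, Size::from_bytes(8), 4, /*nonoverlapping*/ true)?;
    /// ```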
1495    pub fn mem_copy_repeatedly(
1496        &mut self,
1497        src: Pointer<Option<M::Provenance>>,
1498        dest: Pointer<Option<M::Provenance>>,
1499        size: Size,
1500        num_copies: u64,
1501        nonoverlapping: bool,
1502    ) -> InterpResult<'tcx> {
1503        let tcx = self.tcx;
1504        // We need to do our own bounds-checks.
1505        let src_parts = self.get_ptr_access(src, size)?;
1506        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication
1507
1508        // Similar to `get_ptr_alloc`, we need to call `before_alloc_access` even for zero-sized
1509        // reads. However, just like in `get_ptr_alloc_mut`, the write part is okay to skip for
1510        // zero-sized writes.
1511        if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(src, size.bytes().try_into().unwrap())
1512        {
1513            M::before_alloc_access(tcx, &self.machine, alloc_id)?;
1514        }
1515
1516        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
1517        // and once below to get the underlying `&[mut] Allocation`.
1518
1519        // Source alloc preparations and access hooks.
1520        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
1521            // Zero-sized *source*; that means `dest` is also zero-sized and we have nothing to do.
1522            return interp_ok(());
1523        };
1524        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
1525        let src_range = alloc_range(src_offset, size);
1526        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");
1527
1528        // Trigger read hook.
1529        // For the overlapping case, it is crucial that we trigger the read hook
1530        // before the write hook -- the aliasing model cares about the order.
1531        M::before_memory_read(
1532            tcx,
1533            &self.machine,
1534            &src_alloc.extra,
1535            src,
1536            (src_alloc_id, src_prov),
1537            src_range,
1538        )?;
1539        // We need the `dest` ptr for the next operation, so we get it now.
1540        // We already did the source checks and called the hooks so we are good to return early.
1541        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
1542            // Zero-sized *destination*.
1543            return interp_ok(());
1544        };
1545
1546        // Prepare getting source provenance.
1547        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
1548        // First copy the provenance to a temporary buffer, because
1549        // `get_bytes_unchecked_for_overwrite_ptr` will clear the provenance (in preparation for
1550        // inserting the new provenance), and that can overlap with the source range.
1551        let provenance = src_alloc.provenance_prepare_copy(src_range, self);
1552        // Prepare a copy of the initialization mask.
1553        let init = src_alloc.init_mask().prepare_copy(src_range);
1554
1555        // Destination alloc preparations...
1556        let (dest_alloc, machine) = self.get_alloc_raw_mut(dest_alloc_id)?;
1557        let dest_range = alloc_range(dest_offset, size * num_copies);
1558        // ...and access hooks.
1559        M::before_alloc_access(tcx, machine, dest_alloc_id)?;
1560        M::before_memory_write(
1561            tcx,
1562            machine,
1563            &mut dest_alloc.extra,
1564            dest,
1565            (dest_alloc_id, dest_prov),
1566            dest_range,
1567        )?;
1568        // Yes we do overwrite all bytes in `dest_bytes`.
1569        let dest_bytes =
1570            dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr();
1571
1572        if init.no_bytes_init() {
1573            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
1574            // is marked as uninitialized but we otherwise omit changing the byte representation, which may
1575            // be arbitrary for uninitialized bytes.
1576            // This also avoids writing to the target bytes so that the backing allocation is never
1577            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
1578            // operating systems this can avoid physically allocating the page.
1579            dest_alloc.write_uninit(&tcx, dest_range);
1580            // `write_uninit` also resets the provenance, so we are done.
1581            return interp_ok(());
1582        }
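        // A minimal sketch of this idea, with a hypothetical out-of-band init mask
        // (illustrative only; these are not this module's types):
        //     struct Buf { bytes: Vec<u8>, init: Vec<bool> }
        //     fn mark_uninit(buf: &mut Buf, range: std::ops::Range<usize>) {
        //         buf.init[range].fill(false); // only the mask changes; `bytes` is untouched
        //     }
        // An allocation whose bytes are never written never forces the OS to back it
        // with physical pages.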
1583
1584        // SAFETY: The above indexing would have panicked if there weren't at least `size` bytes
1585        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
1586        // `dest` could possibly overlap.
1587        // The pointers above remain valid even if the `HashMap` table is moved around because they
1588        // point into the `Vec` storing the bytes.
1589        unsafe {
1590            if src_alloc_id == dest_alloc_id {
1591                if nonoverlapping {
1592                    // These `Size` additions are overflow-checked.
1593                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
1594                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
1595                    {
1596                        throw_ub_custom!(inline_fluent!(
1597                            "`copy_nonoverlapping` called on overlapping ranges"
1598                        ));
1599                    }
1600                }
1601            }
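            // The predicate above, restated over plain `u64` offsets (hypothetical
            // helper, for illustration only):
            //     fn overlaps(src: u64, dest: u64, size: u64) -> bool {
            //         (src <= dest && src + size > dest)
            //             || (dest <= src && dest + size > src)
            //     }
            //     assert!(overlaps(0, 4, 8));  // [0, 8) and [4, 12) intersect
            //     assert!(!overlaps(0, 8, 8)); // [0, 8) and [8, 16) only touch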
1602            if num_copies > 1 {
1603                assert!(nonoverlapping, "multi-copy only supported in non-overlapping mode");
1604            }
1605
1606            let size_in_bytes = size.bytes_usize();
1607            // For particularly large arrays (where this is perf-sensitive) it's common that
1608            // we're writing a single byte repeatedly. So, optimize that case to a memset.
1609            if size_in_bytes == 1 {
1610                debug_assert!(num_copies >= 1); // we already handled the zero-sized cases above.
1611                // SAFETY: `src_bytes` would be read from anyway by `copy` below (num_copies >= 1).
1612                let value = *src_bytes;
1613                dest_bytes.write_bytes(value, (size * num_copies).bytes_usize());
1614            } else if src_alloc_id == dest_alloc_id {
1615                let mut dest_ptr = dest_bytes;
1616                for _ in 0..num_copies {
1617                    // Here we rely on `src` and `dest` being non-overlapping if there is more than
1618                    // one copy.
1619                    ptr::copy(src_bytes, dest_ptr, size_in_bytes);
1620                    dest_ptr = dest_ptr.add(size_in_bytes);
1621                }
1622            } else {
1623                let mut dest_ptr = dest_bytes;
1624                for _ in 0..num_copies {
1625                    ptr::copy_nonoverlapping(src_bytes, dest_ptr, size_in_bytes);
1626                    dest_ptr = dest_ptr.add(size_in_bytes);
1627                }
1628            }
1629        }
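        // For intuition: with a 1-byte element, `num_copies` repetitions amount to a
        // memset. A safe-Rust analogue (hypothetical buffers, not interpreter state):
        //     let src = [0xabu8];
        //     let mut dest = vec![0u8; 8];
        //     dest.fill(src[0]); // same effect as the `write_bytes` call above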
1630
1631        // now fill in all the "init" data
1632        dest_alloc.init_mask_apply_copy(
1633            init,
1634            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
1635            num_copies,
1636        );
1637        // copy the provenance to the destination
1638        dest_alloc.provenance_apply_copy(provenance, alloc_range(dest_offset, size), num_copies);
1639
1640        interp_ok(())
1641    }
1642}
1643
1644/// Machine pointer introspection.
1645impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
1646    /// Test if this value might be null.
1647    /// If the machine does not support ptr-to-int casts, this is conservative.
1648    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
1649        match scalar.try_to_scalar_int() {
1650            Ok(int) => interp_ok(int.is_null()),
1651            Err(_) => {
1652                // We can't cast this pointer to an integer. That can only happen during CTFE.
1653                let ptr = scalar.to_pointer(self)?;
1654                match self.ptr_try_get_alloc_id(ptr, 0) {
1655                    Ok((alloc_id, offset, _)) => {
1656                        let info = self.get_alloc_info(alloc_id);
1657                        if info.kind == AllocKind::TypeId {
1658                            // We *could* actually precisely answer this question since here,
1659                            // the offset *is* the integer value. But the entire point of making
1660                            // this a pointer is not to leak the integer value, so we say everything
1661                            // might be null.
1662                            return interp_ok(true);
1663                        }
1664                        // If the pointer is in-bounds (including "at the end"), it is definitely not null.
1665                        if offset <= info.size {
1666                            return interp_ok(false);
1667                        }
1668                        // If the allocation is N-aligned, and the offset is not divisible by N,
1669                        // then `base + offset` has a non-zero remainder after division by `N`,
1670                        // which means `base + offset` cannot be null.
1671                        if !offset.bytes().is_multiple_of(info.align.bytes()) {
1672                            return interp_ok(false);
1673                        }
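                        // Worked example: with `align == 8`, the base address satisfies
                        // `base % 8 == 0`, so an offset of 13 gives `(base + 13) % 8 == 5`;
                        // null has remainder 0, hence `base + 13` cannot be null.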
1674                        // We don't know enough, this might be null.
1675                        interp_ok(true)
1676                    }
1677                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
1678                }
1679            }
1680        }
1681    }
1682
1683    /// Turning a "maybe pointer" into a proper pointer (and some information
1684    /// about where it points), or an absolute address.
1685    ///
1686    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
1687    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
1688    /// where a wildcard pointer sits right in between two allocations.
1689    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
1690    /// for handling wildcard pointers.
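    /// (For example, when one allocation ends exactly where another begins and a wildcard
    /// pointer carries that boundary address, the sign indicates whether the intended
    /// access extends below or above that address, which selects the allocation.)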
1691    ///
1692    /// The result must be used immediately; it is not allowed to convert
1693    /// the returned data back into a `Pointer` and store that in machine state.
1694    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
1695    /// we don't have an operation to turn it back into `M::Provenance`.)
1696    pub fn ptr_try_get_alloc_id(
1697        &self,
1698        ptr: Pointer<Option<M::Provenance>>,
1699        size: i64,
1700    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
1701        match ptr.into_pointer_or_addr() {
1702            Ok(ptr) => match M::ptr_get_alloc(self, ptr, size) {
1703                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
1704                None => {
1705                    assert!(M::Provenance::OFFSET_IS_ADDR);
1706                    // Offset is absolute, as we just asserted.
1707                    let (_, addr) = ptr.into_raw_parts();
1708                    Err(addr.bytes())
1709                }
1710            },
1711            Err(addr) => Err(addr.bytes()),
1712        }
1713    }
1714
1715    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
1716    ///
1717    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
1718    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
1719    /// where a wildcard pointer sits right in between two allocations.
1720    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
1721    /// for handling wildcard pointers.
1722    ///
1723    /// The result must be used immediately; it is not allowed to convert
1724    /// the returned data back into a `Pointer` and store that in machine state.
1725    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
1726    /// we don't have an operation to turn it back into `M::Provenance`.)
1727    #[inline(always)]
1728    pub fn ptr_get_alloc_id(
1729        &self,
1730        ptr: Pointer<Option<M::Provenance>>,
1731        size: i64,
1732    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
1733        self.ptr_try_get_alloc_id(ptr, size)
1734            .map_err(|offset| {
1735                err_ub!(DanglingIntPointer {
1736                    addr: offset,
1737                    inbounds_size: size,
1738                    msg: CheckInAllocMsg::Dereferenceable
1739                })
1740            })
1741            .into()
1742    }
1743}
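// Usage sketch (hypothetical interpreter context `ecx`; not part of this file):
// resolving a pointer ahead of an access typically looks like
//     let (alloc_id, offset, _extra) = ecx.ptr_get_alloc_id(ptr, 0)?;
//     let info = ecx.get_alloc_info(alloc_id);
//     debug_assert!(offset <= info.size);
// A bare integer address surfaces as `Err(addr)` from `ptr_try_get_alloc_id` and is
// reported as a `DanglingIntPointer` error by `ptr_get_alloc_id`.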