//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
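//!
//! For example, a zero-sized access at an integer address like `0x4` can be fine as long
//! as `0x4` is properly aligned and non-null; that is why such operations take the address
//! as a `Scalar` and must run `check_align` before returning early for size 0.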

use std::borrow::{Borrow, Cow};
use std::cell::Cell;
use std::collections::VecDeque;
use std::{fmt, ptr};

use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::assert_matches;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::inline_fluent;
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, throw_ub_format};
use tracing::{debug, instrument, trace};

use super::{
    AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg,
    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
    err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
};
use crate::const_eval::ConstEvalErrKind;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{m}"),
        }
    }
}

/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum AllocKind {
    /// A regular live data allocation.
    LiveData,
    /// A function allocation (that fn ptrs point to).
    Function,
    /// A vtable allocation.
    VTable,
    /// A TypeId allocation.
    TypeId,
    /// A dead allocation.
    Dead,
}

/// Metadata about an `AllocId`.
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct AllocInfo {
    pub size: Size,
    pub align: Align,
    pub kind: AllocKind,
    pub mutbl: Mutability,
}

impl AllocInfo {
    fn new(size: Size, align: Align, kind: AllocKind, mutbl: Mutability) -> Self {
        Self { size, align, kind, mutbl }
    }
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => interp_ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'tcx, M: Machine<'tcx>> {
    /// Allocations local to this instance of the interpreter. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,

    /// This stores whether we are currently doing reads purely for the purpose of validation.
    /// Those reads do not trigger the machine's hooks for memory reads.
    /// Needless to say, this must only be set with great care!
    validation_in_progress: Cell<bool>,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxIndexMap::default(),
            dead_alloc_map: FxIndexMap::default(),
            validation_in_progress: Cell::new(false),
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    #[inline]
    pub fn global_root_pointer(
        &self,
        ptr: Pointer<CtfeProvenance>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc_id = ptr.provenance.alloc_id();
        // We need to handle `extern static`.
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_pointer(self, def_id);
            }
            None => {
                assert!(
                    self.memory.extra_fn_ptr_map.contains_key(&alloc_id),
                    "{alloc_id:?} is neither global nor a function pointer"
                );
            }
            _ => {}
        }
        // And we need to get the provenance.
        M::adjust_alloc_root_pointer(self, ptr, M::GLOBAL_KIND.map(MemoryKind::Machine))
    }

    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
        let id = match fn_val {
            FnVal::Instance(instance) => {
                let salt = M::get_global_alloc_salt(self, Some(instance));
                self.tcx.reserve_and_set_fn_alloc(instance, salt)
            }
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right root pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_root_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let params = self.machine.get_default_alloc_params();
        let alloc = if M::PANIC_ON_ALLOC_FAIL {
            Allocation::new(size, align, init, params)
        } else {
            Allocation::try_new(size, align, init, params)?
        };
        self.insert_allocation(alloc, kind)
    }

    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let params = self.machine.get_default_alloc_params();
        let alloc = Allocation::from_bytes(bytes, align, mutability, params);
        self.insert_allocation(alloc, kind)
    }

    pub fn insert_allocation(
        &mut self,
        alloc: Allocation<M::Provenance, (), M::Bytes>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        assert!(alloc.size() <= self.max_size_of_val());
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        // This cannot be merged with the `adjust_global_allocation` code path
        // since here we have an allocation that already uses `M::Bytes`.
        let extra = M::init_local_allocation(self, id, kind, alloc.size(), alloc.align)?;
        let alloc = alloc.with_extra(extra);
        self.memory.alloc_map.insert(id, (kind, alloc));
        M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind))
    }

    /// If this grows the allocation, `init_growth` determines
    /// whether the additional space will be initialized.
    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
        init_growth: AllocInit,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub_custom!(
                inline_fluent!(
                    "{$kind ->
                        [dealloc] deallocating
                        [realloc] reallocating
                        *[other] {\"\"}
                    } {$ptr} which does not point to the beginning of an object"
                ),
                ptr = format!("{ptr:?}"),
                kind = "realloc"
            );
        }

        // For simplicities' sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        // If requested, we zero-init the entire allocation, to ensure that a growing
        // allocation has its new bytes properly set. For the part that is copied,
        // `mem_copy` below will de-initialize things as necessary.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind, init_growth)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        interp_ok(new_ptr)
    }

    /// Mark the `const_allocate`d allocation `ptr` points to as immutable so we can intern it.
    pub fn make_const_heap_ptr_global(
        &mut self,
        ptr: Pointer<Option<CtfeProvenance>>,
    ) -> InterpResult<'tcx>
    where
        M: Machine<'tcx, MemoryKind = crate::const_eval::MemoryKind, Provenance = CtfeProvenance>,
    {
        let (alloc_id, offset, _) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            return Err(ConstEvalErrKind::ConstMakeGlobalWithOffset(ptr)).into();
        }

        if self.tcx.try_get_global_alloc(alloc_id).is_some() {
            // This points to something outside the current interpreter.
            return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
        }

        // If we can't find it in `alloc_map` it must be dangling (because we don't use
        // `extra_fn_ptr_map` in const-eval).
        let (kind, alloc) = self
            .memory
            .alloc_map
            .get_mut_or(alloc_id, || Err(ConstEvalErrKind::ConstMakeGlobalWithDanglingPtr(ptr)))?;

        // Ensure this is actually a *heap* allocation, and record it as made-global.
        match kind {
            MemoryKind::Stack | MemoryKind::CallerLocation => {
                return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
            }
            MemoryKind::Machine(crate::const_eval::MemoryKind::Heap { was_made_global }) => {
                if *was_made_global {
                    return Err(ConstEvalErrKind::ConstMakeGlobalPtrAlreadyMadeGlobal(alloc_id))
                        .into();
                }
                *was_made_global = true;
            }
        }

        // Prevent further mutation, this is now an immutable global.
        alloc.mutability = Mutability::Not;

        interp_ok(())
    }

    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr, 0)?;
        trace!("deallocating: {alloc_id:?}");

        if offset.bytes() != 0 {
            throw_ub_custom!(
                inline_fluent!(
                    "{$kind ->
                        [dealloc] deallocating
                        [realloc] reallocating
                        *[other] {\"\"}
                    } {$ptr} which does not point to the beginning of an object"
                ),
                ptr = format!("{ptr:?}"),
                kind = "dealloc",
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function { .. }) => {
                    err_ub_custom!(
                        inline_fluent!(
                            "deallocating {$alloc_id}, which is {$kind ->
                                [fn] a function
                                [vtable] a vtable
                                [static_mem] static memory
                                *[other] {\"\"}
                            }"
                        ),
                        alloc_id = alloc_id,
                        kind = "fn",
                    )
                }
                Some(GlobalAlloc::VTable(..)) => {
                    err_ub_custom!(
                        inline_fluent!(
                            "deallocating {$alloc_id}, which is {$kind ->
                                [fn] a function
                                [vtable] a vtable
                                [static_mem] static memory
                                *[other] {\"\"}
                            }"
                        ),
                        alloc_id = alloc_id,
                        kind = "vtable",
                    )
                }
                Some(GlobalAlloc::TypeId { .. }) => {
                    err_ub_custom!(
                        inline_fluent!(
                            "deallocating {$alloc_id}, which is {$kind ->
                                [fn] a function
                                [vtable] a vtable
                                [static_mem] static memory
                                *[other] {\"\"}
                            }"
                        ),
                        alloc_id = alloc_id,
                        kind = "typeid",
                    )
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_custom!(
                        inline_fluent!(
                            "deallocating {$alloc_id}, which is {$kind ->
                                [fn] a function
                                [vtable] a vtable
                                [static_mem] static memory
                                *[other] {\"\"}
                            }"
                        ),
                        alloc_id = alloc_id,
                        kind = "static_mem"
                    )
                }
                None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccess)),
            })
            .into();
        };

        if alloc.mutability.is_not() {
            throw_ub_custom!(
                inline_fluent!("deallocating immutable allocation {$alloc}"),
                alloc = alloc_id,
            );
        }
        if alloc_kind != kind {
            throw_ub_custom!(
                inline_fluent!(
                    "deallocating {$alloc}, which is {$alloc_kind} memory, using {$kind} deallocation operation"
                ),
                alloc = alloc_id,
                alloc_kind = format!("{alloc_kind}"),
                kind = format!("{kind}"),
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_custom!(
                    inline_fluent!(
                        "incorrect layout on deallocation: {$alloc} has size {$size} and alignment {$align}, but gave size {$size_found} and alignment {$align_found}"
                    ),
                    alloc = alloc_id,
                    size = alloc.size().bytes(),
                    align = alloc.align.bytes(),
                    size_found = size.bytes(),
                    align_found = align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::before_memory_deallocation(
            self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            ptr,
            (alloc_id, prov),
            size,
            alloc.align,
            kind,
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        interp_ok(())
    }
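
    // Illustrative sketch of a well-formed allocate/deallocate pairing for `deallocate_ptr`
    // (assuming an `AllocInit::Zero` variant; `ecx` is a hypothetical `InterpCx`):
    //     let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack, AllocInit::Zero)?;
    //     ecx.deallocate_ptr(ptr.into(), Some((size, align)), MemoryKind::Stack)?;
    // A mismatching `kind` or layout hits the `throw_ub_custom!` paths above.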

    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    #[inline(always)]
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(
            self,
            ptr,
            size,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let (size, align) =
                    this.get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccess)?;
                interp_ok((size, align, (alloc_id, offset, prov)))
            },
        )
    }

    /// Check if the given pointer points to live memory of the given `size`.
    /// The caller can control the error message for the out-of-bounds case.
    #[inline(always)]
    pub fn check_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }

    /// Check whether the given pointer points to live memory for a signed amount of bytes.
    /// A negative amount means that the given range of memory to the left of the pointer
    /// needs to be dereferenceable.
    pub fn check_ptr_access_signed(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
            interp_ok((size, align, ()))
        })?;
        interp_ok(())
    }
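
    // Example for `check_ptr_access_signed`: `size = 4` requires `[ptr, ptr + 4)` to be
    // dereferenceable, while `size = -4` requires the 4 bytes *below* the pointer,
    // i.e. `[ptr - 4, ptr)`.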

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. Supports signed sizes for checks "to the left" of
    /// a pointer.
    ///
    /// `alloc_size` will only get called for non-zero-sized accesses.
    ///
    /// Returns `None` if and only if the size is 0.
    fn check_and_deref_ptr<T, R: Borrow<Self>>(
        this: R,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(
            R,
            AllocId,
            Size,
            M::ProvenanceExtra,
        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        // Everything is okay with size 0.
        if size == 0 {
            return interp_ok(None);
        }

        interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
            Err(addr) => {
                // We couldn't get a proper allocation.
                throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
            }
            Ok((alloc_id, offset, prov)) => {
                let tcx = this.borrow().tcx;
                let (alloc_size, _alloc_align, ret_val) = alloc_size(this, alloc_id, offset, prov)?;
                let offset = offset.bytes();
                // Compute absolute begin and end of the range.
                let (begin, end) = if size >= 0 {
                    (Some(offset), offset.checked_add(size as u64))
                } else {
                    (offset.checked_sub(size.unsigned_abs()), Some(offset))
                };
                // Ensure both are within bounds.
                let in_bounds = begin.is_some() && end.is_some_and(|e| e <= alloc_size.bytes());
                if !in_bounds {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: tcx.sign_extend_to_target_isize(offset),
                        inbounds_size: size,
                        msg,
                    })
                }

                Some(ret_val)
            }
        })
    }
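
    // Worked example for the bounds arithmetic in `check_and_deref_ptr`: in a 16-byte
    // allocation, `offset = 12` with `size = 8` yields `begin = Some(12)`, `end = Some(20)`;
    // since `20 > 16` this is out of bounds. With `size = -4` it yields `begin = Some(8)`,
    // `end = Some(12)`, which is in bounds.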

    pub(super) fn check_misalign(
        &self,
        misaligned: Option<Misalignment>,
        msg: CheckAlignMsg,
    ) -> InterpResult<'tcx> {
        if let Some(misaligned) = misaligned {
            throw_ub!(AlignmentCheckFailed(misaligned, msg))
        }
        interp_ok(())
    }

    pub(super) fn is_ptr_misaligned(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> Option<Misalignment> {
        if !M::enforce_alignment(self) || align.bytes() == 1 {
            return None;
        }

        #[inline]
        fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
            if offset.is_multiple_of(align.bytes()) {
                None
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
            }
        }

        match self.ptr_try_get_alloc_id(ptr, 0) {
            Err(addr) => is_offset_misaligned(addr, align),
            Ok((alloc_id, offset, _prov)) => {
                let alloc_info = self.get_alloc_info(alloc_id);
                if let Some(misalign) = M::alignment_check(
                    self,
                    alloc_id,
                    alloc_info.align,
                    alloc_info.kind,
                    offset,
                    align,
                ) {
                    Some(misalign)
                } else if M::Provenance::OFFSET_IS_ADDR {
                    is_offset_misaligned(ptr.addr().bytes(), align)
                } else {
                    // Check allocation alignment and offset alignment.
                    if alloc_info.align.bytes() < align.bytes() {
                        Some(Misalignment { has: alloc_info.align, required: align })
                    } else {
                        is_offset_misaligned(offset.bytes(), align)
                    }
                }
            }
        }
    }
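
    // Worked example for `is_offset_misaligned`: `offset = 12` has two trailing zero bits,
    // so the largest power-of-two divisor is 4; checked against `align = 8` this yields
    // `Misalignment { has: 4-byte alignment, required: 8-byte alignment }`, whereas
    // `offset = 16` would pass.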

    /// Checks a pointer for misalignment.
    ///
    /// The error assumes this is checking the pointer used directly for an access.
    pub fn check_ptr_align(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        align: Align,
    ) -> InterpResult<'tcx> {
        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
        // Unlike all the other GC helpers where we check if an `AllocId` is found in the interpreter or
        // is live, here all the IDs in the map are for dead allocations so we don't
        // need to check for liveness.
        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
    }
}

/// Allocation accessors
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer provenance, so it is indirected through
    /// `M::adjust_allocation`.
    fn get_global_alloc(
        &self,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
            None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                // Thread-local statics do not have a constant address. They *must* be accessed via
                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId` that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if self.tcx.is_foreign_item(def_id) {
                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
                    // referencing arbitrary (declared) extern statics.
                    throw_unsup!(ExternStatic(def_id));
                }

                // We don't give a span -- statics don't need that, they cannot be generic or associated.
                let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
                (val, Some(def_id))
            }
        };
        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        M::adjust_global_allocation(
            self,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc.inner(),
        )
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    pub fn get_alloc_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
        let a = self.memory.alloc_map.get_or(id, || {
            // We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
            // so we use `report_err` for that.
            let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                         not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => interp_ok(&a.1),
            Err(a) => a.into(),
        }
    }

    /// Gives raw, immutable access to the `Allocation` address, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
        let alloc = self.get_alloc_raw(id)?;
        interp_ok(alloc.get_bytes_unchecked_raw())
    }

    /// Bounds-checked *but not align-checked* allocation access.
    pub fn get_ptr_alloc<'a>(
        &'a self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        let ptr_and_alloc = Self::check_and_deref_ptr(
            self,
            ptr,
            size_i64,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let alloc = this.get_alloc_raw(alloc_id)?;
                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
            },
        )?;
        // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
        // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
        // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
        if !self.memory.validation_in_progress.get() {
            if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
                M::before_alloc_access(self.tcx, &self.machine, alloc_id)?;
            }
        }

        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            if !self.memory.validation_in_progress.get() {
                M::before_memory_read(
                    self.tcx,
                    &self.machine,
                    &alloc.extra,
                    ptr,
                    (alloc_id, prov),
                    range,
                )?;
            }
            interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
        } else {
            interp_ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
        interp_ok(&self.get_alloc_raw(id)?.extra)
    }

    /// Return the `mutability` field of the given allocation.
    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
        interp_ok(self.get_alloc_raw(id)?.mutability)
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
    /// allocation.
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    pub fn get_alloc_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
        // We have "NLL problem case #3" here, which cannot be worked around without loss of
        // efficiency even for the common case where the key is in the map.
        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`, and that boils down to
        // Miri's `adjust_alloc_root_pointer` needing to look up the size of the allocation.
        // It could be avoided with a totally separate codepath in Miri for handling the absolute address
        // of global allocations, but that's not worth it.)
        if self.memory.alloc_map.get_mut(id).is_none() {
            // Slow path.
            // Allocation not found locally, go look global.
            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                 not expect that to happen",
            );
            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
        }

        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
        if alloc.mutability.is_not() {
            throw_ub!(WriteToReadOnly(id))
        }
        interp_ok((alloc, &mut self.machine))
    }

    /// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    pub fn get_alloc_bytes_unchecked_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, *mut u8> {
        let alloc = self.get_alloc_raw_mut(id)?.0;
        interp_ok(alloc.get_bytes_unchecked_raw_mut())
    }

    /// Bounds-checked *but not align-checked* allocation access.
    pub fn get_ptr_alloc_mut<'a>(
        &'a mut self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let tcx = self.tcx;
        let validation_in_progress = self.memory.validation_in_progress.get();

        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
        let ptr_and_alloc = Self::check_and_deref_ptr(
            self,
            ptr,
            size_i64,
            CheckInAllocMsg::MemoryAccess,
            |this, alloc_id, offset, prov| {
                let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
            },
        )?;

        if let Some((alloc_id, offset, prov, alloc, machine)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            if !validation_in_progress {
                // For writes, it's okay to only call those when there actually is a non-zero
                // amount of bytes to be written: a zero-sized write doesn't manifest anything.
                M::before_alloc_access(tcx, machine, alloc_id)?;
                M::before_memory_write(
                    tcx,
                    machine,
                    &mut alloc.extra,
                    ptr,
                    (alloc_id, prov),
                    range,
                )?;
            }
            interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
        } else {
            interp_ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra_mut<'a>(
        &'a mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
        interp_ok((&mut alloc.extra, machine))
    }

    /// Check whether an allocation is live. This is faster than calling
    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
    pub fn is_alloc_live(&self, id: AllocId) -> bool {
        self.memory.alloc_map.contains_key_ref(&id)
            || self.memory.extra_fn_ptr_map.contains_key(&id)
            // We check `tcx` last as that has to acquire a lock in `many-seeds` mode.
            // This also matches the order in `get_alloc_info`.
            || self.tcx.try_get_global_alloc(id).is_some()
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
            return AllocInfo::new(
                alloc.size(),
                alloc.align,
                AllocKind::LiveData,
                alloc.mutability,
            );
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if let Some(fn_val) = self.get_fn_alloc(id) {
            let align = match fn_val {
                FnVal::Instance(_instance) => {
                    // FIXME: Until we have a clear design for the effects of align(N) functions
                    // on the address of function pointers, we don't consider the align(N)
                    // attribute on functions in the interpreter.
                    // See <https://github.com/rust-lang/rust/issues/144661> for more context.
                    Align::ONE
                }
                // Machine-specific extra functions currently do not support alignment restrictions.
                FnVal::Other(_) => Align::ONE,
            };

            return AllocInfo::new(Size::ZERO, align, AllocKind::Function, Mutability::Not);
        }

        // # Global allocations
        if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
            // NOTE: `static` alignment from attributes has already been applied to the allocation.
            let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
            let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
            let kind = match global_alloc {
                GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
                GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
                GlobalAlloc::VTable { .. } => AllocKind::VTable,
                GlobalAlloc::TypeId { .. } => AllocKind::TypeId,
            };
            return AllocInfo::new(size, align, kind, mutbl);
        }

        // # Dead pointers
        let (size, align) = *self
            .memory
            .dead_alloc_map
            .get(&id)
            .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
        AllocInfo::new(size, align, AllocKind::Dead, Mutability::Not)
    }
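
    // Note on `get_alloc_info` (illustrative): even after `deallocate_ptr`, the size and
    // alignment recorded in `dead_alloc_map` are still returned here with `AllocKind::Dead`,
    // which is what keeps alignment checks on dangling zero-sized pointers working.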

    /// Obtain the size and alignment of a *live* allocation.
    fn get_live_alloc_size_and_align(
        &self,
        id: AllocId,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, (Size, Align)> {
        let info = self.get_alloc_info(id);
        if info.kind == AllocKind::Dead {
            throw_ub!(PointerUseAfterFree(id, msg))
        }
        interp_ok((info.size, info.align))
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.try_get_global_alloc(id) {
                Some(GlobalAlloc::Function { instance, .. }) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    /// Takes a pointer that is the first chunk of a `TypeId` and returns the type that its
    /// provenance refers to, as well as the segment of the hash that this pointer covers.
    pub fn get_ptr_type_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (Ty<'tcx>, u64)> {
        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
        let Some(GlobalAlloc::TypeId { ty }) = self.tcx.try_get_global_alloc(alloc_id) else {
            throw_ub_format!("invalid `TypeId` value: not all bytes carry type id metadata")
        };
        interp_ok((ty, offset.bytes()))
    }

    pub fn get_ptr_fn(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("get_ptr_fn({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
        }
        self.get_fn_alloc(alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
            .into()
    }

    /// Get the dynamic type of the given vtable pointer.
    /// If `expected_trait` is `Some`, it must be a vtable for the given trait.
    pub fn get_ptr_vtable_ty(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        expected_trait: Option<&'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>>,
    ) -> InterpResult<'tcx, Ty<'tcx>> {
        trace!("get_ptr_vtable({:?})", ptr);
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr, 0)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        }
        let Some(GlobalAlloc::VTable(ty, vtable_dyn_type)) =
            self.tcx.try_get_global_alloc(alloc_id)
        else {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        };
        if let Some(expected_dyn_type) = expected_trait {
            self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
        }
        interp_ok(ty)
    }

    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
        interp_ok(())
    }

    /// Visit all allocations reachable from the given start set, by recursively traversing the
    /// provenance information of those allocations.
    pub fn visit_reachable_allocs(
        &mut self,
        start: Vec<AllocId>,
        mut visit: impl FnMut(&mut Self, AllocId, &AllocInfo) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        let mut done = FxHashSet::default();
        let mut todo = start;
        while let Some(id) = todo.pop() {
            if !done.insert(id) {
                // We already saw this allocation before, don't process it again.
                continue;
            }
            let info = self.get_alloc_info(id);

            // Recurse, if there is data here.
            // Do this *before* invoking the callback, as the callback might mutate the
            // allocation and e.g. replace all provenance by wildcards!
            if info.kind == AllocKind::LiveData {
                let alloc = self.get_alloc_raw(id)?;
                for prov in alloc.provenance().provenances() {
                    if let Some(id) = prov.get_alloc_id() {
                        todo.push(id);
                    }
                }
            }

            // Call the callback.
            visit(self, id, &info)?;
        }
        interp_ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { ecx: self, allocs }
    }

    /// Print the allocation's bytes, without any nested allocations.
    pub fn print_alloc_bytes_for_diagnostics(&self, id: AllocId) -> String {
        // Using the "raw" access to avoid the `before_alloc_read` hook, we specifically
        // want to be able to read all memory for diagnostics, even if that is cyclic.
        let alloc = self.get_alloc_raw(id).unwrap();
        let mut bytes = String::new();
        if alloc.size() != Size::ZERO {
            bytes = "\n".into();
            // FIXME(translation) there might be pieces that are translatable.
            rustc_middle::mir::pretty::write_allocation_bytes(*self.tcx, alloc, &mut bytes, " ")
                .unwrap();
        }
        bytes
    }

    /// Find leaked allocations, remove them from memory and return them. Allocations reachable from
    /// `static_roots` or a `Global` allocation are not considered leaked, nor are allocations whose
    /// kind's `may_leak()` returns true.
    ///
    /// This is highly destructive, no more execution can happen after this!
    pub fn take_leaked_allocations(
        &mut self,
        static_roots: impl FnOnce(&Self) -> &[AllocId],
    ) -> Vec<(AllocId, MemoryKind<M::MemoryKind>, Allocation<M::Provenance, M::AllocExtra, M::Bytes>)>
    {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> =
                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                    if Some(kind) == global_kind { Some(id) } else { None }
                });
            todo.extend(static_roots(self));
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add the allocations it points to into `todo`.
                    // We only need to care about `alloc_map` memory here, as entirely unchanged
                    // global memory cannot point to memory relevant for the leak check.
                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                        todo.extend(
                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
                        );
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaked: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let mut result = Vec::new();
        for &id in leaked.iter() {
            let (kind, alloc) = self.memory.alloc_map.remove(&id).unwrap();
            result.push((id, kind, alloc));
        }
        result
    }

    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
    ///
    /// We do this so Miri's allocation access tracking does not show the validation
    /// reads as spurious accesses.
    pub fn run_for_validation_mut<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
        // This deliberately uses `==` on `bool` to follow the pattern
        // `assert!(val.replace(new) == old)`.
        assert!(
            self.memory.validation_in_progress.replace(true) == false,
            "`validation_in_progress` was already set"
        );
        let res = f(self);
        assert!(
            self.memory.validation_in_progress.replace(false) == true,
            "`validation_in_progress` was unset by someone else"
        );
        res
    }

    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
    ///
    /// We do this so Miri's allocation access tracking does not show the validation
    /// reads as spurious accesses.
    pub fn run_for_validation_ref<R>(&self, f: impl FnOnce(&Self) -> R) -> R {
        // This deliberately uses `==` on `bool` to follow the pattern
        // `assert!(val.replace(new) == old)`.
        assert!(
            self.memory.validation_in_progress.replace(true) == false,
            "`validation_in_progress` was already set"
        );
        let res = f(self);
        assert!(
            self.memory.validation_in_progress.replace(false) == true,
            "`validation_in_progress` was unset by someone else"
        );
        res
    }

    pub(super) fn validation_in_progress(&self) -> bool {
        self.memory.validation_in_progress.get()
    }
}

#[doc(hidden)]
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> {
    ecx: &'a InterpCx<'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Prov`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Prov, Extra, Bytes>,
        ) -> std::fmt::Result {
            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
            {
                allocs_to_print.push_back(alloc_id);
            }
            write!(fmt, "{}", display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{id:?}")?;
            match self.ecx.memory.alloc_map.get(id) {
                Some((kind, alloc)) => {
                    // normal alloc
                    write!(fmt, " ({kind}, ")?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        *self.ecx.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.ecx.tcx.try_get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                *self.ecx.tcx,
                                &mut allocs_to_print,
                                alloc.inner(),
                            )?;
                        }
                        Some(GlobalAlloc::Function { instance, .. }) => {
                            write!(fmt, " (fn: {instance})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
                            write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
                        }
                        Some(GlobalAlloc::TypeId { ty }) => {
                            write!(fmt, " (typeid for {ty})")?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}

/// Reading and writing.
impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
{
    pub fn as_ref<'b>(&'b self) -> AllocRef<'b, 'tcx, Prov, Extra, Bytes> {
        AllocRef { alloc: self.alloc, range: self.range, tcx: self.tcx, alloc_id: self.alloc_id }
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
        let range = self.range.subrange(range);
        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);

        self.alloc
            .write_scalar(&self.tcx, range, val)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
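    /// The value is written as one pointer-sized scalar; for example, on a 64-bit target this
    /// covers 8 bytes.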
    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
    }

    /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
    pub fn write_uninit(&mut self, range: AllocRange) {
        let range = self.range.subrange(range);

        self.alloc.write_uninit(&self.tcx, range);
    }

    /// Mark the entire referenced range as uninitialized
    pub fn write_uninit_full(&mut self) {
        self.alloc.write_uninit(&self.tcx, self.range);
    }

    /// Remove all provenance in the reference range.
    pub fn clear_provenance(&mut self) {
        self.alloc.clear_provenance(&self.tcx, self.range);
    }
}

impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_scalar(
        &self,
        range: AllocRange,
        read_provenance: bool,
    ) -> InterpResult<'tcx, Scalar<Prov>> {
        let range = self.range.subrange(range);
        self.alloc
            .read_scalar(&self.tcx, range, read_provenance)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(range, /*read_provenance*/ false)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(
            alloc_range(offset, self.tcx.data_layout().pointer_size()),
            /*read_provenance*/ true,
        )
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
        self.alloc
            .get_bytes_strip_provenance(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))
            .into()
    }

    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
    pub fn has_provenance(&self) -> bool {
        !self.alloc.provenance().range_empty(self.range, &self.tcx)
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Reads the given number of bytes from memory, and strips their provenance if possible.
    /// Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes_ptr_strip_provenance(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
            // zero-sized access
            return interp_ok(&[]);
        };
        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        interp_ok(
            alloc_ref
                .alloc
                .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
                .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
        )
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_bytes_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
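        // The iterator must know its exact length up front, i.e. `size_hint` must return matching
        // bounds -- for example, `[1u8, 2, 3].into_iter()` reports `(3, Some(3))`.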
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(len);
        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
            // zero-sized access
            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
            return interp_ok(());
        };

        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
        let bytes =
            alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range);
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
        interp_ok(())
    }

    pub fn mem_copy(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        dest: Pointer<Option<M::Provenance>>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Performs `num_copies` many copies of `size` many bytes from `src` to `dest + i*size` (where
    /// `i` is the index of the copy).
    ///
    /// Either `nonoverlapping` must be true or `num_copies` must be 1; doing repeated copies that
    /// may overlap is not supported.
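    ///
    /// For example, with a `size` of 4 bytes and `num_copies == 3`, the 4 source bytes end up at
    /// `dest`, `dest + 4`, and `dest + 8`, so 12 bytes of the destination are written in total.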
    pub fn mem_copy_repeatedly(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        dest: Pointer<Option<M::Provenance>>,
        size: Size,
        num_copies: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let tcx = self.tcx;
        // We need to do our own bounds-checks.
        let src_parts = self.get_ptr_access(src, size)?;
        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication

        // Similar to `get_ptr_alloc`, we need to call `before_alloc_access` even for zero-sized
        // reads. However, just like in `get_ptr_alloc_mut`, the write part is okay to skip for
        // zero-sized writes.
        if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(src, size.bytes().try_into().unwrap())
        {
            M::before_alloc_access(tcx, &self.machine, alloc_id)?;
        }

        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
        // and once below to get the underlying `&[mut] Allocation`.

        // Source alloc preparations and access hooks.
        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
            // Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
            return interp_ok(());
        };
        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
        let src_range = alloc_range(src_offset, size);
        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");

        // Trigger read hook.
        // For the overlapping case, it is crucial that we trigger the read hook
        // before the write hook -- the aliasing model cares about the order.
        M::before_memory_read(
            tcx,
            &self.machine,
            &src_alloc.extra,
            src,
            (src_alloc_id, src_prov),
            src_range,
        )?;
        // We need the `dest` ptr for the next operation, so we get it now.
        // We already did the source checks and called the hooks so we are good to return early.
        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
            // Zero-sized *destination*.
            return interp_ok(());
        };

        // Prepare getting source provenance.
        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
        // First copy the provenance to a temporary buffer, because
        // `get_bytes_unchecked_for_overwrite_ptr` will clear the provenance (in preparation for
        // inserting the new provenance), and that can overlap with the source range.
        let provenance = src_alloc.provenance_prepare_copy(src_range, self);
        // Prepare a copy of the initialization mask.
        let init = src_alloc.init_mask().prepare_copy(src_range);

        // Destination alloc preparations...
        let (dest_alloc, machine) = self.get_alloc_raw_mut(dest_alloc_id)?;
        let dest_range = alloc_range(dest_offset, size * num_copies);
        // ...and access hooks.
        M::before_alloc_access(tcx, machine, dest_alloc_id)?;
        M::before_memory_write(
            tcx,
            machine,
            &mut dest_alloc.extra,
            dest,
            (dest_alloc_id, dest_prov),
            dest_range,
        )?;
        // Yes we do overwrite all bytes in `dest_bytes`.
        let dest_bytes =
            dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr();

        if init.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation,
            // which may be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On
            // contemporary operating systems this can avoid physically allocating the page.
            dest_alloc.write_uninit(&tcx, dest_range);
            // `write_uninit` also resets the provenance, so we are done.
            return interp_ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src_alloc_id == dest_alloc_id {
                if nonoverlapping {
                    // `Size` additions
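                    // This is the usual half-open interval overlap check: for example, with
                    // `src_offset = 0`, `dest_offset = 2`, and `size = 4`, the ranges `[0, 4)`
                    // and `[2, 6)` overlap, so `copy_nonoverlapping` would be UB.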
                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
                    {
                        throw_ub_custom!(inline_fluent!(
                            "`copy_nonoverlapping` called on overlapping ranges"
                        ));
                    }
                }
            }
            if num_copies > 1 {
                assert!(nonoverlapping, "multi-copy only supported in non-overlapping mode");
            }

            let size_in_bytes = size.bytes_usize();
            // For particularly large arrays (where this is perf-sensitive) it's common that
            // we're writing a single byte repeatedly. So, optimize that case to a memset.
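            // For example, a copy with `size == 1` and `num_copies == 1000` becomes a single
            // `write_bytes` call covering 1000 bytes instead of 1000 separate copies.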
            if size_in_bytes == 1 {
                debug_assert!(num_copies >= 1); // we already handled the zero-sized cases above.
                // SAFETY: `src_bytes` would be read from anyway by `copy` below (num_copies >= 1).
                let value = *src_bytes;
                dest_bytes.write_bytes(value, (size * num_copies).bytes_usize());
            } else if src_alloc_id == dest_alloc_id {
                let mut dest_ptr = dest_bytes;
                for _ in 0..num_copies {
                    // Here we rely on `src` and `dest` being non-overlapping if there is more than
                    // one copy.
                    ptr::copy(src_bytes, dest_ptr, size_in_bytes);
                    dest_ptr = dest_ptr.add(size_in_bytes);
                }
            } else {
                let mut dest_ptr = dest_bytes;
                for _ in 0..num_copies {
                    ptr::copy_nonoverlapping(src_bytes, dest_ptr, size_in_bytes);
                    dest_ptr = dest_ptr.add(size_in_bytes);
                }
            }
        }

        // now fill in all the "init" data
        dest_alloc.init_mask_apply_copy(
            init,
            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
            num_copies,
        );
        // copy the provenance to the destination
        dest_alloc.provenance_apply_copy(provenance, alloc_range(dest_offset, size), num_copies);

        interp_ok(())
    }
}

/// Machine pointer introspection.
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Test if this value might be null.
    /// If the machine does not support ptr-to-int casts, this is conservative.
    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
        match scalar.try_to_scalar_int() {
            Ok(int) => interp_ok(int.is_null()),
            Err(_) => {
                // We can't cast this pointer to an integer. Can only happen during CTFE.
                let ptr = scalar.to_pointer(self)?;
                match self.ptr_try_get_alloc_id(ptr, 0) {
                    Ok((alloc_id, offset, _)) => {
                        let info = self.get_alloc_info(alloc_id);
                        if info.kind == AllocKind::TypeId {
                            // We *could* actually precisely answer this question since here,
                            // the offset *is* the integer value. But the entire point of making
                            // this a pointer is not to leak the integer value, so we say everything
                            // might be null.
                            return interp_ok(true);
                        }
                        // If the pointer is in-bounds (including "at the end"), it is definitely not null.
                        if offset <= info.size {
                            return interp_ok(false);
                        }
                        // If the allocation is N-aligned, and the offset is not divisible by N,
                        // then `base + offset` has a non-zero remainder after division by `N`,
                        // which means `base + offset` cannot be null.
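                        // For example, with a 4-byte-aligned allocation (base divisible by 4) and
                        // `offset = 7`, `base + offset` is `3 mod 4`, so it cannot be the null
                        // address 0.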
                        if !offset.bytes().is_multiple_of(info.align.bytes()) {
                            return interp_ok(false);
                        }
                        // We don't know enough, this might be null.
                        interp_ok(true)
                    }
                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
                }
            }
        }
    }

    /// Turning a "maybe pointer" into a proper pointer (and some information
    /// about where it points), or an absolute address.
    ///
    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
    /// where a wildcard pointer sits right in between two allocations.
    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
    /// for handling wildcard pointers.
    ///
    /// The result must be used immediately; it is not allowed to convert
    /// the returned data back into a `Pointer` and store that in machine state.
    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
    /// we don't have an operation to turn it back into `M::Provenance`.)
    pub fn ptr_try_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
        match ptr.into_pointer_or_addr() {
            Ok(ptr) => match M::ptr_get_alloc(self, ptr, size) {
                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
                None => {
                    assert!(M::Provenance::OFFSET_IS_ADDR);
                    // Offset is absolute, as we just asserted.
                    let (_, addr) = ptr.into_raw_parts();
                    Err(addr.bytes())
                }
            },
            Err(addr) => Err(addr.bytes()),
        }
    }

    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
    ///
    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
    /// where a wildcard pointer sits right in between two allocations.
    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
    /// for handling wildcard pointers.
    ///
    /// The result must be used immediately; it is not allowed to convert
    /// the returned data back into a `Pointer` and store that in machine state.
    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
    /// we don't have an operation to turn it back into `M::Provenance`.)
    #[inline(always)]
    pub fn ptr_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: i64,
    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
        self.ptr_try_get_alloc_id(ptr, size)
            .map_err(|offset| {
                err_ub!(DanglingIntPointer {
                    addr: offset,
                    inbounds_size: size,
                    msg: CheckInAllocMsg::Dereferenceable
                })
            })
            .into()
    }
}