//! The virtual memory representation of the MIR interpreter.

mod init_mask;
mod provenance_map;

use std::borrow::Cow;
use std::hash::Hash;
use std::ops::{Deref, DerefMut, Range};
use std::{fmt, hash, ptr};

use either::{Left, Right};
use init_mask::*;
pub use init_mask::{InitChunk, InitChunkIter};
use provenance_map::*;
use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
use rustc_macros::HashStable;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};

use super::{
    AllocId, BadBytesAccess, CtfeProvenance, InterpErrorKind, InterpResult, Pointer, Provenance,
    ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UnsupportedOpInfo,
    interp_ok, read_target_uint, write_target_uint,
};
use crate::ty;

/// Functionality required for the bytes of an `Allocation`.
pub trait AllocBytes: Clone + fmt::Debug + Deref<Target = [u8]> + DerefMut<Target = [u8]> {
    /// The type of extra parameters passed in when creating an allocation.
    /// Can be used by `interpret::Machine` instances to make runtime-configuration-dependent
    /// decisions about the allocation strategy.
    type AllocParams;

    /// Create an `AllocBytes` from a slice of `u8`.
    fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        _align: Align,
        _params: Self::AllocParams,
    ) -> Self;

    /// Create a zeroed `AllocBytes` of the specified size and alignment.
    /// Returns `None` if we ran out of memory on the host.
    fn zeroed(size: Size, _align: Align, _params: Self::AllocParams) -> Option<Self>;

    /// Gives direct access to the raw underlying storage.
    ///
    /// Crucially this pointer is compatible with:
    /// - other pointers returned by this method, and
    /// - references returned from `deref()`, as long as there was no write.
    fn as_mut_ptr(&mut self) -> *mut u8;

    /// Gives direct access to the raw underlying storage.
    ///
    /// Crucially this pointer is compatible with:
    /// - other pointers returned by this method, and
    /// - references returned from `deref()`, as long as there was no write.
    fn as_ptr(&self) -> *const u8;
}

/// Default `bytes` for `Allocation` is a `Box<[u8]>`.
impl AllocBytes for Box<[u8]> {
    type AllocParams = ();

    fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, _align: Align, _params: ()) -> Self {
        Box::<[u8]>::from(slice.into())
    }

    fn zeroed(size: Size, _align: Align, _params: ()) -> Option<Self> {
        let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes().try_into().ok()?).ok()?;
        // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
        let bytes = unsafe { bytes.assume_init() };
        Some(bytes)
    }

    fn as_mut_ptr(&mut self) -> *mut u8 {
        Box::as_mut_ptr(self).cast()
    }

    fn as_ptr(&self) -> *const u8 {
        Box::as_ptr(self).cast()
    }
}
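
// A machine that wants a different backing store only has to satisfy the trait above. As an
// illustrative sketch (not part of this module; the `VecBytes` newtype is hypothetical), a
// `Vec<u8>`-backed implementation could look roughly like this:
//
//     #[derive(Clone, Debug)]
//     struct VecBytes(Vec<u8>);
//
//     impl Deref for VecBytes { /* deref to `[u8]` */ }
//     impl DerefMut for VecBytes { /* deref_mut to `[u8]` */ }
//
//     impl AllocBytes for VecBytes {
//         type AllocParams = ();
//         fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, _align: Align, _params: ()) -> Self {
//             VecBytes(slice.into().into_owned())
//         }
//         fn zeroed(size: Size, _align: Align, _params: ()) -> Option<Self> {
//             Some(VecBytes(vec![0; size.bytes().try_into().ok()?]))
//         }
//         fn as_mut_ptr(&mut self) -> *mut u8 { self.0.as_mut_ptr() }
//         fn as_ptr(&self) -> *const u8 { self.0.as_ptr() }
//     }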

/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
// hashed (see the `Hash` impl below for more details), so the impl is not derived.
#[derive(Clone, Eq, PartialEq)]
#[derive(HashStable)]
pub struct Allocation<Prov: Provenance = CtfeProvenance, Extra = (), Bytes = Box<[u8]>> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Bytes,
    /// Maps from byte addresses to extra provenance data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    provenance: ProvenanceMap<Prov>,
    /// Denotes which part of this allocation is initialized.
    ///
    /// Invariant: the uninitialized parts have no provenance.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// Helper struct that packs an alignment, mutability, and "all bytes are zero" flag together.
///
/// Alignment values always have 2 free high bits, and we check for this in our [`Encodable`] impl.
struct AllocFlags {
    align: Align,
    mutability: Mutability,
    all_zero: bool,
}

impl<E: Encoder> Encodable<E> for AllocFlags {
    fn encode(&self, encoder: &mut E) {
        // Make sure Align::MAX can be stored with the high 2 bits unset.
        const {
            let max_supported_align_repr = u8::MAX >> 2;
            let max_supported_align = 1 << max_supported_align_repr;
            assert!(Align::MAX.bytes() <= max_supported_align)
        }

        let mut flags = self.align.bytes().trailing_zeros() as u8;
        flags |= match self.mutability {
            Mutability::Not => 0,
            Mutability::Mut => 1 << 6,
        };
        flags |= (self.all_zero as u8) << 7;
        flags.encode(encoder);
    }
}

impl<D: Decoder> Decodable<D> for AllocFlags {
    fn decode(decoder: &mut D) -> Self {
        let flags: u8 = Decodable::decode(decoder);
        let align = flags & 0b0011_1111;
        let mutability = flags & 0b0100_0000;
        let all_zero = flags & 0b1000_0000;

        let align = Align::from_bytes(1 << align).unwrap();
        let mutability = match mutability {
            0 => Mutability::Not,
            _ => Mutability::Mut,
        };
        let all_zero = all_zero > 0;

        AllocFlags { align, mutability, all_zero }
    }
}
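
// Worked example of the flag packing above (an assumed example allocation: 4-byte alignment,
// mutable, all bytes zero):
//
//     align = 4           => trailing_zeros(4) = 2 => bits 0..=5 = 0b00_0010
//     Mutability::Mut     => bit 6 = 1
//     all_zero = true     => bit 7 = 1
//     encoded flags byte  = 0b1100_0010
//
// Decoding takes the byte back apart: masking with `0b0011_1111` recovers the alignment
// exponent, bit 6 the mutability, and bit 7 the all-zero flag.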

/// Efficiently detect whether a slice of `u8` is all zero.
///
/// This is used in encoding of [`Allocation`] to special-case all-zero allocations. It is only
/// optimized a little, because for many allocations the encoding of the actual bytes does not
/// dominate runtime.
#[inline]
fn all_zero(buf: &[u8]) -> bool {
    // In the empty case we wouldn't encode any contents even without this system where we
    // special-case allocations whose contents are all 0. We can return anything in the empty case.
    if buf.is_empty() {
        return true;
    }
    // Just fast-rejecting based on the first element significantly reduces the amount that we end
    // up walking the whole array.
    if buf[0] != 0 {
        return false;
    }

    // This strategy of combining all slice elements with & or | is unbeatable for the large
    // all-zero case because it is so well-understood by autovectorization.
    buf.iter().fold(true, |acc, b| acc & (*b == 0))
}
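
// For comparison (an illustrative note, not a measured claim): the short-circuiting form
//
//     buf.iter().all(|b| *b == 0)
//
// can stop at the first non-zero byte, but that early exit adds a branch per element, which
// tends to defeat the autovectorization that the branch-free `fold` with `&` above relies on
// in the large all-zero case.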

/// Custom encoder for [`Allocation`] to more efficiently represent the case where all bytes are 0.
impl<Prov: Provenance, Extra, E: Encoder> Encodable<E> for Allocation<Prov, Extra, Box<[u8]>>
where
    ProvenanceMap<Prov>: Encodable<E>,
    Extra: Encodable<E>,
{
    fn encode(&self, encoder: &mut E) {
        let all_zero = all_zero(&self.bytes);
        AllocFlags { align: self.align, mutability: self.mutability, all_zero }.encode(encoder);

        encoder.emit_usize(self.bytes.len());
        if !all_zero {
            encoder.emit_raw_bytes(&self.bytes);
        }
        self.provenance.encode(encoder);
        self.init_mask.encode(encoder);
        self.extra.encode(encoder);
    }
}

impl<Prov: Provenance, Extra, D: Decoder> Decodable<D> for Allocation<Prov, Extra, Box<[u8]>>
where
    ProvenanceMap<Prov>: Decodable<D>,
    Extra: Decodable<D>,
{
    fn decode(decoder: &mut D) -> Self {
        let AllocFlags { align, mutability, all_zero } = Decodable::decode(decoder);

        let len = decoder.read_usize();
        let bytes = if all_zero { vec![0u8; len] } else { decoder.read_raw_bytes(len).to_vec() };
        let bytes = <Box<[u8]> as AllocBytes>::from_bytes(bytes, align, ());

        let provenance = Decodable::decode(decoder);
        let init_mask = Decodable::decode(decoder);
        let extra = Decodable::decode(decoder);

        Self { bytes, provenance, init_mask, align, mutability, extra }
    }
}
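
// The resulting serialized layout, matching the `encode`/`decode` pair above:
//
//     AllocFlags byte | byte length (usize) | raw bytes (omitted if all_zero) |
//     provenance map  | init mask           | extra
//
// so an all-zero allocation skips the raw-byte payload entirely, no matter how large it is.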

/// This is the maximum size we will hash at a time, when interning an `Allocation` and its
/// `InitMask`. Note, we hash that amount of bytes twice: at the start, and at the end of a buffer.
/// Used when these two structures are large: we only partially hash the larger fields in that
/// situation. See the comment at the top of their respective `Hash` impl for more details.
const MAX_BYTES_TO_HASH: usize = 64;

/// This is the maximum size (in bytes) for which a buffer will be fully hashed, when interning.
/// Otherwise, it will be partially hashed in 2 slices, requiring at least 2 `MAX_BYTES_TO_HASH`
/// bytes.
const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;

// Const allocations are only hashed for interning. However, they can be large, making the hashing
// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
// big buffers like the actual bytes of allocation. We can partially hash some fields when they're
// large.
impl hash::Hash for Allocation {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        let Self {
            bytes,
            provenance,
            init_mask,
            align,
            mutability,
            extra: (), // don't bother hashing ()
        } = self;

        // Partially hash the `bytes` buffer when it is large. To limit collisions with common
        // prefixes and suffixes, we hash the length and some slices of the buffer.
        let byte_count = bytes.len();
        if byte_count > MAX_HASHED_BUFFER_LEN {
            // Hash the buffer's length.
            byte_count.hash(state);

            // And its head and tail.
            bytes[..MAX_BYTES_TO_HASH].hash(state);
            bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
        } else {
            bytes.hash(state);
        }

        // Hash the other fields as usual.
        provenance.hash(state);
        init_mask.hash(state);
        align.hash(state);
        mutability.hash(state);
    }
}
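
// Concretely (an assumed example size): for a 1 MiB constant, the hasher sees the length
// (1_048_576), the first 64 bytes, and the last 64 bytes of the buffer instead of all of it.
// Buffers of up to `MAX_HASHED_BUFFER_LEN` (128) bytes are still hashed in full, so small
// allocations lose nothing.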

/// Interned types generally have an `Outer` type and an `Inner` type, where
/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
/// outer type and `TyKind` is its inner type.
///
/// Here things are different because only const allocations are interned. This
/// means that both the inner type (`Allocation`) and the outer type
/// (`ConstAllocation`) are used quite a bit.
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable)]
#[rustc_pass_by_value]
pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>);

impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The debug representation of this is very verbose and basically useless,
        // so don't print it.
        write!(f, "ConstAllocation {{ .. }}")
    }
}

impl<'tcx> ConstAllocation<'tcx> {
    pub fn inner(self) -> &'tcx Allocation {
        self.0.0
    }
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// A scalar had the wrong size.
    ScalarSizeMismatch(ScalarSizeMismatch),
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsInt(Option<BadBytesAccess>),
    /// Partially copying a pointer.
    ReadPartialPointer(Size),
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<BadBytesAccess>),
}
pub type AllocResult<T = ()> = Result<T, AllocError>;

impl From<ScalarSizeMismatch> for AllocError {
    fn from(s: ScalarSizeMismatch) -> Self {
        AllocError::ScalarSizeMismatch(s)
    }
}

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpErrorKind<'tcx> {
        use AllocError::*;
        match self {
            ScalarSizeMismatch(s) => {
                InterpErrorKind::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
            }
            ReadPointerAsInt(info) => InterpErrorKind::Unsupported(
                UnsupportedOpInfo::ReadPointerAsInt(info.map(|b| (alloc_id, b))),
            ),
            ReadPartialPointer(offset) => InterpErrorKind::Unsupported(
                UnsupportedOpInfo::ReadPartialPointer(Pointer::new(alloc_id, offset)),
            ),
            InvalidUninitBytes(info) => InterpErrorKind::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

impl fmt::Debug for AllocRange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
    }
}

/// Free-starting constructor for less syntactic overhead.
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}
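
// For example, `alloc_range(Size::from_bytes(4), Size::from_bytes(8))` describes the access
// covering bytes `4..12` of an allocation, and prints as `[0x4..0xc]` via the `Debug` impl above.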

impl From<Range<Size>> for AllocRange {
    #[inline]
    fn from(r: Range<Size>) -> Self {
        alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
    }
}

impl From<Range<usize>> for AllocRange {
    #[inline]
    fn from(r: Range<usize>) -> Self {
        AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end))
    }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}
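
// Worked example for `subrange` (assumed numbers): within the outer range `[0x4..0xc]`
// (start 4, size 8), the relative subrange (start 2, size 4) resolves to the absolute range
// `[0x6..0xa]`. A relative subrange (start 6, size 4) would end at offset 14 > 12 and trip the
// assertion above.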

/// Whether a new allocation should be initialized with zero-bytes.
pub enum AllocInit {
    Uninit,
    Zero,
}

// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
    /// Creates an allocation initialized by the given bytes
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
        params: <Bytes as AllocBytes>::AllocParams,
    ) -> Self {
        let bytes = Bytes::from_bytes(slice, align, params);
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        params: <Bytes as AllocBytes>::AllocParams,
    ) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not, params)
    }

    fn new_inner<R>(
        size: Size,
        align: Align,
        init: AllocInit,
        params: <Bytes as AllocBytes>::AllocParams,
        fail: impl FnOnce() -> R,
    ) -> Result<Self, R> {
        // We raise an error if we cannot create the allocation on the host.
        // This results in an error that can happen non-deterministically, since the memory
        // available to the compiler can change between runs. Normally queries are always
        // deterministic. However, we can be non-deterministic here because all uses of const
        // evaluation (including ConstProp!) will make compilation fail (via hard error
        // or ICE) upon encountering a `MemoryExhausted` error.
        let bytes = Bytes::zeroed(size, align, params).ok_or_else(fail)?;

        Ok(Allocation {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(
                size,
                match init {
                    AllocInit::Uninit => false,
                    AllocInit::Zero => true,
                },
            ),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    pub fn try_new<'tcx>(
        size: Size,
        align: Align,
        init: AllocInit,
        params: <Bytes as AllocBytes>::AllocParams,
    ) -> InterpResult<'tcx, Self> {
        Self::new_inner(size, align, init, params, || {
            ty::tls::with(|tcx| tcx.dcx().delayed_bug("exhausted memory during interpretation"));
            InterpErrorKind::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })
        .into()
    }

    /// Try to create an Allocation of `size` bytes, panics if there is not enough memory
    /// available to the compiler to do so.
    ///
    /// Example use case: To obtain an Allocation filled with specific data,
    /// first call this function and then call write_scalar to fill in the right data.
    pub fn new(
        size: Size,
        align: Align,
        init: AllocInit,
        params: <Bytes as AllocBytes>::AllocParams,
    ) -> Self {
        match Self::new_inner(size, align, init, params, || {
            panic!(
                "interpreter ran out of memory: cannot create allocation of {} bytes",
                size.bytes()
            );
        }) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Add the extra.
    pub fn with_extra<Extra>(self, extra: Extra) -> Allocation<Prov, Extra, Bytes> {
        Allocation {
            bytes: self.bytes,
            provenance: self.provenance,
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}
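
// Typical construction (a sketch; with the default `Box<[u8]>` backing, the `params` argument is
// just `()`):
//
//     let alloc = Allocation::<CtfeProvenance, (), Box<[u8]>>::new(
//         Size::from_bytes(16), Align::from_bytes(8).unwrap(), AllocInit::Zero, ());
//
// yields a 16-byte, 8-aligned, mutable allocation with every byte initialized to zero, ready to
// be filled via `write_scalar` and then frozen by setting `mutability` to `Mutability::Not`.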

impl Allocation {
    /// Adjust allocation from the ones in `tcx` to a custom Machine instance
    /// with a different `Provenance` and `Byte` type.
    pub fn adjust_from_tcx<'tcx, Prov: Provenance, Bytes: AllocBytes>(
        &self,
        cx: &impl HasDataLayout,
        alloc_bytes: impl FnOnce(&[u8], Align) -> InterpResult<'tcx, Bytes>,
        mut adjust_ptr: impl FnMut(Pointer<CtfeProvenance>) -> InterpResult<'tcx, Pointer<Prov>>,
    ) -> InterpResult<'tcx, Allocation<Prov, (), Bytes>> {
        // Copy the data.
        let mut bytes = alloc_bytes(&*self.bytes, self.align)?;
        // Adjust provenance of pointers stored in this allocation.
        let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
        let ptr_size = cx.data_layout().pointer_size().bytes_usize();
        let endian = cx.data_layout().endian;
        for &(offset, alloc_id) in self.provenance.ptrs().iter() {
            let idx = offset.bytes_usize();
            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
            let bits = read_target_uint(endian, ptr_bytes).unwrap();
            let (ptr_prov, ptr_offset) =
                adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_raw_parts();
            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
            new_provenance.push((offset, ptr_prov));
        }
        // Create allocation.
        interp_ok(Allocation {
            bytes,
            provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
            init_mask: self.init_mask.clone(),
            align: self.align,
            mutability: self.mutability,
            extra: self.extra,
        })
    }
}

/// Raw accessors. Provide access to otherwise private bytes.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may contain uninitialized bytes or provenance. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the provenance map.
    pub fn provenance(&self) -> &ProvenanceMap<Prov> {
        &self.provenance
    }
}

/// Byte accessors.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    /// This is the entirely abstraction-violating way to just grab the raw bytes without
    /// caring about provenance or initialization.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
    /// on that.
    #[inline]
    pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] {
        &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }

    /// Checks that these bytes are initialized, and then strips provenance (if possible) and
    /// returns them.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    #[inline]
    pub fn get_bytes_strip_provenance(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
            AllocError::InvalidUninitBytes(Some(BadBytesAccess {
                access: range,
                bad: uninit_range,
            }))
        })?;
        if !Prov::OFFSET_IS_ADDR && !self.provenance.range_empty(range, cx) {
            // Find the provenance.
            let (prov_range, _prov) = self
                .provenance
                .get_range(range, cx)
                .next()
                .expect("there must be provenance somewhere here");
            let start = prov_range.start.max(range.start); // the pointer might begin before `range`!
            let end = prov_range.end().min(range.end()); // the pointer might end after `range`!
            return Err(AllocError::ReadPointerAsInt(Some(BadBytesAccess {
                access: range,
                bad: AllocRange::from(start..end),
            })));
        }
        Ok(self.get_bytes_unchecked(range))
    }

    /// This is the entirely abstraction-violating way to just get mutable access to the raw bytes.
    /// Just calling this already marks everything as defined and removes provenance, so be sure to
    /// actually overwrite all the data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_unchecked_for_overwrite(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> &mut [u8] {
        self.mark_init(range, true);
        self.provenance.clear(range, &self.bytes, cx);

        &mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }

    /// A raw pointer variant of `get_bytes_unchecked_for_overwrite` that avoids invalidating
    /// existing immutable aliases into this memory.
    pub fn get_bytes_unchecked_for_overwrite_ptr(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> *mut [u8] {
        self.mark_init(range, true);
        self.provenance.clear(range, &self.bytes, cx);

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        // Crucially, we go via `AllocBytes::as_mut_ptr`, not `AllocBytes::deref_mut`.
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        ptr::slice_from_raw_parts_mut(begin_ptr, len)
    }

    /// This gives direct mutable access to the entire buffer, just exposing its internal state
    /// without resetting anything. Directly exposes `AllocBytes::as_mut_ptr`. Only works if
    /// `OFFSET_IS_ADDR` is true.
    pub fn get_bytes_unchecked_raw_mut(&mut self) -> *mut u8 {
        assert!(Prov::OFFSET_IS_ADDR);
        self.bytes.as_mut_ptr()
    }

    /// This gives direct immutable access to the entire buffer, just exposing its internal state
    /// without resetting anything. Directly exposes `AllocBytes::as_ptr`. Only works if
    /// `OFFSET_IS_ADDR` is true.
    pub fn get_bytes_unchecked_raw(&self) -> *const u8 {
        assert!(Prov::OFFSET_IS_ADDR);
        self.bytes.as_ptr()
    }
}

/// Reading and writing.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    /// Sets the init bit for the given range.
    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range, is_init);
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
    /// supports that) provenance is entirely ignored.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        read_provenance: bool,
    ) -> AllocResult<Scalar<Prov>> {
        // First and foremost, if anything is uninit, bail.
        if let Err(bad) = self.init_mask.is_range_initialized(range) {
            return Err(AllocError::InvalidUninitBytes(Some(BadBytesAccess {
                access: range,
                bad,
            })));
        }

        // Get the integer part of the result. We HAVE TO check provenance before returning this!
        let bytes = self.get_bytes_unchecked(range);
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();

        if read_provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size());

            if let Some(prov) = self.provenance.read_ptr(range.start, cx)? {
                // Assemble the bits with their provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                Ok(Scalar::from_pointer(ptr, cx))
            } else {
                // Return raw bits without provenance.
                Ok(Scalar::from_uint(bits, range.size))
            }
        } else {
            // We are *not* reading a pointer.
            // If we can just ignore provenance or there is none, that's easy.
            if Prov::OFFSET_IS_ADDR || self.provenance.range_empty(range, cx) {
                // We just strip provenance.
                return Ok(Scalar::from_uint(bits, range.size));
            }
            // There is some provenance and we don't have OFFSET_IS_ADDR. This doesn't work.
            return Err(AllocError::ReadPointerAsInt(None));
        }
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: Scalar<Prov>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
        // as-is into memory. This also double-checks that `val.size()` matches `range.size`.
        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
            Right(ptr) => {
                let (provenance, offset) = ptr.into_raw_parts();
                (u128::from(offset.bytes()), Some(provenance))
            }
            Left(data) => (data, None),
        };

        let endian = cx.data_layout().endian;
        // Yes we do overwrite all the bytes in `dst`.
        let dst = self.get_bytes_unchecked_for_overwrite(cx, range);
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also store some provenance.
        if let Some(provenance) = provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size());
            self.provenance.insert_ptr(range.start, provenance, cx);
        }

        Ok(())
    }
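
    // Round-trip sketch (hypothetical values; `cx` is some `HasDataLayout`): writing
    //
    //     alloc.write_scalar(cx, alloc_range(Size::ZERO, Size::from_bytes(4)),
    //                        Scalar::from_u32(0xDEAD_BEEF))?;
    //
    // marks those 4 bytes as initialized and stores them in target endianness; the matching
    //
    //     alloc.read_scalar(cx, alloc_range(Size::ZERO, Size::from_bytes(4)), false)?
    //
    // yields the same `u32` back as a `Scalar`, while reading a range that was never written
    // fails with `AllocError::InvalidUninitBytes`.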

    /// Write "uninit" to the given memory range.
    pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
        self.mark_init(range, false);
        self.provenance.clear(range, &self.bytes, cx);
    }

    /// Mark all bytes in the given range as initialised and reset the provenance
    /// to wildcards. This entirely breaks the normal mechanisms for tracking
    /// initialisation and is only provided for Miri operating in native-lib
    /// mode. UB will be missed if the underlying bytes were not actually written to.
    ///
    /// If `range` is `None`, defaults to performing this on the whole allocation.
    pub fn process_native_write(&mut self, cx: &impl HasDataLayout, range: Option<AllocRange>) {
        let range = range.unwrap_or_else(|| AllocRange {
            start: Size::ZERO,
            size: Size::from_bytes(self.len()),
        });
        self.mark_init(range, true);
        self.provenance.write_wildcards(cx, &self.bytes, range);
    }

    /// Remove all provenance in the given memory range.
    pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
        self.provenance.clear(range, &self.bytes, cx);
    }

    pub fn provenance_merge_bytes(&mut self, cx: &impl HasDataLayout) -> bool {
        self.provenance.merge_bytes(cx)
    }

    pub fn provenance_prepare_copy(
        &self,
        range: AllocRange,
        cx: &impl HasDataLayout,
    ) -> ProvenanceCopy<Prov> {
        self.provenance.prepare_copy(range, &self.bytes, cx)
    }

    /// Applies a previously prepared provenance copy.
    /// The affected range is expected to be clear of provenance.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn provenance_apply_copy(
        &mut self,
        copy: ProvenanceCopy<Prov>,
        range: AllocRange,
        repeat: u64,
    ) {
        self.provenance.apply_copy(copy, range, repeat)
    }

    /// Applies a previously prepared copy of the init mask.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) {
        self.init_mask.apply_copy(copy, range, repeat)
    }
}
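
// Sketch of how the copy helpers above are meant to compose, paraphrasing the pattern the doc
// comments describe for `mem_copy_repeatedly` in the interpreter's `memory` module (the
// `InitMask::prepare_copy` counterpart is assumed to exist in `init_mask`, matching `InitCopy`):
//
//     // On the source allocation: snapshot provenance and init state of the source range.
//     let prov_copy = src_alloc.provenance_prepare_copy(src_range, cx);
//     let init_copy = src_alloc.init_mask().prepare_copy(src_range);
//     // On the destination: copy the raw bytes, then replay the snapshots `repeat` times.
//     dst_alloc.provenance_apply_copy(prov_copy, dst_range, repeat);
//     dst_alloc.init_mask_apply_copy(init_copy, dst_range, repeat);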