1//! The virtual memory representation of the MIR interpreter.
23mod init_mask;
4mod provenance_map;
56use std::alloc::{self, Layout};
7use std::borrow::Cow;
8use std::hash::Hash;
9use std::ops::{Deref, DerefMut, Range};
10use std::{fmt, hash, ptr};
1112use either::{Left, Right};
13use init_mask::*;
14pub use init_mask::{InitChunk, InitChunkIter};
15use provenance_map::*;
16use rustc_abi::{Align, HasDataLayout, Size};
17use rustc_ast::Mutability;
18use rustc_data_structures::intern::Interned;
19use rustc_macros::HashStable;
20use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
2122use super::{
23AllocId, BadBytesAccess, CtfeProvenance, InterpErrorKind, InterpResult, Pointer, Provenance,
24ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UnsupportedOpInfo,
25interp_ok, read_target_uint, write_target_uint,
26};
27use crate::ty;
2829/// Functionality required for the bytes of an `Allocation`.
30pub trait AllocBytes: Clone + fmt::Debug + Deref<Target = [u8]> + DerefMut<Target = [u8]> {
31/// The type of extra parameters passed in when creating an allocation.
32 /// Can be used by `interpret::Machine` instances to make runtime-configuration-dependent
33 /// decisions about the allocation strategy.
34type AllocParams;
3536/// Create an `AllocBytes` from a slice of `u8`.
37fn from_bytes<'a>(
38 slice: impl Into<Cow<'a, [u8]>>,
39 _align: Align,
40 _params: Self::AllocParams,
41 ) -> Self;
4243/// Create a zeroed `AllocBytes` of the specified size and alignment.
44 /// Returns `None` if we ran out of memory on the host.
45fn zeroed(size: Size, _align: Align, _params: Self::AllocParams) -> Option<Self>;
4647/// Gives direct access to the raw underlying storage.
48 ///
49 /// Crucially this pointer is compatible with:
50 /// - other pointers returned by this method, and
51 /// - references returned from `deref()`, as long as there was no write.
52fn as_mut_ptr(&mut self) -> *mut u8;
5354/// Gives direct access to the raw underlying storage.
55 ///
56 /// Crucially this pointer is compatible with:
57 /// - other pointers returned by this method, and
58 /// - references returned from `deref()`, as long as there was no write.
59fn as_ptr(&self) -> *const u8;
60}
6162/// Default `bytes` for `Allocation` is a `Box<u8>`.
63impl AllocBytesfor Box<[u8]> {
64type AllocParams = ();
6566fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, _align: Align, _params: ()) -> Self {
67 Box::<[u8]>::from(slice.into())
68 }
6970fn zeroed(size: Size, _align: Align, _params: ()) -> Option<Self> {
71let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes().try_into().ok()?).ok()?;
72// SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
73let bytes = unsafe { bytes.assume_init() };
74Some(bytes)
75 }
7677fn as_mut_ptr(&mut self) -> *mut u8 {
78Box::as_mut_ptr(self).cast()
79 }
8081fn as_ptr(&self) -> *const u8 {
82Box::as_ptr(self).cast()
83 }
84}
8586/// This type represents an Allocation in the Miri/CTFE core engine.
87///
88/// Its public API is rather low-level, working directly with allocation offsets and a custom error
89/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
90/// module provides higher-level access.
91// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
92// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
93#[derive(#[automatically_derived]
impl<Prov: ::core::clone::Clone + Provenance, Extra: ::core::clone::Clone,
Bytes: ::core::clone::Clone> ::core::clone::Clone for
Allocation<Prov, Extra, Bytes> {
#[inline]
fn clone(&self) -> Allocation<Prov, Extra, Bytes> {
Allocation {
bytes: ::core::clone::Clone::clone(&self.bytes),
provenance: ::core::clone::Clone::clone(&self.provenance),
init_mask: ::core::clone::Clone::clone(&self.init_mask),
align: ::core::clone::Clone::clone(&self.align),
mutability: ::core::clone::Clone::clone(&self.mutability),
extra: ::core::clone::Clone::clone(&self.extra),
}
}
}Clone, #[automatically_derived]
impl<Prov: ::core::cmp::Eq + Provenance, Extra: ::core::cmp::Eq,
Bytes: ::core::cmp::Eq> ::core::cmp::Eq for Allocation<Prov, Extra, Bytes>
{
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Bytes>;
let _: ::core::cmp::AssertParamIsEq<ProvenanceMap<Prov>>;
let _: ::core::cmp::AssertParamIsEq<InitMask>;
let _: ::core::cmp::AssertParamIsEq<Align>;
let _: ::core::cmp::AssertParamIsEq<Mutability>;
let _: ::core::cmp::AssertParamIsEq<Extra>;
}
}Eq, #[automatically_derived]
impl<Prov: ::core::cmp::PartialEq + Provenance, Extra: ::core::cmp::PartialEq,
Bytes: ::core::cmp::PartialEq> ::core::cmp::PartialEq for
Allocation<Prov, Extra, Bytes> {
#[inline]
fn eq(&self, other: &Allocation<Prov, Extra, Bytes>) -> bool {
self.bytes == other.bytes && self.provenance == other.provenance &&
self.init_mask == other.init_mask &&
self.align == other.align &&
self.mutability == other.mutability &&
self.extra == other.extra
}
}PartialEq)]
94#[derive(const _: () =
{
impl<'__ctx, Prov: Provenance, Extra, Bytes>
::rustc_data_structures::stable_hasher::HashStable<::rustc_middle::ich::StableHashingContext<'__ctx>>
for Allocation<Prov, Extra, Bytes> where
Bytes: ::rustc_data_structures::stable_hasher::HashStable<::rustc_middle::ich::StableHashingContext<'__ctx>>,
Prov: ::rustc_data_structures::stable_hasher::HashStable<::rustc_middle::ich::StableHashingContext<'__ctx>>,
Extra: ::rustc_data_structures::stable_hasher::HashStable<::rustc_middle::ich::StableHashingContext<'__ctx>>
{
#[inline]
fn hash_stable(&self,
__hcx: &mut ::rustc_middle::ich::StableHashingContext<'__ctx>,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
Allocation {
bytes: ref __binding_0,
provenance: ref __binding_1,
init_mask: ref __binding_2,
align: ref __binding_3,
mutability: ref __binding_4,
extra: ref __binding_5 } => {
{ __binding_0.hash_stable(__hcx, __hasher); }
{ __binding_1.hash_stable(__hcx, __hasher); }
{ __binding_2.hash_stable(__hcx, __hasher); }
{ __binding_3.hash_stable(__hcx, __hasher); }
{ __binding_4.hash_stable(__hcx, __hasher); }
{ __binding_5.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable)]
95pub struct Allocation<Prov: Provenance = CtfeProvenance, Extra = (), Bytes = Box<[u8]>> {
96/// The actual bytes of the allocation.
97 /// Note that the bytes of a pointer represent the offset of the pointer.
98bytes: Bytes,
99/// Maps from byte addresses to extra provenance data for each pointer.
100 /// Only the first byte of a pointer is inserted into the map; i.e.,
101 /// every entry in this map applies to `pointer_size` consecutive bytes starting
102 /// at the given offset.
103provenance: ProvenanceMap<Prov>,
104/// Denotes which part of this allocation is initialized.
105 ///
106 /// Invariant: the uninitialized parts have no provenance.
107init_mask: InitMask,
108/// The alignment of the allocation to detect unaligned reads.
109 /// (`Align` guarantees that this is a power of two.)
110pub align: Align,
111/// `true` if the allocation is mutable.
112 /// Also used by codegen to determine if a static should be put into mutable memory,
113 /// which happens for `static mut` and `static` with interior mutability.
114pub mutability: Mutability,
115/// Extra state for the machine.
116pub extra: Extra,
117}
118119/// Helper struct that packs an alignment, mutability, and "all bytes are zero" flag together.
120///
121/// Alignment values always have 2 free high bits, and we check for this in our [`Encodable`] impl.
122struct AllocFlags {
123 align: Align,
124 mutability: Mutability,
125 all_zero: bool,
126}
127128impl<E: Encoder> Encodable<E> for AllocFlags {
129fn encode(&self, encoder: &mut E) {
130// Make sure Align::MAX can be stored with the high 2 bits unset.
131const {
132let max_supported_align_repr = u8::MAX >> 2;
133let max_supported_align = 1 << max_supported_align_repr;
134if !(Align::MAX.bytes() <= max_supported_align) {
::core::panicking::panic("assertion failed: Align::MAX.bytes() <= max_supported_align")
}assert!(Align::MAX.bytes() <= max_supported_align)135 }
136137let mut flags = self.align.bytes().trailing_zeros() as u8;
138flags |= match self.mutability {
139 Mutability::Not => 0,
140 Mutability::Mut => 1 << 6,
141 };
142flags |= (self.all_zero as u8) << 7;
143flags.encode(encoder);
144 }
145}
146147impl<D: Decoder> Decodable<D> for AllocFlags {
148fn decode(decoder: &mut D) -> Self {
149let flags: u8 = Decodable::decode(decoder);
150let align = flags & 0b0011_1111;
151let mutability = flags & 0b0100_0000;
152let all_zero = flags & 0b1000_0000;
153154let align = Align::from_bytes(1 << align).unwrap();
155let mutability = match mutability {
1560 => Mutability::Not,
157_ => Mutability::Mut,
158 };
159let all_zero = all_zero > 0;
160161AllocFlags { align, mutability, all_zero }
162 }
163}
/// Efficiently detect whether a slice of `u8` is all zero.
///
/// This is used in encoding of [`Allocation`] to special-case all-zero allocations. It is only
/// optimized a little, because for many allocations the encoding of the actual bytes does not
/// dominate runtime.
#[inline]
fn all_zero(buf: &[u8]) -> bool {
    match buf.first() {
        // In the empty case we wouldn't encode any contents even without this system where we
        // special-case allocations whose contents are all 0. We can return anything here.
        None => true,
        // Just fast-rejecting based on the first element significantly reduces the amount that
        // we end up walking the whole array.
        Some(&first) if first != 0 => false,
        // This strategy of combining all slice elements with & or | is unbeatable for the large
        // all-zero case because it is so well-understood by autovectorization.
        _ => buf.iter().fold(true, |acc, byte| acc & (*byte == 0)),
    }
}
187188/// Custom encoder for [`Allocation`] to more efficiently represent the case where all bytes are 0.
189impl<Prov: Provenance, Extra, E: Encoder> Encodable<E> for Allocation<Prov, Extra, Box<[u8]>>
190where
191ProvenanceMap<Prov>: Encodable<E>,
192 Extra: Encodable<E>,
193{
194fn encode(&self, encoder: &mut E) {
195let all_zero = all_zero(&self.bytes);
196AllocFlags { align: self.align, mutability: self.mutability, all_zero }.encode(encoder);
197198encoder.emit_usize(self.bytes.len());
199if !all_zero {
200encoder.emit_raw_bytes(&self.bytes);
201 }
202self.provenance.encode(encoder);
203self.init_mask.encode(encoder);
204self.extra.encode(encoder);
205 }
206}
207208impl<Prov: Provenance, Extra, D: Decoder> Decodable<D> for Allocation<Prov, Extra, Box<[u8]>>
209where
210ProvenanceMap<Prov>: Decodable<D>,
211 Extra: Decodable<D>,
212{
213fn decode(decoder: &mut D) -> Self {
214let AllocFlags { align, mutability, all_zero } = Decodable::decode(decoder);
215216let len = decoder.read_usize();
217let bytes = if all_zero { ::alloc::vec::from_elem(0u8, len)vec![0u8; len] } else { decoder.read_raw_bytes(len).to_vec() };
218let bytes = <Box<[u8]> as AllocBytes>::from_bytes(bytes, align, ());
219220let provenance = Decodable::decode(decoder);
221let init_mask = Decodable::decode(decoder);
222let extra = Decodable::decode(decoder);
223224Self { bytes, provenance, init_mask, align, mutability, extra }
225 }
226}
/// This is the maximum size we will hash at a time, when interning an `Allocation` and its
/// `InitMask`. Note, we hash that amount of bytes twice: at the start, and at the end of a buffer.
/// Used when these two structures are large: we only partially hash the larger fields in that
/// situation. See the comment at the top of their respective `Hash` impl for more details.
const MAX_BYTES_TO_HASH: usize = 64;

/// This is the maximum size (in bytes) for which a buffer will be fully hashed, when interning.
/// Otherwise, it will be partially hashed in 2 slices, requiring at least 2 `MAX_BYTES_TO_HASH`
/// bytes.
const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;
238239// Const allocations are only hashed for interning. However, they can be large, making the hashing
240// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
241// big buffers like the actual bytes of allocation. We can partially hash some fields when they're
242// large.
243impl hash::Hashfor Allocation {
244fn hash<H: hash::Hasher>(&self, state: &mut H) {
245let Self {
246 bytes,
247 provenance,
248 init_mask,
249 align,
250 mutability,
251 extra: (), // don't bother hashing ()
252} = self;
253254// Partially hash the `bytes` buffer when it is large. To limit collisions with common
255 // prefixes and suffixes, we hash the length and some slices of the buffer.
256let byte_count = bytes.len();
257if byte_count > MAX_HASHED_BUFFER_LEN {
258// Hash the buffer's length.
259byte_count.hash(state);
260261// And its head and tail.
262bytes[..MAX_BYTES_TO_HASH].hash(state);
263bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
264 } else {
265bytes.hash(state);
266 }
267268// Hash the other fields as usual.
269provenance.hash(state);
270init_mask.hash(state);
271align.hash(state);
272mutability.hash(state);
273 }
274}
275276/// Interned types generally have an `Outer` type and an `Inner` type, where
277/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
278/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
279/// outer type and `TyKind` is its inner type.
280///
281/// Here things are different because only const allocations are interned. This
282/// means that both the inner type (`Allocation`) and the outer type
283/// (`ConstAllocation`) are used quite a bit.
284#[derive(#[automatically_derived]
impl<'tcx> ::core::marker::Copy for ConstAllocation<'tcx> { }Copy, #[automatically_derived]
impl<'tcx> ::core::clone::Clone for ConstAllocation<'tcx> {
#[inline]
fn clone(&self) -> ConstAllocation<'tcx> {
let _: ::core::clone::AssertParamIsClone<Interned<'tcx, Allocation>>;
*self
}
}Clone, #[automatically_derived]
impl<'tcx> ::core::cmp::PartialEq for ConstAllocation<'tcx> {
#[inline]
fn eq(&self, other: &ConstAllocation<'tcx>) -> bool { self.0 == other.0 }
}PartialEq, #[automatically_derived]
impl<'tcx> ::core::cmp::Eq for ConstAllocation<'tcx> {
#[inline]
#[doc(hidden)]
#[coverage(off)]
fn assert_fields_are_eq(&self) {
let _: ::core::cmp::AssertParamIsEq<Interned<'tcx, Allocation>>;
}
}Eq, #[automatically_derived]
impl<'tcx> ::core::hash::Hash for ConstAllocation<'tcx> {
#[inline]
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
::core::hash::Hash::hash(&self.0, state)
}
}Hash, const _: () =
{
impl<'tcx, '__ctx>
::rustc_data_structures::stable_hasher::HashStable<::rustc_middle::ich::StableHashingContext<'__ctx>>
for ConstAllocation<'tcx> {
#[inline]
fn hash_stable(&self,
__hcx: &mut ::rustc_middle::ich::StableHashingContext<'__ctx>,
__hasher:
&mut ::rustc_data_structures::stable_hasher::StableHasher) {
match *self {
ConstAllocation(ref __binding_0) => {
{ __binding_0.hash_stable(__hcx, __hasher); }
}
}
}
}
};HashStable)]
285#[rustc_pass_by_value]
286pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>);
287288impl<'tcx> fmt::Debugfor ConstAllocation<'tcx> {
289fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
290// The debug representation of this is very verbose and basically useless,
291 // so don't print it.
292f.write_fmt(format_args!("ConstAllocation {{ .. }}"))write!(f, "ConstAllocation {{ .. }}")293 }
294}
295296impl<'tcx> ConstAllocation<'tcx> {
297pub fn inner(self) -> &'tcx Allocation {
298self.0.0
299}
300}
301302/// We have our own error type that does not know about the `AllocId`; that information
303/// is added when converting to `InterpError`.
304#[derive(#[automatically_derived]
impl ::core::fmt::Debug for AllocError {
#[inline]
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
match self {
AllocError::ScalarSizeMismatch(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"ScalarSizeMismatch", &__self_0),
AllocError::ReadPointerAsInt(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"ReadPointerAsInt", &__self_0),
AllocError::ReadPartialPointer(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"ReadPartialPointer", &__self_0),
AllocError::InvalidUninitBytes(__self_0) =>
::core::fmt::Formatter::debug_tuple_field1_finish(f,
"InvalidUninitBytes", &__self_0),
}
}
}Debug)]
305pub enum AllocError {
306/// A scalar had the wrong size.
307ScalarSizeMismatch(ScalarSizeMismatch),
308/// Encountered a pointer where we needed raw bytes.
309ReadPointerAsInt(Option<BadBytesAccess>),
310/// Partially copying a pointer.
311ReadPartialPointer(Size),
312/// Using uninitialized data where it is not allowed.
313InvalidUninitBytes(Option<BadBytesAccess>),
314}
315pub type AllocResult<T = ()> = Result<T, AllocError>;
316317impl From<ScalarSizeMismatch> for AllocError {
318fn from(s: ScalarSizeMismatch) -> Self {
319 AllocError::ScalarSizeMismatch(s)
320 }
321}
322323impl AllocError {
324pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpErrorKind<'tcx> {
325use AllocError::*;
326match self {
327ScalarSizeMismatch(s) => {
328 InterpErrorKind::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
329 }
330ReadPointerAsInt(info) => InterpErrorKind::Unsupported(
331 UnsupportedOpInfo::ReadPointerAsInt(info.map(|b| (alloc_id, b))),
332 ),
333ReadPartialPointer(offset) => InterpErrorKind::Unsupported(
334 UnsupportedOpInfo::ReadPartialPointer(Pointer::new(alloc_id, offset)),
335 ),
336InvalidUninitBytes(info) => InterpErrorKind::UndefinedBehavior(
337 UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
338 ),
339 }
340 }
341}
342343/// The information that makes up a memory access: offset and size.
344#[derive(#[automatically_derived]
impl ::core::marker::Copy for AllocRange { }Copy, #[automatically_derived]
impl ::core::clone::Clone for AllocRange {
#[inline]
fn clone(&self) -> AllocRange {
let _: ::core::clone::AssertParamIsClone<Size>;
*self
}
}Clone)]
345pub struct AllocRange {
346pub start: Size,
347pub size: Size,
348}
349350impl fmt::Debugfor AllocRange {
351fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
352f.write_fmt(format_args!("[{0:#x}..{1:#x}]", self.start.bytes(),
self.end().bytes()))write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())353 }
354}
355356/// Free-starting constructor for less syntactic overhead.
357#[inline(always)]
358pub fn alloc_range(start: Size, size: Size) -> AllocRange {
359AllocRange { start, size }
360}
361362impl From<Range<Size>> for AllocRange {
363#[inline]
364fn from(r: Range<Size>) -> Self {
365alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
366}
367}
368369impl From<Range<usize>> for AllocRange {
370#[inline]
371fn from(r: Range<usize>) -> Self {
372AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end))
373 }
374}
375376impl AllocRange {
377#[inline(always)]
378pub fn end(self) -> Size {
379self.start + self.size // This does overflow checking.
380}
381382/// Returns the `subrange` within this range; panics if it is not a subrange.
383#[inline]
384pub fn subrange(self, subrange: AllocRange) -> AllocRange {
385let sub_start = self.start + subrange.start;
386let range = alloc_range(sub_start, subrange.size);
387if !(range.end() <= self.end()) {
{
::core::panicking::panic_fmt(format_args!("access outside the bounds for given AllocRange"));
}
};assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
388range389 }
390}
/// Whether a new allocation should be initialized with zero-bytes.
pub enum AllocInit {
    /// Leave the new allocation uninitialized.
    Uninit,
    /// Fill the new allocation with zero-bytes and mark it as initialized.
    Zero,
}
397398// The constructors are all without extra; the extra gets added by a machine hook later.
399impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
400/// Creates an allocation initialized by the given bytes
401pub fn from_bytes<'a>(
402 slice: impl Into<Cow<'a, [u8]>>,
403 align: Align,
404 mutability: Mutability,
405 params: <Bytes as AllocBytes>::AllocParams,
406 ) -> Self {
407let bytes = Bytes::from_bytes(slice, align, params);
408let size = Size::from_bytes(bytes.len());
409Self {
410bytes,
411 provenance: ProvenanceMap::new(),
412 init_mask: InitMask::new(size, true),
413align,
414mutability,
415 extra: (),
416 }
417 }
418419pub fn from_bytes_byte_aligned_immutable<'a>(
420 slice: impl Into<Cow<'a, [u8]>>,
421 params: <Bytes as AllocBytes>::AllocParams,
422 ) -> Self {
423Allocation::from_bytes(slice, Align::ONE, Mutability::Not, params)
424 }
425426fn new_inner<R>(
427 size: Size,
428 align: Align,
429 init: AllocInit,
430 params: <Bytes as AllocBytes>::AllocParams,
431 fail: impl FnOnce() -> R,
432 ) -> Result<Self, R> {
433// We raise an error if we cannot create the allocation on the host.
434 // This results in an error that can happen non-deterministically, since the memory
435 // available to the compiler can change between runs. Normally queries are always
436 // deterministic. However, we can be non-deterministic here because all uses of const
437 // evaluation (including ConstProp!) will make compilation fail (via hard error
438 // or OOM) upon encountering a `MemoryExhausted` error.
439let bytes = Bytes::zeroed(size, align, params).ok_or_else(fail)?;
440441Ok(Allocation {
442bytes,
443 provenance: ProvenanceMap::new(),
444 init_mask: InitMask::new(
445size,
446match init {
447 AllocInit::Uninit => false,
448 AllocInit::Zero => true,
449 },
450 ),
451align,
452 mutability: Mutability::Mut,
453 extra: (),
454 })
455 }
456457/// Try to create an Allocation of `size` bytes, failing if there is not enough memory
458 /// available to the compiler to do so.
459pub fn try_new<'tcx>(
460 size: Size,
461 align: Align,
462 init: AllocInit,
463 params: <Bytes as AllocBytes>::AllocParams,
464 ) -> InterpResult<'tcx, Self> {
465Self::new_inner(size, align, init, params, || {
466 ty::tls::with(|tcx| tcx.dcx().delayed_bug("exhausted memory during interpretation"));
467 InterpErrorKind::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
468 })
469 .into()
470 }
471472/// Try to create an Allocation of `size` bytes. Aborts if there is not enough memory
473 /// available to the compiler to do so.
474 ///
475 /// Example use case: To obtain an Allocation filled with specific data,
476 /// first call this function and then call write_scalar to fill in the right data.
477pub fn new(
478 size: Size,
479 align: Align,
480 init: AllocInit,
481 params: <Bytes as AllocBytes>::AllocParams,
482 ) -> Self {
483match Self::new_inner(size, align, init, params, || {
484// `size` may actually be bigger than isize::MAX since it is a *target* size.
485 // Clamp it to isize::MAX to still give a somewhat reasonable error message.
486alloc::handle_alloc_error(
487Layout::from_size_align(
488size.bytes().min(isize::MAXas u64) as usize,
489align.bytes_usize(),
490 )
491 .unwrap(),
492 )
493 }) {
494Ok(x) => x,
495Err(x) => x,
496 }
497 }
498499/// Add the extra.
500pub fn with_extra<Extra>(self, extra: Extra) -> Allocation<Prov, Extra, Bytes> {
501Allocation {
502 bytes: self.bytes,
503 provenance: self.provenance,
504 init_mask: self.init_mask,
505 align: self.align,
506 mutability: self.mutability,
507extra,
508 }
509 }
510}
511512impl Allocation {
513/// Adjust allocation from the ones in `tcx` to a custom Machine instance
514 /// with a different `Provenance` and `Byte` type.
515pub fn adjust_from_tcx<'tcx, Prov: Provenance, Bytes: AllocBytes>(
516&self,
517 cx: &impl HasDataLayout,
518 alloc_bytes: impl FnOnce(&[u8], Align) -> InterpResult<'tcx, Bytes>,
519mut adjust_ptr: impl FnMut(Pointer<CtfeProvenance>) -> InterpResult<'tcx, Pointer<Prov>>,
520 ) -> InterpResult<'tcx, Allocation<Prov, (), Bytes>> {
521// Copy the data.
522let mut bytes = alloc_bytes(&*self.bytes, self.align)?;
523// Adjust provenance of pointers stored in this allocation.
524let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
525let ptr_size = cx.data_layout().pointer_size().bytes_usize();
526let endian = cx.data_layout().endian;
527for &(offset, alloc_id) in self.provenance.ptrs().iter() {
528let idx = offset.bytes_usize();
529let ptr_bytes = &mut bytes[idx..idx + ptr_size];
530let bits = read_target_uint(endian, ptr_bytes).unwrap();
531let (ptr_prov, ptr_offset) =
532 adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_raw_parts();
533 write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
534 new_provenance.push((offset, ptr_prov));
535 }
536// Create allocation.
537interp_ok(Allocation {
538bytes,
539 provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
540 init_mask: self.init_mask.clone(),
541 align: self.align,
542 mutability: self.mutability,
543 extra: self.extra,
544 })
545 }
546}
547548/// Raw accessors. Provide access to otherwise private bytes.
549impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
550pub fn len(&self) -> usize {
551self.bytes.len()
552 }
553554pub fn size(&self) -> Size {
555Size::from_bytes(self.len())
556 }
557558/// Looks at a slice which may contain uninitialized bytes or provenance. This differs
559 /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the
560 /// edges) at all.
561 /// This must not be used for reads affecting the interpreter execution.
562pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
563&self.bytes[range]
564 }
565566/// Returns the mask indicating which bytes are initialized.
567pub fn init_mask(&self) -> &InitMask {
568&self.init_mask
569 }
570571/// Returns the provenance map.
572pub fn provenance(&self) -> &ProvenanceMap<Prov> {
573&self.provenance
574 }
575}
576577/// Byte accessors.
578impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
579/// This is the entirely abstraction-violating way to just grab the raw bytes without
580 /// caring about provenance or initialization.
581 ///
582 /// This function also guarantees that the resulting pointer will remain stable
583 /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
584 /// on that.
585#[inline]
586pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] {
587&self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
588 }
589590/// Checks that these bytes are initialized, and then strip provenance (if possible) and return
591 /// them.
592 ///
593 /// It is the caller's responsibility to check bounds and alignment beforehand.
594 /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
595 /// on `InterpCx` instead.
596#[inline]
597pub fn get_bytes_strip_provenance(
598&self,
599 cx: &impl HasDataLayout,
600 range: AllocRange,
601 ) -> AllocResult<&[u8]> {
602self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
603 AllocError::InvalidUninitBytes(Some(BadBytesAccess {
604 access: range,
605 bad: uninit_range,
606 }))
607 })?;
608if !Prov::OFFSET_IS_ADDR && !self.provenance.range_empty(range, cx) {
609// Find the provenance.
610let (prov_range, _prov) = self611 .provenance
612 .get_range(range, cx)
613 .next()
614 .expect("there must be provenance somewhere here");
615let start = prov_range.start.max(range.start); // the pointer might begin before `range`!
616let end = prov_range.end().min(range.end()); // the pointer might end after `range`!
617return Err(AllocError::ReadPointerAsInt(Some(BadBytesAccess {
618 access: range,
619 bad: AllocRange::from(start..end),
620 })));
621 }
622Ok(self.get_bytes_unchecked(range))
623 }
624625/// This is the entirely abstraction-violating way to just get mutable access to the raw bytes.
626 /// Just calling this already marks everything as defined and removes provenance, so be sure to
627 /// actually overwrite all the data there!
628 ///
629 /// It is the caller's responsibility to check bounds and alignment beforehand.
630 /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
631 /// on `InterpCx` instead.
632pub fn get_bytes_unchecked_for_overwrite(
633&mut self,
634 cx: &impl HasDataLayout,
635 range: AllocRange,
636 ) -> &mut [u8] {
637self.mark_init(range, true);
638self.provenance.clear(range, &self.bytes, cx);
639640&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
641 }
642643/// A raw pointer variant of `get_bytes_unchecked_for_overwrite` that avoids invalidating existing immutable aliases
644 /// into this memory.
645pub fn get_bytes_unchecked_for_overwrite_ptr(
646&mut self,
647 cx: &impl HasDataLayout,
648 range: AllocRange,
649 ) -> *mut [u8] {
650self.mark_init(range, true);
651self.provenance.clear(range, &self.bytes, cx);
652653if !(range.end().bytes_usize() <= self.bytes.len()) {
::core::panicking::panic("assertion failed: range.end().bytes_usize() <= self.bytes.len()")
};assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
654 // Crucially, we go via `AllocBytes::as_mut_ptr`, not `AllocBytes::deref_mut`.
655let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
656let len = range.end().bytes_usize() - range.start.bytes_usize();
657 ptr::slice_from_raw_parts_mut(begin_ptr, len)
658 }
659660/// This gives direct mutable access to the entire buffer, just exposing their internal state
661 /// without resetting anything. Directly exposes `AllocBytes::as_mut_ptr`. Only works if
662 /// `OFFSET_IS_ADDR` is true.
663pub fn get_bytes_unchecked_raw_mut(&mut self) -> *mut u8 {
664if !Prov::OFFSET_IS_ADDR {
::core::panicking::panic("assertion failed: Prov::OFFSET_IS_ADDR")
};assert!(Prov::OFFSET_IS_ADDR);
665self.bytes.as_mut_ptr()
666 }
667668/// This gives direct immutable access to the entire buffer, just exposing their internal state
669 /// without resetting anything. Directly exposes `AllocBytes::as_ptr`. Only works if
670 /// `OFFSET_IS_ADDR` is true.
671pub fn get_bytes_unchecked_raw(&self) -> *const u8 {
672if !Prov::OFFSET_IS_ADDR {
::core::panicking::panic("assertion failed: Prov::OFFSET_IS_ADDR")
};assert!(Prov::OFFSET_IS_ADDR);
673self.bytes.as_ptr()
674 }
675}
676677/// Reading and writing.
678impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
679/// Sets the init bit for the given range.
680fn mark_init(&mut self, range: AllocRange, is_init: bool) {
681if range.size.bytes() == 0 {
682return;
683 }
684if !(self.mutability == Mutability::Mut) {
::core::panicking::panic("assertion failed: self.mutability == Mutability::Mut")
};assert!(self.mutability == Mutability::Mut);
685self.init_mask.set_range(range, is_init);
686 }
687688/// Reads a *non-ZST* scalar.
689 ///
690 /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
691 /// supports that) provenance is entirely ignored.
692 ///
693 /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
694 /// for ZSTness anyway due to integer pointers being valid for ZSTs.
695 ///
696 /// It is the caller's responsibility to check bounds and alignment beforehand.
697 /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
698pub fn read_scalar(
699&self,
700 cx: &impl HasDataLayout,
701 range: AllocRange,
702 read_provenance: bool,
703 ) -> AllocResult<Scalar<Prov>> {
704// First and foremost, if anything is uninit, bail.
705if let Err(bad) = self.init_mask.is_range_initialized(range) {
706return Err(AllocError::InvalidUninitBytes(Some(BadBytesAccess {
707 access: range,
708bad,
709 })));
710 }
711712// Get the integer part of the result. We HAVE TO check provenance before returning this!
713let bytes = self.get_bytes_unchecked(range);
714let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
715716if read_provenance {
717match (&range.size, &cx.data_layout().pointer_size()) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(range.size, cx.data_layout().pointer_size());
718719if let Some(prov) = self.provenance.read_ptr(range.start, cx)? {
720// Assemble the bits with their provenance.
721let ptr = Pointer::new(prov, Size::from_bytes(bits));
722Ok(Scalar::from_pointer(ptr, cx))
723 } else {
724// Return raw bits without provenance.
725Ok(Scalar::from_uint(bits, range.size))
726 }
727 } else {
728// We are *not* reading a pointer.
729 // If we can just ignore provenance or there is none, that's easy.
730if Prov::OFFSET_IS_ADDR || self.provenance.range_empty(range, cx) {
731// We just strip provenance.
732return Ok(Scalar::from_uint(bits, range.size));
733 }
734// There is some provenance and we don't have OFFSET_IS_ADDR. This doesn't work.
735return Err(AllocError::ReadPointerAsInt(None));
736 }
737 }
738739/// Writes a *non-ZST* scalar.
740 ///
741 /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
742 /// for ZSTness anyway due to integer pointers being valid for ZSTs.
743 ///
744 /// It is the caller's responsibility to check bounds and alignment beforehand.
745 /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
746pub fn write_scalar(
747&mut self,
748 cx: &impl HasDataLayout,
749 range: AllocRange,
750 val: Scalar<Prov>,
751 ) -> AllocResult {
752if !(self.mutability == Mutability::Mut) {
::core::panicking::panic("assertion failed: self.mutability == Mutability::Mut")
};assert!(self.mutability == Mutability::Mut);
753754// `to_bits_or_ptr_internal` is the right method because we just want to store this data
755 // as-is into memory. This also double-checks that `val.size()` matches `range.size`.
756let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
757Right(ptr) => {
758let (provenance, offset) = ptr.into_raw_parts();
759 (u128::from(offset.bytes()), Some(provenance))
760 }
761Left(data) => (data, None),
762 };
763764let endian = cx.data_layout().endian;
765// Yes we do overwrite all the bytes in `dst`.
766let dst = self.get_bytes_unchecked_for_overwrite(cx, range);
767write_target_uint(endian, dst, bytes).unwrap();
768769// See if we have to also store some provenance.
770if let Some(provenance) = provenance {
771match (&range.size, &cx.data_layout().pointer_size()) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(range.size, cx.data_layout().pointer_size());
772self.provenance.insert_ptr(range.start, provenance, cx);
773 }
774775Ok(())
776 }
777778/// Write "uninit" to the given memory range.
779pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
780self.mark_init(range, false);
781self.provenance.clear(range, &self.bytes, cx);
782 }
783784/// Mark all bytes in the given range as initialised and reset the provenance
785 /// to wildcards. This entirely breaks the normal mechanisms for tracking
786 /// initialisation and is only provided for Miri operating in native-lib
787 /// mode. UB will be missed if the underlying bytes were not actually written to.
788 ///
789 /// If `range` is `None`, defaults to performing this on the whole allocation.
790pub fn process_native_write(&mut self, cx: &impl HasDataLayout, range: Option<AllocRange>) {
791let range = range.unwrap_or_else(|| AllocRange {
792 start: Size::ZERO,
793 size: Size::from_bytes(self.len()),
794 });
795self.mark_init(range, true);
796self.provenance.write_wildcards(cx, &self.bytes, range);
797 }
798799/// Remove all provenance in the given memory range.
800pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
801self.provenance.clear(range, &self.bytes, cx);
802 }
803804pub fn provenance_merge_bytes(&mut self, cx: &impl HasDataLayout) -> bool {
805self.provenance.merge_bytes(cx)
806 }
807808pub fn provenance_prepare_copy(
809&self,
810 range: AllocRange,
811 cx: &impl HasDataLayout,
812 ) -> ProvenanceCopy<Prov> {
813self.provenance.prepare_copy(range, &self.bytes, cx)
814 }
815816/// Applies a previously prepared provenance copy.
817 /// The affected range is expected to be clear of provenance.
818 ///
819 /// This is dangerous to use as it can violate internal `Allocation` invariants!
820 /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
821pub fn provenance_apply_copy(
822&mut self,
823 copy: ProvenanceCopy<Prov>,
824 range: AllocRange,
825 repeat: u64,
826 ) {
827self.provenance.apply_copy(copy, range, repeat)
828 }
829830/// Applies a previously prepared copy of the init mask.
831 ///
832 /// This is dangerous to use as it can violate internal `Allocation` invariants!
833 /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
834pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) {
835self.init_mask.apply_copy(copy, range, repeat)
836 }
837}