rustc_middle/mir/interpret/allocation.rs
//! The virtual memory representation of the MIR interpreter.

mod init_mask;
mod provenance_map;

use std::borrow::Cow;
use std::hash::Hash;
use std::ops::{Deref, DerefMut, Range};
use std::{fmt, hash, ptr};

use either::{Left, Right};
use init_mask::*;
pub use init_mask::{InitChunk, InitChunkIter};
use provenance_map::*;
use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
use rustc_macros::{HashStable, TyDecodable, TyEncodable};

use super::{
    AllocId, BadBytesAccess, CtfeProvenance, InterpErrorKind, InterpResult, Pointer,
    PointerArithmetic, Provenance, ResourceExhaustionInfo, Scalar, ScalarSizeMismatch,
    UndefinedBehaviorInfo, UnsupportedOpInfo, interp_ok, read_target_uint, write_target_uint,
};
use crate::ty;

/// Functionality required for the bytes of an `Allocation`.
pub trait AllocBytes: Clone + fmt::Debug + Deref<Target = [u8]> + DerefMut<Target = [u8]> {
    /// Create an `AllocBytes` from a slice of `u8`.
    fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, _align: Align) -> Self;

    /// Create a zeroed `AllocBytes` of the specified size and alignment.
    /// Returns `None` if we ran out of memory on the host.
    fn zeroed(size: Size, _align: Align) -> Option<Self>;

    /// Gives direct access to the raw underlying storage.
    ///
    /// Crucially this pointer is compatible with:
    /// - other pointers returned by this method, and
    /// - references returned from `deref()`, as long as there was no write.
    fn as_mut_ptr(&mut self) -> *mut u8;

    /// Gives direct access to the raw underlying storage.
    ///
    /// Crucially this pointer is compatible with:
    /// - other pointers returned by this method, and
    /// - references returned from `deref()`, as long as there was no write.
    fn as_ptr(&self) -> *const u8;
}

/// Default `bytes` for `Allocation` is a `Box<[u8]>`.
impl AllocBytes for Box<[u8]> {
    fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, _align: Align) -> Self {
        Box::<[u8]>::from(slice.into())
    }

    fn zeroed(size: Size, _align: Align) -> Option<Self> {
        let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes().try_into().ok()?).ok()?;
        // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
        let bytes = unsafe { bytes.assume_init() };
        Some(bytes)
    }

    fn as_mut_ptr(&mut self) -> *mut u8 {
        Box::as_mut_ptr(self).cast()
    }

    fn as_ptr(&self) -> *const u8 {
        Box::as_ptr(self).cast()
    }
}
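
// A minimal sketch (illustrative only, not used in this file) of what another `AllocBytes`
// backend could look like, here backed by a plain `Vec<u8>`. Like the `Box<[u8]>` impl above it
// ignores the requested alignment; a backend that hands out raw pointers to the interpreted
// program would additionally have to honor `_align` and report host allocation failure instead
// of aborting.
//
//     impl AllocBytes for Vec<u8> {
//         fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, _align: Align) -> Self {
//             slice.into().into_owned()
//         }
//
//         fn zeroed(size: Size, _align: Align) -> Option<Self> {
//             // Unlike `try_new_zeroed_slice` above, `vec!` aborts on host OOM.
//             Some(vec![0u8; size.bytes().try_into().ok()?])
//         }
//
//         fn as_mut_ptr(&mut self) -> *mut u8 {
//             Vec::as_mut_ptr(self)
//         }
//
//         fn as_ptr(&self) -> *const u8 {
//             Vec::as_ptr(self)
//         }
//     }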

/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
// hashed (see the `Hash` impl below for more details), so the impl is not derived.
#[derive(Clone, Eq, PartialEq, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Prov: Provenance = CtfeProvenance, Extra = (), Bytes = Box<[u8]>> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Bytes,
    /// Maps from byte addresses to extra provenance data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    provenance: ProvenanceMap<Prov>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// This is the maximum size we will hash at a time, when interning an `Allocation` and its
/// `InitMask`. Note that we hash that many bytes twice: at the start, and at the end of a buffer.
/// Used when these two structures are large: we only partially hash the larger fields in that
/// situation. See the comment at the top of their respective `Hash` impl for more details.
const MAX_BYTES_TO_HASH: usize = 64;

/// This is the maximum size (in bytes) for which a buffer will be fully hashed, when interning.
/// Otherwise, it will be partially hashed in two slices of `MAX_BYTES_TO_HASH` bytes each, taken
/// from the start and the end of the buffer.
const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;
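
// As a concrete illustration of the scheme above (the numbers are examples, not part of the
// code): a 4096-byte buffer is hashed as
//
//     4096usize.hash(state);           // the length
//     bytes[..64].hash(state);         // the head
//     bytes[4096 - 64..].hash(state);  // the tail
//
// i.e. 128 bytes of payload instead of 4096, while any buffer of up to 128 bytes is hashed in
// full. See the `Hash` impl below.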

// Const allocations are only hashed for interning. However, they can be large, making the hashing
// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
// big buffers like the actual bytes of an allocation. We can partially hash some fields when
// they're large.
impl hash::Hash for Allocation {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        let Self {
            bytes,
            provenance,
            init_mask,
            align,
            mutability,
            extra: (), // don't bother hashing ()
        } = self;

        // Partially hash the `bytes` buffer when it is large. To limit collisions with common
        // prefixes and suffixes, we hash the length and some slices of the buffer.
        let byte_count = bytes.len();
        if byte_count > MAX_HASHED_BUFFER_LEN {
            // Hash the buffer's length.
            byte_count.hash(state);

            // And its head and tail.
            bytes[..MAX_BYTES_TO_HASH].hash(state);
            bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
        } else {
            bytes.hash(state);
        }

        // Hash the other fields as usual.
        provenance.hash(state);
        init_mask.hash(state);
        align.hash(state);
        mutability.hash(state);
    }
}

/// Interned types generally have an `Outer` type and an `Inner` type, where
/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
/// outer type and `TyKind` is its inner type.
///
/// Here things are different because only const allocations are interned. This
/// means that both the inner type (`Allocation`) and the outer type
/// (`ConstAllocation`) are used quite a bit.
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable)]
#[rustc_pass_by_value]
pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>);

impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The debug representation of this is very verbose and basically useless,
        // so don't print it.
        write!(f, "ConstAllocation {{ .. }}")
    }
}

impl<'tcx> ConstAllocation<'tcx> {
    pub fn inner(self) -> &'tcx Allocation {
        self.0.0
    }
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// A scalar had the wrong size.
    ScalarSizeMismatch(ScalarSizeMismatch),
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsInt(Option<BadBytesAccess>),
    /// Partially overwriting a pointer.
    OverwritePartialPointer(Size),
    /// Partially copying a pointer.
    ReadPartialPointer(Size),
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<BadBytesAccess>),
}
pub type AllocResult<T = ()> = Result<T, AllocError>;

impl From<ScalarSizeMismatch> for AllocError {
    fn from(s: ScalarSizeMismatch) -> Self {
        AllocError::ScalarSizeMismatch(s)
    }
}

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpErrorKind<'tcx> {
        use AllocError::*;
        match self {
            ScalarSizeMismatch(s) => {
                InterpErrorKind::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
            }
            ReadPointerAsInt(info) => InterpErrorKind::Unsupported(
                UnsupportedOpInfo::ReadPointerAsInt(info.map(|b| (alloc_id, b))),
            ),
            OverwritePartialPointer(offset) => InterpErrorKind::Unsupported(
                UnsupportedOpInfo::OverwritePartialPointer(Pointer::new(alloc_id, offset)),
            ),
            ReadPartialPointer(offset) => InterpErrorKind::Unsupported(
                UnsupportedOpInfo::ReadPartialPointer(Pointer::new(alloc_id, offset)),
            ),
            InvalidUninitBytes(info) => InterpErrorKind::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

impl fmt::Debug for AllocRange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
    }
}

/// Free-standing constructor for less syntactic overhead.
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl From<Range<Size>> for AllocRange {
    #[inline]
    fn from(r: Range<Size>) -> Self {
        alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
    }
}

impl From<Range<usize>> for AllocRange {
    #[inline]
    fn from(r: Range<usize>) -> Self {
        AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end))
    }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
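    ///
    /// `subrange` is interpreted relative to `self.start`. For illustration (a sketch, not
    /// compiled as a doctest in this crate):
    ///
    /// ```ignore (illustrative)
    /// // An 8-byte access at offset 10, i.e. bytes 10..18.
    /// let outer = alloc_range(Size::from_bytes(10), Size::from_bytes(8));
    /// // A 4-byte access at offset 2 *within* `outer` resolves to absolute bytes 12..16.
    /// let inner = outer.subrange(alloc_range(Size::from_bytes(2), Size::from_bytes(4)));
    /// assert_eq!(inner.start.bytes(), 12);
    /// assert_eq!(inner.end().bytes(), 16);
    /// ```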
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}

/// Whether a new allocation should be initialized with zero-bytes.
pub enum AllocInit {
    Uninit,
    Zero,
}

// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
    /// Creates an allocation initialized by the given bytes.
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = Bytes::from_bytes(slice, align);
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }

    fn new_inner<R>(
        size: Size,
        align: Align,
        init: AllocInit,
        fail: impl FnOnce() -> R,
    ) -> Result<Self, R> {
        // We raise an error if we cannot create the allocation on the host.
        // This results in an error that can happen non-deterministically, since the memory
        // available to the compiler can change between runs. Normally queries are always
        // deterministic. However, we can be non-deterministic here because all uses of const
        // evaluation (including ConstProp!) will make compilation fail (via hard error
        // or ICE) upon encountering a `MemoryExhausted` error.
        let bytes = Bytes::zeroed(size, align).ok_or_else(fail)?;

        Ok(Allocation {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(
                size,
                match init {
                    AllocInit::Uninit => false,
                    AllocInit::Zero => true,
                },
            ),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    pub fn try_new<'tcx>(size: Size, align: Align, init: AllocInit) -> InterpResult<'tcx, Self> {
        Self::new_inner(size, align, init, || {
            ty::tls::with(|tcx| tcx.dcx().delayed_bug("exhausted memory during interpretation"));
            InterpErrorKind::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })
        .into()
    }

    /// Try to create an Allocation of `size` bytes, panicking if there is not enough memory
    /// available to the compiler to do so.
    ///
    /// Example use case: to obtain an Allocation filled with specific data,
    /// first call this function and then call `write_scalar` to fill in the right data.
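    ///
    /// For illustration, a sketch of that pattern (not compiled as a doctest in this crate;
    /// assumes some `cx: &impl HasDataLayout` is in scope):
    ///
    /// ```ignore (illustrative)
    /// let mut alloc =
    ///     Allocation::new(Size::from_bytes(4), Align::from_bytes(4).unwrap(), AllocInit::Zero);
    /// alloc
    ///     .write_scalar(cx, alloc_range(Size::ZERO, Size::from_bytes(4)), Scalar::from_u32(42))
    ///     .unwrap();
    /// ```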
    pub fn new(size: Size, align: Align, init: AllocInit) -> Self {
        match Self::new_inner(size, align, init, || {
            panic!(
                "interpreter ran out of memory: cannot create allocation of {} bytes",
                size.bytes()
            );
        }) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Add the extra.
    pub fn with_extra<Extra>(self, extra: Extra) -> Allocation<Prov, Extra, Bytes> {
        Allocation {
            bytes: self.bytes,
            provenance: self.provenance,
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}

impl Allocation {
    /// Adjust this allocation (which comes from `tcx`) for use by a custom Machine instance
    /// with a different `Provenance` and `Bytes` type.
    pub fn adjust_from_tcx<'tcx, Prov: Provenance, Bytes: AllocBytes>(
        &self,
        cx: &impl HasDataLayout,
        mut alloc_bytes: impl FnMut(&[u8], Align) -> InterpResult<'tcx, Bytes>,
        mut adjust_ptr: impl FnMut(Pointer<CtfeProvenance>) -> InterpResult<'tcx, Pointer<Prov>>,
    ) -> InterpResult<'tcx, Allocation<Prov, (), Bytes>> {
        // Copy the data.
        let mut bytes = alloc_bytes(&*self.bytes, self.align)?;
        // Adjust provenance of pointers stored in this allocation.
        let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
        let endian = cx.data_layout().endian;
        for &(offset, alloc_id) in self.provenance.ptrs().iter() {
            let idx = offset.bytes_usize();
            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
            let bits = read_target_uint(endian, ptr_bytes).unwrap();
            let (ptr_prov, ptr_offset) =
                adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
            new_provenance.push((offset, ptr_prov));
        }
        // Create allocation.
        interp_ok(Allocation {
            bytes,
            provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
            init_mask: self.init_mask.clone(),
            align: self.align,
            mutability: self.mutability,
            extra: self.extra,
        })
    }
}
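
// For illustration, a sketch of an "identity" adjustment that keeps `CtfeProvenance` and the
// default byte storage, roughly what a machine with no special needs might do. The names `tcx`
// and `alloc` are assumed to be in scope; this is not compiled here.
//
//     let adjusted: Allocation<CtfeProvenance, (), Box<[u8]>> = alloc
//         .adjust_from_tcx(
//             &tcx,
//             |bytes, align| {
//                 interp_ok(<Box<[u8]> as AllocBytes>::from_bytes(Cow::Borrowed(bytes), align))
//             },
//             |ptr| interp_ok(ptr),
//         )?;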

/// Raw accessors. Provide access to otherwise private bytes.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may contain uninitialized bytes or provenance. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the provenance map.
    pub fn provenance(&self) -> &ProvenanceMap<Prov> {
        &self.provenance
    }
}

/// Byte accessors.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    /// This is the entirely abstraction-violating way to just grab the raw bytes without
    /// caring about provenance or initialization.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
    /// on that.
    #[inline]
    pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] {
        &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }

    /// Checks that these bytes are initialized, and then strips provenance (if possible) and
    /// returns them.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    #[inline]
    pub fn get_bytes_strip_provenance(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
            AllocError::InvalidUninitBytes(Some(BadBytesAccess {
                access: range,
                bad: uninit_range,
            }))
        })?;
        if !Prov::OFFSET_IS_ADDR && !self.provenance.range_empty(range, cx) {
            // Find the provenance.
            let (offset, _prov) = self
                .provenance
                .range_get_ptrs(range, cx)
                .first()
                .copied()
                .expect("there must be provenance somewhere here");
            let start = offset.max(range.start); // the pointer might begin before `range`!
            let end = (offset + cx.pointer_size()).min(range.end()); // the pointer might end after `range`!
            return Err(AllocError::ReadPointerAsInt(Some(BadBytesAccess {
                access: range,
                bad: AllocRange::from(start..end),
            })));
        }
        Ok(self.get_bytes_unchecked(range))
    }

    /// This is the entirely abstraction-violating way to just get mutable access to the raw bytes.
    /// Just calling this already marks everything as defined and removes provenance, so be sure to
    /// actually overwrite all the data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_unchecked_for_overwrite(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&mut [u8]> {
        self.mark_init(range, true);
        self.provenance.clear(range, cx)?;

        Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// A raw pointer variant of `get_bytes_unchecked_for_overwrite` that avoids invalidating
    /// existing immutable aliases into this memory.
    pub fn get_bytes_unchecked_for_overwrite_ptr(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<*mut [u8]> {
        self.mark_init(range, true);
        self.provenance.clear(range, cx)?;

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        // Crucially, we go via `AllocBytes::as_mut_ptr`, not `AllocBytes::deref_mut`.
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
    }

    /// This gives direct mutable access to the entire buffer, just exposing its internal state
    /// without resetting anything. Directly exposes `AllocBytes::as_mut_ptr`. Only works if
    /// `OFFSET_IS_ADDR` is true.
    pub fn get_bytes_unchecked_raw_mut(&mut self) -> *mut u8 {
        assert!(Prov::OFFSET_IS_ADDR);
        self.bytes.as_mut_ptr()
    }

    /// This gives direct immutable access to the entire buffer, just exposing its internal state
    /// without resetting anything. Directly exposes `AllocBytes::as_ptr`. Only works if
    /// `OFFSET_IS_ADDR` is true.
    pub fn get_bytes_unchecked_raw(&self) -> *const u8 {
        assert!(Prov::OFFSET_IS_ADDR);
        self.bytes.as_ptr()
    }
}

/// Reading and writing.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    /// Sets the init bit for the given range.
    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range, is_init);
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
    /// supports that) provenance is entirely ignored.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
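    ///
    /// For illustration, a sketch of a pointer-sized round trip through `write_scalar` and this
    /// method (not compiled as a doctest in this crate; assumes `cx: &impl HasDataLayout` and a
    /// pointer-valued `ptr_val: Scalar<Prov>` are in scope):
    ///
    /// ```ignore (illustrative)
    /// let range = alloc_range(Size::ZERO, cx.pointer_size());
    /// alloc.write_scalar(cx, range, ptr_val)?;
    /// // With `read_provenance = true`, the pointer comes back with its provenance intact.
    /// let back = alloc.read_scalar(cx, range, /* read_provenance */ true)?;
    /// ```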
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        read_provenance: bool,
    ) -> AllocResult<Scalar<Prov>> {
        // First and foremost, if anything is uninit, bail.
        if self.init_mask.is_range_initialized(range).is_err() {
            return Err(AllocError::InvalidUninitBytes(None));
        }

        // Get the integer part of the result. We HAVE TO check provenance before returning this!
        let bytes = self.get_bytes_unchecked(range);
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();

        if read_provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size);

            // When reading data with provenance, the easy case is finding provenance exactly where we
            // are reading, then we can put data and provenance back together and return that.
            if let Some(prov) = self.provenance.get_ptr(range.start) {
                // Now we can return the bits, with their appropriate provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(Scalar::from_pointer(ptr, cx));
            }

            // If we can work on pointers byte-wise, join the byte-wise provenances.
            if Prov::OFFSET_IS_ADDR {
                let mut prov = self.provenance.get(range.start, cx);
                for offset in Size::from_bytes(1)..range.size {
                    let this_prov = self.provenance.get(range.start + offset, cx);
                    prov = Prov::join(prov, this_prov);
                }
                // Now use this provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(Scalar::from_maybe_pointer(ptr, cx));
            } else {
                // Without OFFSET_IS_ADDR, the only remaining case we can handle is total absence of
                // provenance.
                if self.provenance.range_empty(range, cx) {
                    return Ok(Scalar::from_uint(bits, range.size));
                }
                // Else we have mixed provenance, that doesn't work.
                return Err(AllocError::ReadPartialPointer(range.start));
            }
        } else {
            // We are *not* reading a pointer.
            // If we can just ignore provenance or there is none, that's easy.
            if Prov::OFFSET_IS_ADDR || self.provenance.range_empty(range, cx) {
                // We just strip provenance.
                return Ok(Scalar::from_uint(bits, range.size));
            }
            // There is some provenance and we don't have OFFSET_IS_ADDR. This doesn't work.
            return Err(AllocError::ReadPointerAsInt(None));
        }
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: Scalar<Prov>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
        // as-is into memory. This also double-checks that `val.size()` matches `range.size`.
        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
            Right(ptr) => {
                let (provenance, offset) = ptr.into_parts();
                (u128::from(offset.bytes()), Some(provenance))
            }
            Left(data) => (data, None),
        };

        let endian = cx.data_layout().endian;
        // Yes we do overwrite all the bytes in `dst`.
        let dst = self.get_bytes_unchecked_for_overwrite(cx, range)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also store some provenance.
        if let Some(provenance) = provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size);
            self.provenance.insert_ptr(range.start, provenance, cx);
        }

        Ok(())
    }

    /// Write "uninit" to the given memory range.
    pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.mark_init(range, false);
        self.provenance.clear(range, cx)?;
        Ok(())
    }

    /// Initialize all previously uninitialized bytes in the entire allocation, and set
    /// provenance of everything to `Wildcard`. Before calling this, make sure all
    /// provenance in this allocation is exposed!
    pub fn prepare_for_native_write(&mut self) -> AllocResult {
        let full_range = AllocRange { start: Size::ZERO, size: Size::from_bytes(self.len()) };
        // Overwrite uninitialized bytes with 0, to ensure we don't leak whatever their value happens to be.
        for chunk in self.init_mask.range_as_init_chunks(full_range) {
            if !chunk.is_init() {
                let uninit_bytes = &mut self.bytes
                    [chunk.range().start.bytes_usize()..chunk.range().end.bytes_usize()];
                uninit_bytes.fill(0);
            }
        }
        // Mark everything as initialized now.
        self.mark_init(full_range, true);

        // Set provenance of all bytes to wildcard.
        self.provenance.write_wildcards(self.len());

        Ok(())
    }

    /// Remove all provenance in the given memory range.
    pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.provenance.clear(range, cx)?;
        Ok(())
    }

    /// Applies a previously prepared provenance copy.
    /// The affected range, as defined in the parameters to `provenance().prepare_copy`, is
    /// expected to be clear of provenance.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn provenance_apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
        self.provenance.apply_copy(copy)
    }

    /// Applies a previously prepared copy of the init mask.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) {
        self.init_mask.apply_copy(copy, range, repeat)
    }
}