rustc_const_eval/interpret/place.rs

//! Computations on places -- field projections, going from mir::Place, and writing
//! into a place.
//! All high-level functions to write to memory work on places as destinations.

use std::assert_matches::assert_matches;

use either::{Either, Left, Right};
use rustc_abi::{BackendRepr, HasDataLayout, Size};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, mir, span_bug};
use tracing::field::Empty;
use tracing::{instrument, trace};

use super::{
    AllocInit, AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance, ImmTy, Immediate, InterpCx,
    InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy, Operand, Pointer,
    Projectable, Provenance, Scalar, alloc_range, interp_ok, mir_assign_valid_types,
};
use crate::enter_trace_span;

#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
/// Information required for the sound usage of a `MemPlace`.
pub enum MemPlaceMeta<Prov: Provenance = CtfeProvenance> {
    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
    Meta(Scalar<Prov>),
    /// `Sized` types or unsized `extern type`
    None,
}

impl<Prov: Provenance> MemPlaceMeta<Prov> {
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn unwrap_meta(self) -> Scalar<Prov> {
        match self {
            Self::Meta(s) => s,
            Self::None => {
                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
            }
        }
    }

    #[inline(always)]
    pub fn has_meta(self) -> bool {
        match self {
            Self::Meta(_) => true,
            Self::None => false,
        }
    }
}

#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub(super) struct MemPlace<Prov: Provenance = CtfeProvenance> {
    /// The pointer can be a pure integer, with the `None` provenance.
    pub ptr: Pointer<Option<Prov>>,
    /// Metadata for unsized places. Interpretation is up to the type.
    /// Must not be present for sized types, but can be missing for unsized types
    /// (e.g., `extern type`).
    pub meta: MemPlaceMeta<Prov>,
    /// Stores whether this place was created based on a sufficiently aligned pointer.
    misaligned: Option<Misalignment>,
}

impl<Prov: Provenance> MemPlace<Prov> {
    /// Adjust the provenance of the main pointer (metadata is unaffected).
    fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
        MemPlace { ptr: self.ptr.map_provenance(|p| p.map(f)), ..self }
    }

    /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
    #[inline]
    fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
        Immediate::new_pointer_with_meta(self.ptr, self.meta, cx)
    }

    #[inline]
    // Not called `offset_with_meta` to avoid confusion with the trait method.
    fn offset_with_meta_<'tcx, M: Machine<'tcx, Provenance = Prov>>(
        self,
        offset: Size,
        mode: OffsetMode,
        meta: MemPlaceMeta<Prov>,
        ecx: &InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, Self> {
        debug_assert!(
            !meta.has_meta() || self.meta.has_meta(),
            "cannot use `offset_with_meta` to add metadata to a place"
        );
        let ptr = match mode {
            OffsetMode::Inbounds => {
                ecx.ptr_offset_inbounds(self.ptr, offset.bytes().try_into().unwrap())?
            }
            OffsetMode::Wrapping => self.ptr.wrapping_offset(offset, ecx),
        };
        interp_ok(MemPlace { ptr, meta, misaligned: self.misaligned })
    }
}

/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Clone, Hash, Eq, PartialEq)]
pub struct MPlaceTy<'tcx, Prov: Provenance = CtfeProvenance> {
    mplace: MemPlace<Prov>,
    pub layout: TyAndLayout<'tcx>,
}

impl<Prov: Provenance> std::fmt::Debug for MPlaceTy<'_, Prov> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Printing `layout` results in too much noise; just print a nice version of the type.
        f.debug_struct("MPlaceTy")
            .field("mplace", &self.mplace)
            .field("ty", &format_args!("{}", self.layout.ty))
            .finish()
    }
}

impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
    /// Produces a MemPlace that works for ZSTs but nothing else.
    /// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
    /// don't need to worry about memory leaks.
    #[inline]
    pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
        assert!(layout.is_zst());
        let align = layout.align.abi;
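        // Using the alignment as the address yields a dangling but well-aligned pointer;
        // since the type is zero-sized, any access through it is a zero-sized (no-op) access.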
        let ptr = Pointer::without_provenance(align.bytes()); // no provenance, absolute address
        MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None, misaligned: None }, layout }
    }

    /// Adjust the provenance of the main pointer (metadata is unaffected).
    pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
        MPlaceTy { mplace: self.mplace.map_provenance(f), ..self }
    }

    #[inline(always)]
    pub(super) fn mplace(&self) -> &MemPlace<Prov> {
        &self.mplace
    }

    #[inline(always)]
    pub fn ptr(&self) -> Pointer<Option<Prov>> {
        self.mplace.ptr
    }

    #[inline(always)]
    pub fn to_ref(&self, cx: &impl HasDataLayout) -> Immediate<Prov> {
        self.mplace.to_ref(cx)
    }
}

impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    #[inline(always)]
    fn meta(&self) -> MemPlaceMeta<Prov> {
        self.mplace.meta
    }

    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        offset: Size,
        mode: OffsetMode,
        meta: MemPlaceMeta<Prov>,
        layout: TyAndLayout<'tcx>,
        ecx: &InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, Self> {
        interp_ok(MPlaceTy {
            mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?,
            layout,
        })
    }

    #[inline(always)]
    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        _ecx: &InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        interp_ok(self.clone().into())
    }
}

#[derive(Copy, Clone, Debug)]
pub(super) enum Place<Prov: Provenance = CtfeProvenance> {
    /// A place referring to a value allocated in the `Memory` system.
    Ptr(MemPlace<Prov>),

    /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
    /// where in the local this place is located; if it is `None`, no projection has been applied
    /// and the type of the place is exactly the type of the local.
    /// Such projections are meaningful even if the offset is 0, since they can change layouts.
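    /// (E.g., projecting to the sole field of a newtype struct keeps the offset at 0 but changes
    /// the type, and hence the layout, of the place.)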
    /// (Without that optimization, we'd just always be a `MemPlace`.)
    /// `Local` places always refer to the current stack frame, so they are unstable under
    /// function calls/returns and switching between stacks of different threads!
    /// We carry around the address of the `locals` buffer of the correct stack frame as a sanity
    /// check to be able to catch some cases of using a dangling `Place`.
    ///
    /// This variant shall not be used for unsized types -- those must always live in memory.
    Local { local: mir::Local, offset: Option<Size>, locals_addr: usize },
}

/// An evaluated place, together with its type.
///
/// This may reference a stack frame by its index, so `PlaceTy` should generally not be kept around
/// for longer than a single operation. Popping and then pushing a stack frame can make `PlaceTy`
/// point to the wrong destination. If the interpreter has multiple stacks, stack switching will
/// also invalidate a `PlaceTy`.
#[derive(Clone)]
pub struct PlaceTy<'tcx, Prov: Provenance = CtfeProvenance> {
    place: Place<Prov>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
}

impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Printing `layout` results in too much noise; just print a nice version of the type.
        f.debug_struct("PlaceTy")
            .field("place", &self.place)
            .field("ty", &format_args!("{}", self.layout.ty))
            .finish()
    }
}

impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
        PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
    }
}

impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
    #[inline(always)]
    pub(super) fn place(&self) -> &Place<Prov> {
        &self.place
    }

    /// A place is either an mplace or some local.
    ///
    /// Note that the return value can be different even for logically identical places!
    /// Specifically, if a local is stored in-memory, this may return `Local` or `MPlaceTy`
    /// depending on how the place was constructed. In other words, seeing `Local` here does *not*
    /// imply that this place does not point to memory. Every caller must therefore always handle
    /// both cases.
    #[inline(always)]
    pub fn as_mplace_or_local(
        &self,
    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)> {
        match self.place {
            Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout }),
            Place::Local { local, offset, locals_addr } => {
                Right((local, offset, locals_addr, self.layout))
            }
        }
    }

    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
        self.as_mplace_or_local().left().unwrap_or_else(|| {
            bug!(
                "PlaceTy of type {} was a local when it was expected to be an MPlace",
                self.layout.ty
            )
        })
    }
}

impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    #[inline]
    fn meta(&self) -> MemPlaceMeta<Prov> {
        match self.as_mplace_or_local() {
            Left(mplace) => mplace.meta(),
            Right(_) => {
                debug_assert!(self.layout.is_sized(), "unsized locals should live in memory");
                MemPlaceMeta::None
            }
        }
    }

    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        offset: Size,
        mode: OffsetMode,
        meta: MemPlaceMeta<Prov>,
        layout: TyAndLayout<'tcx>,
        ecx: &InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, Self> {
        interp_ok(match self.as_mplace_or_local() {
            Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
            Right((local, old_offset, locals_addr, _)) => {
                debug_assert!(layout.is_sized(), "unsized locals should live in memory");
                assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
                // `Place::Local` are always in-bounds of their surrounding local, so we can just
                // check directly if this remains in-bounds. This cannot actually be violated since
                // projections are type-checked and bounds-checked.
                assert!(offset + layout.size <= self.layout.size);

                // `Size` addition is checked, so this cannot overflow.
                let new_offset = old_offset.unwrap_or(Size::ZERO) + offset;
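                // (Illustrative example: a place already at offset 8 within its local, projected
                // by another 4 bytes, ends up at `Some(Size::from_bytes(12))`.)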

                PlaceTy {
                    place: Place::Local { local, offset: Some(new_offset), locals_addr },
                    layout,
                }
            }
        })
    }

    #[inline(always)]
    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        ecx: &InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        ecx.place_to_op(self)
    }
}

// These are defined here because they produce a place.
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
    #[inline(always)]
    pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
        match self.op() {
            Operand::Indirect(mplace) => Left(MPlaceTy { mplace: *mplace, layout: self.layout }),
            Operand::Immediate(imm) => Right(ImmTy::from_immediate(*imm, self.layout)),
        }
    }

    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
        self.as_mplace_or_imm().left().unwrap_or_else(|| {
            bug!(
                "OpTy of type {} was immediate when it was expected to be an MPlace",
                self.layout.ty
            )
        })
    }
}

/// The `Writeable` trait describes interpreter values that can be written to.
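/// It is implemented for `PlaceTy` (which may still be an optimized, in-local value) and for
/// `MPlaceTy` (which always refers to memory); see the impls below.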
pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
    fn to_place(&self) -> PlaceTy<'tcx, Prov>;

    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        ecx: &mut InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
}

impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn to_place(&self) -> PlaceTy<'tcx, Prov> {
        self.clone()
    }

    #[inline(always)]
    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        ecx: &mut InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
        ecx.force_allocation(self)
    }
}

impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn to_place(&self) -> PlaceTy<'tcx, Prov> {
        self.clone().into()
    }

    #[inline(always)]
    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        _ecx: &mut InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
        interp_ok(self.clone())
    }
}

// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'tcx, Prov, M> InterpCx<'tcx, M>
where
    Prov: Provenance,
    M: Machine<'tcx, Provenance = Prov>,
{
    fn ptr_with_meta_to_mplace(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        meta: MemPlaceMeta<M::Provenance>,
        layout: TyAndLayout<'tcx>,
        unaligned: bool,
    ) -> MPlaceTy<'tcx, M::Provenance> {
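        // When `unaligned` is set we record no misalignment at all, so the usual
        // "based on aligned pointer" check is skipped for later accesses through this place.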
        let misaligned =
            if unaligned { None } else { self.is_ptr_misaligned(ptr, layout.align.abi) };
        MPlaceTy { mplace: MemPlace { ptr, meta, misaligned }, layout }
    }

    pub fn ptr_to_mplace(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        layout: TyAndLayout<'tcx>,
    ) -> MPlaceTy<'tcx, M::Provenance> {
        assert!(layout.is_sized());
        self.ptr_with_meta_to_mplace(ptr, MemPlaceMeta::None, layout, /*unaligned*/ false)
    }

    pub fn ptr_to_mplace_unaligned(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        layout: TyAndLayout<'tcx>,
    ) -> MPlaceTy<'tcx, M::Provenance> {
        assert!(layout.is_sized());
        self.ptr_with_meta_to_mplace(ptr, MemPlaceMeta::None, layout, /*unaligned*/ true)
    }

    /// Take a value, which represents a (thin or wide) reference, and make it a place.
    /// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
    ///
    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
    /// want to ever use the place for memory access!
    /// Generally prefer `deref_pointer`.
    pub fn ref_to_mplace(
        &self,
        val: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let pointee_type =
            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type");
        let layout = self.layout_of(pointee_type)?;
        let (ptr, meta) = val.to_scalar_and_meta();

        // `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
        // we hence can't call `size_and_align_of` since that asserts more validity than we want.
        let ptr = ptr.to_pointer(self)?;
        interp_ok(self.ptr_with_meta_to_mplace(ptr, meta, layout, /*unaligned*/ false))
    }

    /// Turn a mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
    /// `align` information is lost!
    /// This is the inverse of `ref_to_mplace`.
    pub fn mplace_to_ref(
        &self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        let imm = mplace.mplace.to_ref(self);
        let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
        interp_ok(ImmTy::from_immediate(imm, layout))
    }

    /// Take an operand, representing a pointer, and dereference it to a place.
    /// Corresponds to the `*` operator in Rust.
    #[instrument(skip(self), level = "trace")]
    pub fn deref_pointer(
        &self,
        src: &impl Projectable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        if src.layout().ty.is_box() {
            // Derefer should have removed all Box derefs.
            // Some `Box` are not immediates (if they have a custom allocator)
            // so the code below would fail.
            bug!("dereferencing {}", src.layout().ty);
        }

        let val = self.read_immediate(src)?;
        trace!("deref to {} on {:?}", val.layout.ty, *val);

        let mplace = self.ref_to_mplace(&val)?;
        interp_ok(mplace)
    }

    #[inline]
    pub(super) fn get_place_alloc(
        &self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let (size, _align) = self
            .size_and_align_of_val(mplace)?
            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
        // We check alignment separately, and *after* checking everything else.
        // If an access is both OOB and misaligned, we want to see the bounds error.
        let a = self.get_ptr_alloc(mplace.ptr(), size)?;
        self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn)?;
        interp_ok(a)
    }

    #[inline]
    pub(super) fn get_place_alloc_mut(
        &mut self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        let (size, _align) = self
            .size_and_align_of_val(mplace)?
            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
        // We check alignment separately, and raise that error *after* checking everything else.
        // If an access is both OOB and misaligned, we want to see the bounds error.
        // However we have to call `check_misalign` first to make the borrow checker happy.
        let misalign_res = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
        // An error from get_ptr_alloc_mut takes precedence.
        let (a, ()) = self.get_ptr_alloc_mut(mplace.ptr(), size).and(misalign_res)?;
        interp_ok(a)
    }

    /// Turn a local in the current frame into a place.
    pub fn local_to_place(
        &self,
        local: mir::Local,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
        let frame = self.frame();
        let layout = self.layout_of_local(frame, local, None)?;
        let place = if layout.is_sized() {
            // We can just always use the `Local` for sized values.
            Place::Local { local, offset: None, locals_addr: frame.locals_addr() }
        } else {
            // Other parts of the system rely on `Place::Local` never being unsized.
            match frame.locals[local].access()? {
                Operand::Immediate(_) => bug!(),
                Operand::Indirect(mplace) => Place::Ptr(*mplace),
            }
        };
        interp_ok(PlaceTy { place, layout })
    }

    /// Computes a place. You should only use this if you intend to write into this
    /// place; for reading, a more efficient alternative is `eval_place_to_op`.
    #[instrument(skip(self), level = "trace")]
    pub fn eval_place(
        &self,
        mir_place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
        let _trace =
            enter_trace_span!(M, step::eval_place, ?mir_place, tracing_separate_thread = Empty);

        let mut place = self.local_to_place(mir_place.local)?;
        // Using `try_fold` turned out to be bad for performance, hence the loop.
        for elem in mir_place.projection.iter() {
            place = self.project(&place, elem)?
        }

        trace!("{:?}", self.dump_place(&place));
        // Sanity-check the type we ended up with.
        if cfg!(debug_assertions) {
            let normalized_place_ty = self
                .instantiate_from_current_frame_and_normalize_erasing_regions(
                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
                )?;
            if !mir_assign_valid_types(
                *self.tcx,
                self.typing_env,
                self.layout_of(normalized_place_ty)?,
                place.layout,
            ) {
                span_bug!(
                    self.cur_span(),
                    "eval_place of a MIR place with type {} produced an interpreter place with type {}",
                    normalized_place_ty,
                    place.layout.ty,
                )
            }
        }
        interp_ok(place)
    }

    /// Given a place, returns either the underlying mplace or a reference to where the value of
    /// this place is stored.
    #[inline(always)]
    fn as_mplace_or_mutable_local(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<
        'tcx,
        Either<
            MPlaceTy<'tcx, M::Provenance>,
            (&mut Immediate<M::Provenance>, TyAndLayout<'tcx>, mir::Local),
        >,
    > {
        interp_ok(match place.to_place().as_mplace_or_local() {
            Left(mplace) => Left(mplace),
            Right((local, offset, locals_addr, layout)) => {
                if offset.is_some() {
                    // This has been projected to a part of this local, or had the type changed.
                    // FIXME: there are cases where we could still avoid allocating an mplace.
                    Left(place.force_mplace(self)?)
                } else {
                    debug_assert_eq!(locals_addr, self.frame().locals_addr());
                    debug_assert_eq!(self.layout_of_local(self.frame(), local, None)?, layout);
                    match self.frame_mut().locals[local].access_mut()? {
                        Operand::Indirect(mplace) => {
                            // The local is in memory.
                            Left(MPlaceTy { mplace: *mplace, layout })
                        }
                        Operand::Immediate(local_val) => {
                            // The local still has the optimized representation.
                            Right((local_val, layout, local))
                        }
                    }
                }
            }
        })
    }

    /// Write an immediate to a place
    #[inline(always)]
    #[instrument(skip(self), level = "trace")]
    pub fn write_immediate(
        &mut self,
        src: Immediate<M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_immediate_no_validate(src, dest)?;

        if M::enforce_validity(self, dest.layout()) {
            // Data got changed, better make sure it matches the type!
            // Also needed to reset padding.
            self.validate_operand(
                &dest.to_place(),
                M::enforce_validity_recursively(self, dest.layout()),
                /*reset_provenance_and_padding*/ true,
            )?;
        }

        interp_ok(())
    }

    /// Write a scalar to a place
    #[inline(always)]
    pub fn write_scalar(
        &mut self,
        val: impl Into<Scalar<M::Provenance>>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_immediate(Immediate::Scalar(val.into()), dest)
    }

    /// Write a pointer to a place
    #[inline(always)]
    pub fn write_pointer(
        &mut self,
        ptr: impl Into<Pointer<Option<M::Provenance>>>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
    }

    /// Write an immediate to a place.
    /// If you use this you are responsible for validating that things got copied at the
    /// right type.
    pub(super) fn write_immediate_no_validate(
        &mut self,
        src: Immediate<M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");

        match self.as_mplace_or_mutable_local(&dest.to_place())? {
            Right((local_val, local_layout, local)) => {
                // Local can be updated in-place.
                *local_val = src;
                // Call the machine hook (the data race detector needs to know about this write).
                if !self.validation_in_progress() {
                    M::after_local_write(self, local, /*storage_live*/ false)?;
                }
                // Double-check that the value we are storing and the local fit each other.
                // Things can go wrong in quite weird ways when this is violated.
                // Unfortunately this is too expensive to do in release builds.
                if cfg!(debug_assertions) {
                    src.assert_matches_abi(
                        local_layout.backend_repr,
                        "invalid immediate for given destination place",
                        self,
                    );
                }
            }
            Left(mplace) => {
                self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)?;
            }
        }
        interp_ok(())
    }

    /// Write an immediate to memory.
    /// If you use this you are responsible for validating that things got copied at the
    /// right layout.
    fn write_immediate_to_mplace_no_validate(
        &mut self,
        value: Immediate<M::Provenance>,
        layout: TyAndLayout<'tcx>,
        dest: MemPlace<M::Provenance>,
    ) -> InterpResult<'tcx> {
        // We use the sizes from `value` below.
        // Ensure that those match the type of the place this is written to.
        value.assert_matches_abi(
            layout.backend_repr,
            "invalid immediate for given destination place",
            self,
        );
        // Note that it is really important that the type here is the right one, and matches the
        // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
        // to handle padding properly, which is only correct if we never look at this data with the
        // wrong type.

        let tcx = *self.tcx;
        let will_later_validate = M::enforce_validity(self, layout);
        let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout })? else {
            // zero-sized access
            return interp_ok(());
        };

        match value {
            Immediate::Scalar(scalar) => {
                alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)?;
            }
            Immediate::ScalarPair(a_val, b_val) => {
                let BackendRepr::ScalarPair(_a, b) = layout.backend_repr else {
                    span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
                        layout
                    )
                };
                let a_size = a_val.size();
                let b_offset = a_size.align_to(b.align(&tcx).abi);
                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
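                // (Illustrative example: for a 1-byte scalar `a` and a scalar `b` with 4-byte
                // alignment, `a_size` is 1 and `b_offset` is `align_to(1, 4)` = 4 bytes.)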

                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                // but that does not work: we could be a newtype around a pair, in which case the
                // fields do not match the `ScalarPair` components.

                // In preparation, if we do *not* later reset the padding, we clear the entire
                // destination now to ensure that no stray pointer fragments are being
                // preserved (see <https://github.com/rust-lang/rust/issues/148470>).
                // We can skip this if there is no padding (e.g. for wide pointers).
                if !will_later_validate && a_size + b_val.size() != layout.size {
                    alloc.write_uninit_full();
                }

                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
                alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
            }
            Immediate::Uninit => alloc.write_uninit_full(),
        }
        interp_ok(())
    }

    pub fn write_uninit(
        &mut self,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        match self.as_mplace_or_mutable_local(&dest.to_place())? {
            Right((local_val, _local_layout, local)) => {
                *local_val = Immediate::Uninit;
                // Call the machine hook (the data race detector needs to know about this write).
                if !self.validation_in_progress() {
                    M::after_local_write(self, local, /*storage_live*/ false)?;
                }
            }
            Left(mplace) => {
                let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
                    // Zero-sized access
                    return interp_ok(());
                };
                alloc.write_uninit_full();
            }
        }
        interp_ok(())
    }

    /// Remove all provenance in the given place.
    pub fn clear_provenance(
        &mut self,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        // If this is an efficiently represented local variable without provenance, skip the
        // `as_mplace_or_mutable_local` that would otherwise force this local into memory.
        if let Right(imm) = dest.to_op(self)?.as_mplace_or_imm() {
            if !imm.has_provenance() {
                return interp_ok(());
            }
        }
        match self.as_mplace_or_mutable_local(&dest.to_place())? {
            Right((local_val, _local_layout, local)) => {
                local_val.clear_provenance()?;
                // Call the machine hook (the data race detector needs to know about this write).
                if !self.validation_in_progress() {
                    M::after_local_write(self, local, /*storage_live*/ false)?;
                }
            }
            Left(mplace) => {
                let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
                    // Zero-sized access
                    return interp_ok(());
                };
                alloc.clear_provenance();
            }
        }
        interp_ok(())
    }

    /// Copies the data from an operand to a place.
    /// The layouts of the `src` and `dest` may disagree.
    #[inline(always)]
    pub fn copy_op_allow_transmute(
        &mut self,
        src: &impl Projectable<'tcx, M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.copy_op_inner(src, dest, /* allow_transmute */ true)
    }

    /// Copies the data from an operand to a place.
    /// `src` and `dest` must have the same layout and the copied value will be validated.
    #[inline(always)]
    pub fn copy_op(
        &mut self,
        src: &impl Projectable<'tcx, M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.copy_op_inner(src, dest, /* allow_transmute */ false)
    }

    /// Copies the data from an operand to a place.
    /// `allow_transmute` indicates whether the layouts may disagree.
    #[inline(always)]
    #[instrument(skip(self), level = "trace")]
    fn copy_op_inner(
        &mut self,
        src: &impl Projectable<'tcx, M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
        allow_transmute: bool,
    ) -> InterpResult<'tcx> {
        // These are technically *two* typed copies: `src` is a not-yet-loaded value,
        // so we're doing a typed copy at `src` type from there to some intermediate storage.
        // And then we're doing a second typed copy from that intermediate storage to `dest`.
        // But as an optimization, we only make a single direct copy here.

        // Do the actual copy.
        self.copy_op_no_validate(src, dest, allow_transmute)?;

        if M::enforce_validity(self, dest.layout()) {
            let dest = dest.to_place();
            // Given that there were two typed copies, we have to ensure this is valid at both types,
            // and we have to ensure this loses provenance and padding according to both types.
            // But if the types are identical, we only do one pass.
            if src.layout().ty != dest.layout().ty {
                self.validate_operand(
                    &dest.transmute(src.layout(), self)?,
                    M::enforce_validity_recursively(self, src.layout()),
                    /*reset_provenance_and_padding*/ true,
                )?;
            }
            self.validate_operand(
                &dest,
                M::enforce_validity_recursively(self, dest.layout()),
                /*reset_provenance_and_padding*/ true,
            )?;
        }

        interp_ok(())
    }

    /// Copies the data from an operand to a place.
    /// `allow_transmute` indicates whether the layouts may disagree.
    /// Also, if you use this you are responsible for validating that things get copied at the
    /// right type.
    #[instrument(skip(self), level = "trace")]
    pub(super) fn copy_op_no_validate(
        &mut self,
        src: &impl Projectable<'tcx, M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
        allow_transmute: bool,
    ) -> InterpResult<'tcx> {
        // We do NOT compare the types for equality, because well-typed code can
        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
        let layout_compat =
            mir_assign_valid_types(*self.tcx, self.typing_env, src.layout(), dest.layout());
        if !allow_transmute && !layout_compat {
            span_bug!(
                self.cur_span(),
                "type mismatch when copying!\nsrc: {},\ndest: {}",
                src.layout().ty,
                dest.layout().ty,
            );
        }

        // Let us see if the layout is simple enough that we can take a shortcut
        // and avoid `force_allocation`.
        let src = match self.read_immediate_raw(src)? {
            Right(src_val) => {
                assert!(!src.layout().is_unsized());
                assert!(!dest.layout().is_unsized());
                assert_eq!(src.layout().size, dest.layout().size);
                // Yay, we got a value that we can write directly.
                return if layout_compat {
                    self.write_immediate_no_validate(*src_val, dest)
                } else {
                    // This is tricky. The problematic case is `ScalarPair`: the `src_val` was
                    // loaded using the offsets defined by `src.layout`. When we put this back into
                    // the destination, we have to use the same offsets! So (a) we make sure we
                    // write back to memory, and (b) we use `dest` *with the source layout*.
                    let dest_mem = dest.force_mplace(self)?;
                    self.write_immediate_to_mplace_no_validate(
                        *src_val,
                        src.layout(),
                        dest_mem.mplace,
                    )
                };
            }
            Left(mplace) => mplace,
        };
        // Slow path, this does not fit into an immediate. Just memcpy.
        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout().ty);

        let dest = dest.force_mplace(self)?;
        let Some((dest_size, _)) = self.size_and_align_of_val(&dest)? else {
            span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
        };
        if cfg!(debug_assertions) {
            let src_size = self.size_and_align_of_val(&src)?.unwrap().0;
            assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
        } else {
            // As a cheap approximation, we compare the fixed parts of the size.
            assert_eq!(src.layout.size, dest.layout.size);
        }

        // Setting `nonoverlapping` here only has an effect when we don't hit the fast-path above,
        // but that should at least match what LLVM does where `memcpy` is also only used when the
        // type does not have Scalar/ScalarPair layout.
        // (Or as the `Assign` docs put it, assignments "not producing primitives" must be
        // non-overlapping.)
        // We check alignment separately, and *after* checking everything else.
        // If an access is both OOB and misaligned, we want to see the bounds error.
        self.mem_copy(src.ptr(), dest.ptr(), dest_size, /*nonoverlapping*/ true)?;
        self.check_misalign(src.mplace.misaligned, CheckAlignMsg::BasedOn)?;
        self.check_misalign(dest.mplace.misaligned, CheckAlignMsg::BasedOn)?;
        interp_ok(())
    }

    /// Ensures that a place is in memory, and returns where it is.
    /// If the place currently refers to a local that doesn't yet have a matching allocation,
    /// create such an allocation.
    /// This is essentially `force_to_memplace`.
    #[instrument(skip(self), level = "trace")]
    pub fn force_allocation(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let mplace = match place.place {
            Place::Local { local, offset, locals_addr } => {
                debug_assert_eq!(locals_addr, self.frame().locals_addr());
                let whole_local = match self.frame_mut().locals[local].access_mut()? {
                    &mut Operand::Immediate(local_val) => {
                        // We need to make an allocation.

                        // We need the layout of the local. We can NOT use the layout we got,
                        // that might e.g., be an inner field of a struct with `Scalar` layout,
                        // that has different alignment than the outer field.
                        let local_layout = self.layout_of_local(&self.frame(), local, None)?;
                        assert!(local_layout.is_sized(), "unsized locals cannot be immediate");
                        let mplace = self.allocate(local_layout, MemoryKind::Stack)?;
                        // Preserve old value. (As an optimization, we can skip this if it was uninit.)
                        if !matches!(local_val, Immediate::Uninit) {
                            // We don't have to validate as we can assume the local was already
                            // valid for its type. We must not use any part of `place` here, that
                            // could be a projection to a part of the local!
                            self.write_immediate_to_mplace_no_validate(
                                local_val,
                                local_layout,
                                mplace.mplace,
                            )?;
                        }
                        M::after_local_moved_to_memory(self, local, &mplace)?;
                        // Now we can call `access_mut` again, asserting it goes well, and actually
                        // overwrite things. This points to the entire allocation, not just the part
                        // the place refers to, i.e. we do this before we apply `offset`.
                        *self.frame_mut().locals[local].access_mut().unwrap() =
                            Operand::Indirect(mplace.mplace);
                        mplace.mplace
                    }
                    &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
                };
                if let Some(offset) = offset {
                    // This offset is always inbounds, no need to check it again.
                    whole_local.offset_with_meta_(
                        offset,
                        OffsetMode::Wrapping,
                        MemPlaceMeta::None,
                        self,
                    )?
                } else {
                    // Preserve wide place metadata, do not call `offset`.
                    whole_local
                }
            }
            Place::Ptr(mplace) => mplace,
        };
        // Return with the original layout and align, so that the caller can go on
        interp_ok(MPlaceTy { mplace, layout: place.layout })
    }

    pub fn allocate_dyn(
        &mut self,
        layout: TyAndLayout<'tcx>,
        kind: MemoryKind<M::MemoryKind>,
        meta: MemPlaceMeta<M::Provenance>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let Some((size, align)) = self.size_and_align_from_meta(&meta, &layout)? else {
            span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
        };
        let ptr = self.allocate_ptr(size, align, kind, AllocInit::Uninit)?;
        interp_ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout, /*unaligned*/ false))
    }

    pub fn allocate(
        &mut self,
        layout: TyAndLayout<'tcx>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        assert!(layout.is_sized());
        self.allocate_dyn(layout, kind, MemPlaceMeta::None)
    }

    /// Allocates a sequence of bytes in the interpreter's memory with alignment 1.
    /// This is allocated in immutable global memory and deduplicated.
    pub fn allocate_bytes_dedup(
        &mut self,
        bytes: &[u8],
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let salt = M::get_global_alloc_salt(self, None);
        let id = self.tcx.allocate_bytes_dedup(bytes, salt);

        // Turn untagged "global" pointers (obtained via `tcx`) into the machine pointer to the allocation.
        M::adjust_alloc_root_pointer(
            &self,
            Pointer::from(id),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        )
    }

    /// Allocates a string in the interpreter's memory, returning it as a (wide) place.
    /// This is allocated in immutable global memory and deduplicated.
    pub fn allocate_str_dedup(
        &mut self,
        s: &str,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let bytes = s.as_bytes();
        let ptr = self.allocate_bytes_dedup(bytes)?;

        // Create length metadata for the string.
        let meta = Scalar::from_target_usize(u64::try_from(bytes.len()).unwrap(), self);

        // Get layout for Rust's str type.
        let layout = self.layout_of(self.tcx.types.str_).unwrap();

        // Combine pointer and metadata into a wide pointer.
        interp_ok(self.ptr_with_meta_to_mplace(
            ptr.into(),
            MemPlaceMeta::Meta(meta),
            layout,
            /*unaligned*/ false,
        ))
    }

    pub fn raw_const_to_mplace(
        &self,
        raw: mir::ConstAlloc<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        // This must be an allocation in `tcx`
        let _ = self.tcx.global_alloc(raw.alloc_id);
        let ptr = self.global_root_pointer(Pointer::from(raw.alloc_id))?;
        let layout = self.layout_of(raw.ty)?;
        interp_ok(self.ptr_to_mplace(ptr.into(), layout))
    }
}

// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(MPlaceTy<'_>, 64);
    static_assert_size!(MemPlace, 48);
    static_assert_size!(MemPlaceMeta, 24);
    static_assert_size!(Place, 48);
    static_assert_size!(PlaceTy<'_>, 64);
    // tidy-alphabetical-end
}