// rustc_const_eval/interpret/place.rs

1//! Computations on places -- field projections, going from mir::Place, and writing
2//! into a place.
3//! All high-level functions to write to memory work on places as destinations.
4
5use std::assert_matches::assert_matches;
6
7use either::{Either, Left, Right};
8use rustc_abi::{BackendRepr, HasDataLayout, Size};
9use rustc_middle::ty::Ty;
10use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
11use rustc_middle::{bug, mir, span_bug};
12use tracing::{instrument, trace};
13
14use super::{
15    AllocInit, AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance, ImmTy, Immediate, InterpCx,
16    InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy, Operand, Pointer,
17    Projectable, Provenance, Scalar, alloc_range, interp_ok, mir_assign_valid_types,
18};
19
/// Information required for the sound usage of a `MemPlace`.
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub enum MemPlaceMeta<Prov: Provenance = CtfeProvenance> {
    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
    Meta(Scalar<Prov>),
    /// `Sized` types or unsized `extern type`.
    None,
}
28
29impl<Prov: Provenance> MemPlaceMeta<Prov> {
30    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
31    pub fn unwrap_meta(self) -> Scalar<Prov> {
32        match self {
33            Self::Meta(s) => s,
34            Self::None => {
35                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
36            }
37        }
38    }
39
40    #[inline(always)]
41    pub fn has_meta(self) -> bool {
42        match self {
43            Self::Meta(_) => true,
44            Self::None => false,
45        }
46    }
47}
48
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub(super) struct MemPlace<Prov: Provenance = CtfeProvenance> {
    /// The pointer can be a pure integer, with the `None` provenance.
    pub ptr: Pointer<Option<Prov>>,
    /// Metadata for unsized places. Interpretation is up to the type.
    /// Must not be present for sized types, but can be missing for unsized types
    /// (e.g., `extern type`).
    pub meta: MemPlaceMeta<Prov>,
    /// Stores whether this place was created based on a sufficiently aligned pointer.
    /// `None` if no misalignment was detected or no check was requested
    /// (see `ptr_with_meta_to_mplace`).
    misaligned: Option<Misalignment>,
}
60
61impl<Prov: Provenance> MemPlace<Prov> {
62    /// Adjust the provenance of the main pointer (metadata is unaffected).
63    fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
64        MemPlace { ptr: self.ptr.map_provenance(|p| p.map(f)), ..self }
65    }
66
67    /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
68    #[inline]
69    fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
70        Immediate::new_pointer_with_meta(self.ptr, self.meta, cx)
71    }
72
73    #[inline]
74    // Not called `offset_with_meta` to avoid confusion with the trait method.
75    fn offset_with_meta_<'tcx, M: Machine<'tcx, Provenance = Prov>>(
76        self,
77        offset: Size,
78        mode: OffsetMode,
79        meta: MemPlaceMeta<Prov>,
80        ecx: &InterpCx<'tcx, M>,
81    ) -> InterpResult<'tcx, Self> {
82        debug_assert!(
83            !meta.has_meta() || self.meta.has_meta(),
84            "cannot use `offset_with_meta` to add metadata to a place"
85        );
86        let ptr = match mode {
87            OffsetMode::Inbounds => {
88                ecx.ptr_offset_inbounds(self.ptr, offset.bytes().try_into().unwrap())?
89            }
90            OffsetMode::Wrapping => self.ptr.wrapping_offset(offset, ecx),
91        };
92        interp_ok(MemPlace { ptr, meta, misaligned: self.misaligned })
93    }
94}
95
/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Clone, Hash, Eq, PartialEq)]
pub struct MPlaceTy<'tcx, Prov: Provenance = CtfeProvenance> {
    mplace: MemPlace<Prov>,
    /// The type and layout of the value stored at this place.
    pub layout: TyAndLayout<'tcx>,
}
102
103impl<Prov: Provenance> std::fmt::Debug for MPlaceTy<'_, Prov> {
104    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
105        // Printing `layout` results in too much noise; just print a nice version of the type.
106        f.debug_struct("MPlaceTy")
107            .field("mplace", &self.mplace)
108            .field("ty", &format_args!("{}", self.layout.ty))
109            .finish()
110    }
111}
112
113impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
114    /// Produces a MemPlace that works for ZST but nothing else.
115    /// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
116    /// don't need to worry about memory leaks.
117    #[inline]
118    pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
119        assert!(layout.is_zst());
120        let align = layout.align.abi;
121        let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address
122        MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None, misaligned: None }, layout }
123    }
124
125    /// Adjust the provenance of the main pointer (metadata is unaffected).
126    pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
127        MPlaceTy { mplace: self.mplace.map_provenance(f), ..self }
128    }
129
130    #[inline(always)]
131    pub(super) fn mplace(&self) -> &MemPlace<Prov> {
132        &self.mplace
133    }
134
135    #[inline(always)]
136    pub fn ptr(&self) -> Pointer<Option<Prov>> {
137        self.mplace.ptr
138    }
139
140    #[inline(always)]
141    pub fn to_ref(&self, cx: &impl HasDataLayout) -> Immediate<Prov> {
142        self.mplace.to_ref(cx)
143    }
144}
145
146impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
147    #[inline(always)]
148    fn layout(&self) -> TyAndLayout<'tcx> {
149        self.layout
150    }
151
152    #[inline(always)]
153    fn meta(&self) -> MemPlaceMeta<Prov> {
154        self.mplace.meta
155    }
156
157    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
158        &self,
159        offset: Size,
160        mode: OffsetMode,
161        meta: MemPlaceMeta<Prov>,
162        layout: TyAndLayout<'tcx>,
163        ecx: &InterpCx<'tcx, M>,
164    ) -> InterpResult<'tcx, Self> {
165        interp_ok(MPlaceTy {
166            mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?,
167            layout,
168        })
169    }
170
171    #[inline(always)]
172    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
173        &self,
174        _ecx: &InterpCx<'tcx, M>,
175    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
176        interp_ok(self.clone().into())
177    }
178}
179
#[derive(Copy, Clone, Debug)]
pub(super) enum Place<Prov: Provenance = CtfeProvenance> {
    /// A place referring to a value allocated in the `Memory` system.
    Ptr(MemPlace<Prov>),

    /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
    /// where in the local this place is located; if it is `None`, no projection has been applied
    /// and the type of the place is exactly the type of the local.
    /// Such projections are meaningful even if the offset is 0, since they can change layouts.
    /// (Without that optimization, we'd just always be a `MemPlace`.)
    /// `Local` places always refer to the current stack frame, so they are unstable under
    /// function calls/returns and switching between stacks of different threads!
    /// We carry around the address of the `locals` buffer of the correct stack frame as a sanity
    /// check to be able to catch some cases of using a dangling `Place`.
    ///
    /// This variant shall not be used for unsized types -- those must always live in memory.
    Local { local: mir::Local, offset: Option<Size>, locals_addr: usize },
}
198
/// An evaluated place, together with its type.
///
/// This may reference a stack frame by its index, so `PlaceTy` should generally not be kept around
/// for longer than a single operation. Popping and then pushing a stack frame can make `PlaceTy`
/// point to the wrong destination. If the interpreter has multiple stacks, stack switching will
/// also invalidate a `PlaceTy`.
#[derive(Clone)]
pub struct PlaceTy<'tcx, Prov: Provenance = CtfeProvenance> {
    place: Place<Prov>, // Keep this private; it helps enforce invariants.
    /// The type and layout of the value stored at this place.
    pub layout: TyAndLayout<'tcx>,
}
210
211impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
212    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
213        // Printing `layout` results in too much noise; just print a nice version of the type.
214        f.debug_struct("PlaceTy")
215            .field("place", &self.place)
216            .field("ty", &format_args!("{}", self.layout.ty))
217            .finish()
218    }
219}
220
221impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
222    #[inline(always)]
223    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
224        PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
225    }
226}
227
228impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
229    #[inline(always)]
230    pub(super) fn place(&self) -> &Place<Prov> {
231        &self.place
232    }
233
234    /// A place is either an mplace or some local.
235    #[inline(always)]
236    pub fn as_mplace_or_local(
237        &self,
238    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)> {
239        match self.place {
240            Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout }),
241            Place::Local { local, offset, locals_addr } => {
242                Right((local, offset, locals_addr, self.layout))
243            }
244        }
245    }
246
247    #[inline(always)]
248    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
249    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
250        self.as_mplace_or_local().left().unwrap_or_else(|| {
251            bug!(
252                "PlaceTy of type {} was a local when it was expected to be an MPlace",
253                self.layout.ty
254            )
255        })
256    }
257}
258
impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    #[inline]
    fn meta(&self) -> MemPlaceMeta<Prov> {
        match self.as_mplace_or_local() {
            Left(mplace) => mplace.meta(),
            Right(_) => {
                // `Place::Local` is never used for unsized values, so there is no metadata.
                debug_assert!(self.layout.is_sized(), "unsized locals should live in memory");
                MemPlaceMeta::None
            }
        }
    }

    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        offset: Size,
        mode: OffsetMode,
        meta: MemPlaceMeta<Prov>,
        layout: TyAndLayout<'tcx>,
        ecx: &InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, Self> {
        interp_ok(match self.as_mplace_or_local() {
            Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
            Right((local, old_offset, locals_addr, _)) => {
                debug_assert!(layout.is_sized(), "unsized locals should live in memory");
                assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
                // `Place::Local` are always in-bounds of their surrounding local, so we can just
                // check directly if this remains in-bounds. This cannot actually be violated since
                // projections are type-checked and bounds-checked.
                assert!(offset + layout.size <= self.layout.size);

                // Size `+`, ensures no overflow.
                let new_offset = old_offset.unwrap_or(Size::ZERO) + offset;

                // Record the offset even if it is 0 -- per the `Place::Local` docs, an offset of
                // `Some(0)` still matters because the layout may have changed.
                PlaceTy {
                    place: Place::Local { local, offset: Some(new_offset), locals_addr },
                    layout,
                }
            }
        })
    }

    #[inline(always)]
    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        ecx: &InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        ecx.place_to_op(self)
    }
}
313
314// These are defined here because they produce a place.
315impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
316    #[inline(always)]
317    pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
318        match self.op() {
319            Operand::Indirect(mplace) => Left(MPlaceTy { mplace: *mplace, layout: self.layout }),
320            Operand::Immediate(imm) => Right(ImmTy::from_immediate(*imm, self.layout)),
321        }
322    }
323
324    #[inline(always)]
325    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
326    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
327        self.as_mplace_or_imm().left().unwrap_or_else(|| {
328            bug!(
329                "OpTy of type {} was immediate when it was expected to be an MPlace",
330                self.layout.ty
331            )
332        })
333    }
334}
335
/// The `Writeable` trait describes interpreter values that can be written to.
pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
    /// View this value as a (not necessarily memory-backed) place.
    fn to_place(&self) -> PlaceTy<'tcx, Prov>;

    /// Obtain a memory-backed place for this value.
    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        ecx: &mut InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
}
345
impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn to_place(&self) -> PlaceTy<'tcx, Prov> {
        // A `PlaceTy` already is a place; just hand out a copy.
        self.clone()
    }

    #[inline(always)]
    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        ecx: &mut InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
        // Delegates to `force_allocation` to obtain a memory-backed place.
        ecx.force_allocation(self)
    }
}
360
impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn to_place(&self) -> PlaceTy<'tcx, Prov> {
        // Wrap the mplace in the `Place::Ptr` variant (via the `From` impl).
        self.clone().into()
    }

    #[inline(always)]
    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
        &self,
        _ecx: &mut InterpCx<'tcx, M>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
        // Already an mplace; nothing to force.
        interp_ok(self.clone())
    }
}
375
376// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
377impl<'tcx, Prov, M> InterpCx<'tcx, M>
378where
379    Prov: Provenance,
380    M: Machine<'tcx, Provenance = Prov>,
381{
382    fn ptr_with_meta_to_mplace(
383        &self,
384        ptr: Pointer<Option<M::Provenance>>,
385        meta: MemPlaceMeta<M::Provenance>,
386        layout: TyAndLayout<'tcx>,
387        unaligned: bool,
388    ) -> MPlaceTy<'tcx, M::Provenance> {
389        let misaligned =
390            if unaligned { None } else { self.is_ptr_misaligned(ptr, layout.align.abi) };
391        MPlaceTy { mplace: MemPlace { ptr, meta, misaligned }, layout }
392    }
393
    /// Turn a thin pointer into a place for a sized type, with alignment checking based
    /// on the type's layout.
    pub fn ptr_to_mplace(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        layout: TyAndLayout<'tcx>,
    ) -> MPlaceTy<'tcx, M::Provenance> {
        assert!(layout.is_sized());
        self.ptr_with_meta_to_mplace(ptr, MemPlaceMeta::None, layout, /*unaligned*/ false)
    }
402
    /// Turn a thin pointer into a place for a sized type, skipping alignment checking.
    pub fn ptr_to_mplace_unaligned(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        layout: TyAndLayout<'tcx>,
    ) -> MPlaceTy<'tcx, M::Provenance> {
        assert!(layout.is_sized());
        self.ptr_with_meta_to_mplace(ptr, MemPlaceMeta::None, layout, /*unaligned*/ true)
    }
411
412    /// Take a value, which represents a (thin or wide) reference, and make it a place.
413    /// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
414    ///
415    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
416    /// want to ever use the place for memory access!
417    /// Generally prefer `deref_pointer`.
418    pub fn ref_to_mplace(
419        &self,
420        val: &ImmTy<'tcx, M::Provenance>,
421    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
422        let pointee_type =
423            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type");
424        let layout = self.layout_of(pointee_type)?;
425        let (ptr, meta) = val.to_scalar_and_meta();
426
427        // `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
428        // we hence can't call `size_and_align_of` since that asserts more validity than we want.
429        let ptr = ptr.to_pointer(self)?;
430        interp_ok(self.ptr_with_meta_to_mplace(ptr, meta, layout, /*unaligned*/ false))
431    }
432
433    /// Turn a mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
434    /// `align` information is lost!
435    /// This is the inverse of `ref_to_mplace`.
436    pub fn mplace_to_ref(
437        &self,
438        mplace: &MPlaceTy<'tcx, M::Provenance>,
439    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
440        let imm = mplace.mplace.to_ref(self);
441        let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
442        interp_ok(ImmTy::from_immediate(imm, layout))
443    }
444
445    /// Take an operand, representing a pointer, and dereference it to a place.
446    /// Corresponds to the `*` operator in Rust.
447    #[instrument(skip(self), level = "trace")]
448    pub fn deref_pointer(
449        &self,
450        src: &impl Projectable<'tcx, M::Provenance>,
451    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
452        if src.layout().ty.is_box() {
453            // Derefer should have removed all Box derefs.
454            // Some `Box` are not immediates (if they have a custom allocator)
455            // so the code below would fail.
456            bug!("dereferencing {}", src.layout().ty);
457        }
458
459        let val = self.read_immediate(src)?;
460        trace!("deref to {} on {:?}", val.layout.ty, *val);
461
462        let mplace = self.ref_to_mplace(&val)?;
463        interp_ok(mplace)
464    }
465
    /// Get access to the allocation backing `mplace`, or `None` for zero-sized accesses.
    #[inline]
    pub(super) fn get_place_alloc(
        &self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        // Compute the dynamic size/align if possible; otherwise fall back to the static layout.
        let (size, _align) = self
            .size_and_align_of_mplace(mplace)?
            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
        // We check alignment separately, and *after* checking everything else.
        // If an access is both OOB and misaligned, we want to see the bounds error.
        let a = self.get_ptr_alloc(mplace.ptr(), size)?;
        self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn)?;
        interp_ok(a)
    }
481
    /// Get mutable access to the allocation backing `mplace`, or `None` for zero-sized accesses.
    #[inline]
    pub(super) fn get_place_alloc_mut(
        &mut self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        // Compute the dynamic size/align if possible; otherwise fall back to the static layout.
        let (size, _align) = self
            .size_and_align_of_mplace(mplace)?
            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
        // We check alignment separately, and raise that error *after* checking everything else.
        // If an access is both OOB and misaligned, we want to see the bounds error.
        // However we have to call `check_misalign` first to make the borrow checker happy.
        let misalign_res = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
        // An error from get_ptr_alloc_mut takes precedence.
        let (a, ()) = self.get_ptr_alloc_mut(mplace.ptr(), size).and(misalign_res)?;
        interp_ok(a)
    }
499
    /// Turn a local in the current frame into a place.
    pub fn local_to_place(
        &self,
        local: mir::Local,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
        let frame = self.frame();
        let layout = self.layout_of_local(frame, local, None)?;
        let place = if layout.is_sized() {
            // We can just always use the `Local` for sized values.
            Place::Local { local, offset: None, locals_addr: frame.locals_addr() }
        } else {
            // Other parts of the system rely on `Place::Local` never being unsized,
            // so an unsized local must already be backed by memory.
            match frame.locals[local].access()? {
                Operand::Immediate(_) => bug!(),
                Operand::Indirect(mplace) => Place::Ptr(*mplace),
            }
        };
        interp_ok(PlaceTy { place, layout })
    }
519
    /// Computes a place. You should only use this if you intend to write into this
    /// place; for reading, a more efficient alternative is `eval_place_to_op`.
    #[instrument(skip(self), level = "trace")]
    pub fn eval_place(
        &self,
        mir_place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
        // Start from the base local and apply each projection element in turn.
        let mut place = self.local_to_place(mir_place.local)?;
        // Using `try_fold` turned out to be bad for performance, hence the loop.
        for elem in mir_place.projection.iter() {
            place = self.project(&place, elem)?
        }

        trace!("{:?}", self.dump_place(&place));
        // Sanity-check the type we ended up with: it must agree with the MIR-level type
        // of the place.
        if cfg!(debug_assertions) {
            let normalized_place_ty = self
                .instantiate_from_current_frame_and_normalize_erasing_regions(
                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
                )?;
            if !mir_assign_valid_types(
                *self.tcx,
                self.typing_env,
                self.layout_of(normalized_place_ty)?,
                place.layout,
            ) {
                span_bug!(
                    self.cur_span(),
                    "eval_place of a MIR place with type {} produced an interpreter place with type {}",
                    normalized_place_ty,
                    place.layout.ty,
                )
            }
        }
        interp_ok(place)
    }
556
    /// Given a place, returns either the underlying mplace or a reference to where the value of
    /// this place is stored.
    #[inline(always)]
    fn as_mplace_or_mutable_local(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<
        'tcx,
        Either<
            MPlaceTy<'tcx, M::Provenance>,
            (&mut Immediate<M::Provenance>, TyAndLayout<'tcx>, mir::Local),
        >,
    > {
        interp_ok(match place.to_place().as_mplace_or_local() {
            Left(mplace) => Left(mplace),
            Right((local, offset, locals_addr, layout)) => {
                if offset.is_some() {
                    // This has been projected to a part of this local, or had the type changed.
                    // FIXME: there are cases where we could still avoid allocating an mplace.
                    Left(place.force_mplace(self)?)
                } else {
                    // Sanity checks: the place must refer to the current frame and carry the
                    // local's own layout.
                    debug_assert_eq!(locals_addr, self.frame().locals_addr());
                    debug_assert_eq!(self.layout_of_local(self.frame(), local, None)?, layout);
                    match self.frame_mut().locals[local].access_mut()? {
                        Operand::Indirect(mplace) => {
                            // The local is in memory.
                            Left(MPlaceTy { mplace: *mplace, layout })
                        }
                        Operand::Immediate(local_val) => {
                            // The local still has the optimized representation.
                            Right((local_val, layout, local))
                        }
                    }
                }
            }
        })
    }
594
    /// Write an immediate to a place.
    /// If validity enforcement is enabled for this type, the written value is validated afterwards.
    #[inline(always)]
    #[instrument(skip(self), level = "trace")]
    pub fn write_immediate(
        &mut self,
        src: Immediate<M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_immediate_no_validate(src, dest)?;

        if M::enforce_validity(self, dest.layout()) {
            // Data got changed, better make sure it matches the type!
            // Also needed to reset padding.
            self.validate_operand(
                &dest.to_place(),
                M::enforce_validity_recursively(self, dest.layout()),
                /*reset_provenance_and_padding*/ true,
            )?;
        }

        interp_ok(())
    }
617
    /// Write a scalar to a place. Convenience wrapper around `write_immediate`.
    #[inline(always)]
    pub fn write_scalar(
        &mut self,
        val: impl Into<Scalar<M::Provenance>>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_immediate(Immediate::Scalar(val.into()), dest)
    }
627
    /// Write a pointer to a place. Convenience wrapper around `write_scalar`.
    #[inline(always)]
    pub fn write_pointer(
        &mut self,
        ptr: impl Into<Pointer<Option<M::Provenance>>>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
    }
637
    /// Write an immediate to a place.
    /// If you use this you are responsible for validating that things got copied at the
    /// right type.
    pub(super) fn write_immediate_no_validate(
        &mut self,
        src: Immediate<M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");

        match self.as_mplace_or_mutable_local(&dest.to_place())? {
            Right((local_val, local_layout, local)) => {
                // Local can be updated in-place.
                *local_val = src;
                // Call the machine hook (the data race detector needs to know about this write).
                if !self.validation_in_progress() {
                    M::after_local_write(self, local, /*storage_live*/ false)?;
                }
                // Double-check that the value we are storing and the local fit to each other.
                // Things can go wrong in quite weird ways when this is violated.
                // Unfortunately this is too expensive to do in release builds.
                if cfg!(debug_assertions) {
                    src.assert_matches_abi(
                        local_layout.backend_repr,
                        "invalid immediate for given destination place",
                        self,
                    );
                }
            }
            Left(mplace) => {
                self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)?;
            }
        }
        interp_ok(())
    }
673
    /// Write an immediate to memory.
    /// If you use this you are responsible for validating that things got copied at the
    /// right layout.
    fn write_immediate_to_mplace_no_validate(
        &mut self,
        value: Immediate<M::Provenance>,
        layout: TyAndLayout<'tcx>,
        dest: MemPlace<M::Provenance>,
    ) -> InterpResult<'tcx> {
        // We use the sizes from `value` below.
        // Ensure that matches the type of the place it is written to.
        value.assert_matches_abi(
            layout.backend_repr,
            "invalid immediate for given destination place",
            self,
        );
        // Note that it is really important that the type here is the right one, and matches the
        // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
        // to handle padding properly, which is only correct if we never look at this data with the
        // wrong type.

        let tcx = *self.tcx;
        let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout })? else {
            // zero-sized access -- nothing to write
            return interp_ok(());
        };

        match value {
            Immediate::Scalar(scalar) => {
                alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
            }
            Immediate::ScalarPair(a_val, b_val) => {
                let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
                    span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
                        layout
                    )
                };
                // The second component starts after the first, aligned to its own alignment.
                let b_offset = a.size(&tcx).align_to(b.align(&tcx).abi);
                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields

                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                // but that does not work: We could be a newtype around a pair, then the
                // fields do not match the `ScalarPair` components.

                alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
                alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
                // We don't have to reset padding here, `write_immediate` will anyway do a validation run.
                interp_ok(())
            }
            Immediate::Uninit => alloc.write_uninit_full(),
        }
    }
728
729    pub fn write_uninit(
730        &mut self,
731        dest: &impl Writeable<'tcx, M::Provenance>,
732    ) -> InterpResult<'tcx> {
733        match self.as_mplace_or_mutable_local(&dest.to_place())? {
734            Right((local_val, _local_layout, local)) => {
735                *local_val = Immediate::Uninit;
736                // Call the machine hook (the data race detector needs to know about this write).
737                if !self.validation_in_progress() {
738                    M::after_local_write(self, local, /*storage_live*/ false)?;
739                }
740            }
741            Left(mplace) => {
742                let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
743                    // Zero-sized access
744                    return interp_ok(());
745                };
746                alloc.write_uninit_full()?;
747            }
748        }
749        interp_ok(())
750    }
751
    /// Remove all provenance in the given place.
    pub fn clear_provenance(
        &mut self,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        match self.as_mplace_or_mutable_local(&dest.to_place())? {
            Right((local_val, _local_layout, local)) => {
                // Strip provenance from the in-place immediate.
                local_val.clear_provenance()?;
                // Call the machine hook (the data race detector needs to know about this write).
                if !self.validation_in_progress() {
                    M::after_local_write(self, local, /*storage_live*/ false)?;
                }
            }
            Left(mplace) => {
                let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
                    // Zero-sized access -- nothing to clear
                    return interp_ok(());
                };
                alloc.clear_provenance()?;
            }
        }
        interp_ok(())
    }
775
    /// Copies the data from an operand to a place.
    /// The layouts of the `src` and `dest` may disagree.
    ///
    /// Thin wrapper around `copy_op_inner` with `allow_transmute = true`; the
    /// copied value is still validated (at both types) if the machine enforces
    /// validity.
    #[inline(always)]
    pub fn copy_op_allow_transmute(
        &mut self,
        src: &impl Projectable<'tcx, M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.copy_op_inner(src, dest, /* allow_transmute */ true)
    }
786
    /// Copies the data from an operand to a place.
    /// `src` and `dest` must have the same layout and the copied value will be validated.
    ///
    /// Thin wrapper around `copy_op_inner` with `allow_transmute = false`;
    /// mismatched layouts trigger a `span_bug` in `copy_op_no_validate`.
    #[inline(always)]
    pub fn copy_op(
        &mut self,
        src: &impl Projectable<'tcx, M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.copy_op_inner(src, dest, /* allow_transmute */ false)
    }
797
    /// Copies the data from an operand to a place.
    /// `allow_transmute` indicates whether the layouts may disagree.
    ///
    /// If the machine enforces validity for `dest`'s layout, the copied value is
    /// validated after the copy -- at both the source and destination type when
    /// the types differ.
    #[inline(always)]
    #[instrument(skip(self), level = "trace")]
    fn copy_op_inner(
        &mut self,
        src: &impl Projectable<'tcx, M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
        allow_transmute: bool,
    ) -> InterpResult<'tcx> {
        // These are technically *two* typed copies: `src` is a not-yet-loaded value,
        // so we're doing a typed copy at `src` type from there to some intermediate storage.
        // And then we're doing a second typed copy from that intermediate storage to `dest`.
        // But as an optimization, we only make a single direct copy here.

        // Do the actual copy.
        self.copy_op_no_validate(src, dest, allow_transmute)?;

        if M::enforce_validity(self, dest.layout()) {
            let dest = dest.to_place();
            // Given that there were two typed copies, we have to ensure this is valid at both types,
            // and we have to ensure this loses provenance and padding according to both types.
            // But if the types are identical, we only do one pass.
            if src.layout().ty != dest.layout().ty {
                // Validate at the *source* type by viewing `dest` through the source layout.
                self.validate_operand(
                    &dest.transmute(src.layout(), self)?,
                    M::enforce_validity_recursively(self, src.layout()),
                    /*reset_provenance_and_padding*/ true,
                )?;
            }
            // Validate at the destination type.
            self.validate_operand(
                &dest,
                M::enforce_validity_recursively(self, dest.layout()),
                /*reset_provenance_and_padding*/ true,
            )?;
        }

        interp_ok(())
    }
837
    /// Copies the data from an operand to a place.
    /// `allow_transmute` indicates whether the layouts may disagree.
    /// Also, if you use this you are responsible for validating that things get copied at the
    /// right type.
    ///
    /// Takes a fast path (copying a loaded immediate) when the source fits in an
    /// immediate, and otherwise falls back to a raw `mem_copy`.
    #[instrument(skip(self), level = "trace")]
    fn copy_op_no_validate(
        &mut self,
        src: &impl Projectable<'tcx, M::Provenance>,
        dest: &impl Writeable<'tcx, M::Provenance>,
        allow_transmute: bool,
    ) -> InterpResult<'tcx> {
        // We do NOT compare the types for equality, because well-typed code can
        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
        let layout_compat =
            mir_assign_valid_types(*self.tcx, self.typing_env, src.layout(), dest.layout());
        if !allow_transmute && !layout_compat {
            span_bug!(
                self.cur_span(),
                "type mismatch when copying!\nsrc: {},\ndest: {}",
                src.layout().ty,
                dest.layout().ty,
            );
        }

        // Let us see if the layout is simple so we take a shortcut,
        // avoid force_allocation.
        let src = match self.read_immediate_raw(src)? {
            Right(src_val) => {
                // Immediates are only used for sized values of matching size.
                assert!(!src.layout().is_unsized());
                assert!(!dest.layout().is_unsized());
                assert_eq!(src.layout().size, dest.layout().size);
                // Yay, we got a value that we can write directly.
                return if layout_compat {
                    self.write_immediate_no_validate(*src_val, dest)
                } else {
                    // This is tricky. The problematic case is `ScalarPair`: the `src_val` was
                    // loaded using the offsets defined by `src.layout`. When we put this back into
                    // the destination, we have to use the same offsets! So (a) we make sure we
                    // write back to memory, and (b) we use `dest` *with the source layout*.
                    let dest_mem = dest.force_mplace(self)?;
                    self.write_immediate_to_mplace_no_validate(
                        *src_val,
                        src.layout(),
                        dest_mem.mplace,
                    )
                };
            }
            Left(mplace) => mplace,
        };
        // Slow path, this does not fit into an immediate. Just memcpy.
        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout().ty);

        let dest = dest.force_mplace(self)?;
        let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
            span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
        };
        if cfg!(debug_assertions) {
            // Full check: compare dynamically computed sizes (covers unsized tails).
            let src_size = self.size_and_align_of_mplace(&src)?.unwrap().0;
            assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
        } else {
            // As a cheap approximation, we compare the fixed parts of the size.
            assert_eq!(src.layout.size, dest.layout.size);
        }

        // Setting `nonoverlapping` here only has an effect when we don't hit the fast-path above,
        // but that should at least match what LLVM does where `memcpy` is also only used when the
        // type does not have Scalar/ScalarPair layout.
        // (Or as the `Assign` docs put it, assignments "not producing primitives" must be
        // non-overlapping.)
        // We check alignment separately, and *after* checking everything else.
        // If an access is both OOB and misaligned, we want to see the bounds error.
        self.mem_copy(src.ptr(), dest.ptr(), dest_size, /*nonoverlapping*/ true)?;
        self.check_misalign(src.mplace.misaligned, CheckAlignMsg::BasedOn)?;
        self.check_misalign(dest.mplace.misaligned, CheckAlignMsg::BasedOn)?;
        interp_ok(())
    }
914
    /// Ensures that a place is in memory, and returns where it is.
    /// If the place currently refers to a local that doesn't yet have a matching allocation,
    /// create such an allocation.
    /// This is essentially `force_to_memplace`.
    ///
    /// The returned `MPlaceTy` keeps the original place's layout, even though the
    /// backing allocation (if freshly created) uses the *whole local's* layout.
    #[instrument(skip(self), level = "trace")]
    pub fn force_allocation(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let mplace = match place.place {
            Place::Local { local, offset, locals_addr } => {
                // Sanity check: the place must refer to the current frame's locals.
                debug_assert_eq!(locals_addr, self.frame().locals_addr());
                let whole_local = match self.frame_mut().locals[local].access_mut()? {
                    &mut Operand::Immediate(local_val) => {
                        // We need to make an allocation.

                        // We need the layout of the local. We can NOT use the layout we got,
                        // that might e.g., be an inner field of a struct with `Scalar` layout,
                        // that has different alignment than the outer field.
                        let local_layout = self.layout_of_local(&self.frame(), local, None)?;
                        assert!(local_layout.is_sized(), "unsized locals cannot be immediate");
                        let mplace = self.allocate(local_layout, MemoryKind::Stack)?;
                        // Preserve old value. (As an optimization, we can skip this if it was uninit.)
                        if !matches!(local_val, Immediate::Uninit) {
                            // We don't have to validate as we can assume the local was already
                            // valid for its type. We must not use any part of `place` here, that
                            // could be a projection to a part of the local!
                            self.write_immediate_to_mplace_no_validate(
                                local_val,
                                local_layout,
                                mplace.mplace,
                            )?;
                        }
                        // Let the machine (e.g. the data race detector) observe the move to memory.
                        M::after_local_moved_to_memory(self, local, &mplace)?;
                        // Now we can call `access_mut` again, asserting it goes well, and actually
                        // overwrite things. This points to the entire allocation, not just the part
                        // the place refers to, i.e. we do this before we apply `offset`.
                        *self.frame_mut().locals[local].access_mut().unwrap() =
                            Operand::Indirect(mplace.mplace);
                        mplace.mplace
                    }
                    &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
                };
                if let Some(offset) = offset {
                    // This offset is always inbounds, no need to check it again.
                    whole_local.offset_with_meta_(
                        offset,
                        OffsetMode::Wrapping,
                        MemPlaceMeta::None,
                        self,
                    )?
                } else {
                    // Preserve wide place metadata, do not call `offset`.
                    whole_local
                }
            }
            Place::Ptr(mplace) => mplace,
        };
        // Return with the original layout and align, so that the caller can go on
        interp_ok(MPlaceTy { mplace, layout: place.layout })
    }
976
977    pub fn allocate_dyn(
978        &mut self,
979        layout: TyAndLayout<'tcx>,
980        kind: MemoryKind<M::MemoryKind>,
981        meta: MemPlaceMeta<M::Provenance>,
982    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
983        let Some((size, align)) = self.size_and_align_of(&meta, &layout)? else {
984            span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
985        };
986        let ptr = self.allocate_ptr(size, align, kind, AllocInit::Uninit)?;
987        interp_ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout, /*unaligned*/ false))
988    }
989
    /// Allocates fresh, uninitialized memory for a *sized* value of the given
    /// layout. Convenience wrapper around `allocate_dyn` with no metadata.
    pub fn allocate(
        &mut self,
        layout: TyAndLayout<'tcx>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        assert!(layout.is_sized());
        self.allocate_dyn(layout, kind, MemPlaceMeta::None)
    }
998
999    /// Allocates a sequence of bytes in the interpreter's memory with alignment 1.
1000    /// This is allocated in immutable global memory and deduplicated.
1001    pub fn allocate_bytes_dedup(
1002        &mut self,
1003        bytes: &[u8],
1004    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
1005        let salt = M::get_global_alloc_salt(self, None);
1006        let id = self.tcx.allocate_bytes_dedup(bytes, salt);
1007
1008        // Turn untagged "global" pointers (obtained via `tcx`) into the machine pointer to the allocation.
1009        M::adjust_alloc_root_pointer(
1010            &self,
1011            Pointer::from(id),
1012            M::GLOBAL_KIND.map(MemoryKind::Machine),
1013        )
1014    }
1015
1016    /// Allocates a string in the interpreter's memory, returning it as a (wide) place.
1017    /// This is allocated in immutable global memory and deduplicated.
1018    pub fn allocate_str_dedup(
1019        &mut self,
1020        s: &str,
1021    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
1022        let bytes = s.as_bytes();
1023        let ptr = self.allocate_bytes_dedup(bytes)?;
1024
1025        // Create length metadata for the string.
1026        let meta = Scalar::from_target_usize(u64::try_from(bytes.len()).unwrap(), self);
1027
1028        // Get layout for Rust's str type.
1029        let layout = self.layout_of(self.tcx.types.str_).unwrap();
1030
1031        // Combine pointer and metadata into a wide pointer.
1032        interp_ok(self.ptr_with_meta_to_mplace(
1033            ptr.into(),
1034            MemPlaceMeta::Meta(meta),
1035            layout,
1036            /*unaligned*/ false,
1037        ))
1038    }
1039
1040    pub fn raw_const_to_mplace(
1041        &self,
1042        raw: mir::ConstAlloc<'tcx>,
1043    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
1044        // This must be an allocation in `tcx`
1045        let _ = self.tcx.global_alloc(raw.alloc_id);
1046        let ptr = self.global_root_pointer(Pointer::from(raw.alloc_id))?;
1047        let layout = self.layout_of(raw.ty)?;
1048        interp_ok(self.ptr_to_mplace(ptr.into(), layout))
1049    }
1050}
1051
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
// (Only checked on 64-bit targets; an intentional size change must update these numbers.)
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(MemPlace, 48);
    static_assert_size!(MemPlaceMeta, 24);
    static_assert_size!(MPlaceTy<'_>, 64);
    static_assert_size!(Place, 48);
    static_assert_size!(PlaceTy<'_>, 64);
    // tidy-alphabetical-end
}