miri/borrow_tracker/stacked_borrows/mod.rs

//! Implements "Stacked Borrows".  See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
//! for further information.

pub mod diagnostics;
mod item;
mod stack;

use std::fmt::Write;
use std::{cmp, mem};

use rustc_abi::{BackendRepr, Size};
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir::{Mutability, RetagKind};
use rustc_middle::ty::layout::HasTypingEnv;
use rustc_middle::ty::{self, Ty};

use self::diagnostics::{RetagCause, RetagInfo};
pub use self::item::{Item, Permission};
pub use self::stack::Stack;
use crate::borrow_tracker::stacked_borrows::diagnostics::{
    AllocHistory, DiagnosticCx, DiagnosticCxBuilder,
};
use crate::borrow_tracker::{GlobalStateInner, ProtectorKind};
use crate::concurrency::data_race::{NaReadType, NaWriteType};
use crate::*;

pub type AllocState = Stacks;

/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
    // Even reading memory can have effects on the stack, so we need a `RefCell` here.
    stacks: RangeMap<Stack>,
    /// Stores past operations on this allocation
    history: AllocHistory,
    /// The set of tags that have been exposed inside this allocation.
    exposed_tags: FxHashSet<BorTag>,
}

/// Indicates which permissions to grant to the retagged pointer.
#[derive(Clone, Debug)]
enum NewPermission {
    Uniform {
        perm: Permission,
        access: Option<AccessKind>,
        protector: Option<ProtectorKind>,
    },
    FreezeSensitive {
        freeze_perm: Permission,
        freeze_access: Option<AccessKind>,
        freeze_protector: Option<ProtectorKind>,
        nonfreeze_perm: Permission,
        nonfreeze_access: Option<AccessKind>,
        // nonfreeze_protector must always be None
    },
}

impl NewPermission {
    /// A key function: determine the permissions to grant at a retag for the given kind of
    /// reference/pointer.
    fn from_ref_ty<'tcx>(ty: Ty<'tcx>, kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        let protector = (kind == RetagKind::FnEntry).then_some(ProtectorKind::StrongProtector);
        match ty.kind() {
            ty::Ref(_, pointee, Mutability::Mut) => {
                if kind == RetagKind::TwoPhase {
                    // We mostly just give up on two-phase borrows, and treat these exactly like raw pointers.
                    assert!(protector.is_none()); // RetagKind can't be both FnEntry and TwoPhase.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                } else if pointee.is_unpin(*cx.tcx, cx.typing_env()) {
                    // A regular full mutable reference. On `FnEntry` this is `noalias` and `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::Unique,
                        access: Some(AccessKind::Write),
                        protector,
                    }
                } else {
                    // `!Unpin` dereferences do not get `noalias` nor `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                }
            }
            ty::RawPtr(_, Mutability::Mut) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // Mutable raw pointer. No access, not protected.
                NewPermission::Uniform {
                    perm: Permission::SharedReadWrite,
                    access: None,
                    protector: None,
                }
            }
            ty::Ref(_, _pointee, Mutability::Not) => {
                // Shared references. If frozen, these get `noalias` and `dereferenceable`; otherwise neither.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: protector,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    // Inside UnsafeCell, this does *not* count as an access, as there
                    // might actually be mutable references further up the stack that
                    // we have to keep alive.
                    nonfreeze_access: None,
                    // We do not protect inside UnsafeCell.
                    // This fixes https://github.com/rust-lang/rust/issues/55005.
                }
            }
            ty::RawPtr(_, Mutability::Not) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // `*const T`, when freshly created, is read-only in the frozen part.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: None,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    nonfreeze_access: None,
                }
            }
            _ => unreachable!(),
        }
    }

    fn from_box_ty<'tcx>(ty: Ty<'tcx>, kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        // `ty` is not the `Box` but the field of the Box with this pointer (due to allocator handling).
        let pointee = ty.builtin_deref(true).unwrap();
        if pointee.is_unpin(*cx.tcx, cx.typing_env()) {
            // A regular box. On `FnEntry` this is `noalias`, but not `dereferenceable` (hence only
            // a weak protector).
            NewPermission::Uniform {
                perm: Permission::Unique,
                access: Some(AccessKind::Write),
                protector: (kind == RetagKind::FnEntry).then_some(ProtectorKind::WeakProtector),
            }
        } else {
            // `!Unpin` boxes do not get `noalias` nor `dereferenceable`.
            NewPermission::Uniform {
                perm: Permission::SharedReadWrite,
                access: None,
                protector: None,
            }
        }
    }

    fn protector(&self) -> Option<ProtectorKind> {
        match self {
            NewPermission::Uniform { protector, .. } => *protector,
            NewPermission::FreezeSensitive { freeze_protector, .. } => *freeze_protector,
        }
    }
}
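
// For intuition, a rough sketch of what the mapping above yields for a `FnEntry` retag of
// function arguments (hypothetical signature, not from this crate):
//
//     fn f(x: &mut i32, y: &Cell<i32>, p: *mut i32) {}
//
// `x` gets `Unique` with a write access and a strong protector; `y` is freeze-sensitive and,
// being entirely inside `UnsafeCell`, gets `SharedReadWrite` with no access and no protector;
// a `Raw` retag of `p` gets `SharedReadWrite` with no access and never a protector.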

// # Stacked Borrows Core Begin

/// We need to make at least the following things true:
///
/// U1: After creating a `Uniq`, it is at the top.
/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
///
/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
/// F2: If a write access happens, it pops the `SharedReadOnly`.  This has two pieces:
///     F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
///          gets popped.
///     F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
/// F3: If an access happens with an `&` outside `UnsafeCell`,
///     it requires the `SharedReadOnly` to still be in the stack.
///
/// Core relation on `Permission` to define which accesses are allowed
impl Permission {
    /// This defines, for a given permission, whether it permits the given kind of access.
    fn grants(self, access: AccessKind) -> bool {
        // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
        self != Permission::Disabled
            && (access == AccessKind::Read || self != Permission::SharedReadOnly)
    }
}
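
// For intuition, a minimal sketch (not from this crate) of how the rules above interact:
//
//     let mut local = 0;
//     let x = &mut local;             // U1: x's `Unique` is pushed on top.
//     let raw = x as *mut i32;        // raw gets `SharedReadWrite` above x.
//     let y = unsafe { &mut *raw };   // U1: y's `Unique` is pushed above raw.
//     unsafe { *raw = 1 };            // U2: the write via raw pops y's `Unique`.
//     *y = 2;                         // U3 violated: y's `Unique` is gone, so this is UB.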

/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
#[derive(Copy, Clone, Debug)]
enum ItemInvalidationCause {
    Conflict,
    Dealloc,
}

/// Core per-location operations: access, dealloc, reborrow.
impl<'tcx> Stack {
    /// Find the first write-incompatible item above the given one --
    /// i.e., find the height to which the stack will be truncated when writing to `granting`.
    fn find_first_write_incompatible(&self, granting: usize) -> usize {
        let perm = self.get(granting).unwrap().perm();
        match perm {
            Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
            Permission::Disabled => bug!("Cannot use Disabled for anything"),
            Permission::Unique => {
                // On a write, everything above us is incompatible.
                granting + 1
            }
            Permission::SharedReadWrite => {
                // The `SharedReadWrite` items *just* above us are compatible, so we skip those.
                let mut idx = granting + 1;
                while let Some(item) = self.get(idx) {
                    if item.perm() == Permission::SharedReadWrite {
                        // Go on.
                        idx += 1;
                    } else {
                        // Found first incompatible!
                        break;
                    }
                }
                idx
            }
        }
    }
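
    // A worked example (hypothetical stack, bottom to top, indices in brackets):
    //     [0] Unique   [1] SharedReadWrite   [2] SharedReadWrite   [3] SharedReadOnly
    // A write granted by item [1] returns 3: the adjacent `SharedReadWrite` at [2] survives,
    // while the `SharedReadOnly` on top will be popped. A write granted by the `Unique` at [0]
    // returns 1, so everything above it gets popped.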

    /// The given item was invalidated -- check its protectors for whether that will cause UB.
    fn item_invalidated(
        item: &Item,
        global: &GlobalStateInner,
        dcx: &DiagnosticCx<'_, '_, 'tcx>,
        cause: ItemInvalidationCause,
    ) -> InterpResult<'tcx> {
        if !global.tracked_pointer_tags.is_empty() {
            dcx.check_tracked_tag_popped(item, global);
        }

        if !item.protected() {
            return interp_ok(());
        }

        // We store tags twice, once in global.protected_tags and once in each call frame.
        // We do this because consulting a single global set in this function is faster
        // than attempting to search all call frames in the program for the `FrameExtra`
        // (if any) which is protecting the popped tag.
        //
        // This duplication trades off making `end_call` slower to make this function faster. This
        // trade-off is profitable in practice for a combination of two reasons.
        // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
        //    Therefore, adding overhead in function call/return is profitable even if it only
        //    saves a little work in this function.
        // 2. Most frames protect only one or two tags. So this duplicative global turns a search
        //    which ends up about linear in the number of protected tags in the program into a
        //    constant-time check (and that linear search would be a slow one, because the tags in the frames aren't contiguous).
        if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
            // The only way this is okay is if the protector is weak and we are deallocating with
            // the right pointer.
            let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
                && matches!(protector_kind, ProtectorKind::WeakProtector);
            if !allowed {
                return Err(dcx.protector_error(item, protector_kind)).into();
            }
        }
        interp_ok(())
    }
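
    // For intuition, a minimal sketch (not from this crate) of a protector-induced error:
    //
    //     fn callee(x: &mut i32, raw: *mut i32) { unsafe { *raw = 0; } }
    //     let mut local = 0;
    //     let raw = &mut local as *mut i32;
    //     callee(unsafe { &mut *raw }, raw);
    //
    // The `FnEntry` retag of `x` adds a strongly protected `Unique` above `raw`'s item; the
    // write through `raw` inside `callee` would have to pop that protected item, so it is
    // reported as UB here instead of being silently removed.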

    /// Test if a memory `access` using pointer tagged `tag` is granted; if not, this reports UB.
    /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
    /// allocation that we are currently checking.
    fn access(
        &mut self,
        access: AccessKind,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Two main steps: Find granting item, remove incompatible items above.

        // Step 1: Find granting item.
        let granting_idx =
            self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;

        // Step 2: Remove incompatible items above them.  Make sure we do not remove protected
        // items.  Behavior differs for reads and writes.
        // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
        if access == AccessKind::Write {
            // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
            // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would remove more
                // things. Even if this is a Unique and the lower idx is an SRW (which removes
                // less), there is an SRW group boundary here so strictly more would get removed.
                self.find_first_write_incompatible(granting_idx)
            } else {
                // We are writing to something in the unknown part.
                // There is an SRW group boundary between the unknown and the known, so everything is incompatible.
                0
            };
            self.pop_items_after(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        } else {
            // On a read, *disable* all `Unique` above the granting item.  This ensures U2 for read accesses.
            // The reason this is not following the stack discipline (by removing the first Unique and
            // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
            // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
            // `SharedReadWrite` for `raw`.
            // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
            // reference and use that.
            // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would disable more things.
                granting_idx + 1
            } else {
                // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
                0
            };
            self.disable_uniques_starting_at(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        }

        // If this was an approximate action, we now collapse everything into an unknown.
        if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
            // Compute the upper bound of the items that remain.
            // (This is why we did all the work above: to reduce the items we have to consider here.)
            let mut max = BorTag::one();
            for i in 0..self.len() {
                let item = self.get(i).unwrap();
                // Skip disabled items, they cannot be matched anyway.
                if !matches!(item.perm(), Permission::Disabled) {
                    // We are looking for a strict upper bound, so add 1 to this tag.
                    max = cmp::max(item.tag().succ().unwrap(), max);
                }
            }
            if let Some(unk) = self.unknown_bottom() {
                max = cmp::max(unk, max);
            }
            // Use `max` as new strict upper bound for everything.
            trace!(
                "access: forgetting stack to upper bound {max} due to wildcard or unknown access",
                max = max.get(),
            );
            self.set_unknown_bottom(max);
        }

        // Done.
        interp_ok(())
    }

    /// Deallocate a location: Like a write access, but also there must be no
    /// active protectors at all because we will remove all items.
    fn dealloc(
        &mut self,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Step 1: Make a write access.
        // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
        self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;

        // Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
        for idx in (0..self.len()).rev() {
            let item = self.get(idx).unwrap();
            Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
        }

        interp_ok(())
    }

    /// Derive a new pointer from one with the given tag.
    ///
    /// `access` indicates which kind of memory access this retag itself should correspond to.
    fn grant(
        &mut self,
        derived_from: ProvenanceExtra,
        new: Item,
        access: Option<AccessKind>,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        dcx.start_grant(new.perm());

        // Compute where to put the new item.
        // Either way, we ensure that we insert the new item in a way such that between
        // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
        let new_idx = if let Some(access) = access {
            // Simple case: We are just a regular memory access, and then push our thing on top,
            // like a regular stack.
            // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
            self.access(access, derived_from, global, dcx, exposed_tags)?;

            // We insert "as far up as possible": We know only compatible items are remaining
            // on top of `derived_from`, and we want the new item at the top so that we
            // get the strongest possible guarantees.
            // This ensures U1 and F1.
            self.len()
        } else {
            // The tricky case: creating a new SRW permission without actually being an access.
            assert!(new.perm() == Permission::SharedReadWrite);

            // First we figure out which item grants our parent (`derived_from`) this kind of access.
            // We use that to determine where to put the new item.
            let granting_idx = self
                .find_granting(AccessKind::Write, derived_from, exposed_tags)
                .map_err(|()| dcx.grant_error(self))?;

            let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from)
            else {
                // The parent is a wildcard pointer or matched the unknown bottom.
                // This is approximate. Nobody knows what happened, so forget everything.
                // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
                // (for all we know, it might join an SRW group inside the unknown).
                trace!(
                    "reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown"
                );
                self.set_unknown_bottom(global.next_ptr_tag);
                return interp_ok(());
            };

            // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
            // access.  Instead of popping the stack, we insert the item at the place the stack would
            // be popped to (i.e., we insert it above all the write-compatible items).
            // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
            self.find_first_write_incompatible(granting_idx)
        };

        // Put the new item there.
        trace!("reborrow: adding item {:?}", new);
        self.insert(new_idx, new);
        interp_ok(())
    }
}
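
// For intuition, a minimal sketch of the `access: None` case of `grant` above: when a shared
// reference to something with interior mutability (say, `&Cell<i32>`) is created from a
// `&mut Cell<i32>`, the new `SharedReadWrite` item is inserted directly above the parent's
// `Unique`, below any `SharedReadOnly` that may already sit higher up, so existing read-only
// loans stay valid (F2b) while the new pointer can still be used for writes.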
// # Stacked Borrows Core End

/// Integration with the BorTag garbage collector
impl Stacks {
    pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
        for (_stack_range, stack) in self.stacks.iter_mut_all() {
            stack.retain(live_tags);
        }
        self.history.retain(live_tags);
    }
}

impl VisitProvenance for Stacks {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        for tag in self.exposed_tags.iter().copied() {
            visit(None, Some(tag));
        }
    }
}

/// Map per-stack operations to higher-level per-location-range operations.
impl<'tcx> Stacks {
    /// Creates a new stack with an initial tag. For diagnostic purposes, we also need to know
    /// the [`AllocId`] of the allocation this is associated with.
    fn new(
        size: Size,
        perm: Permission,
        tag: BorTag,
        id: AllocId,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let item = Item::new(tag, perm, false);
        let stack = Stack::new(item);

        Stacks {
            stacks: RangeMap::new(size, stack),
            history: AllocHistory::new(id, item, machine),
            exposed_tags: FxHashSet::default(),
        }
    }

    /// Call `f` on every stack in the range.
    fn for_each(
        &mut self,
        range: AllocRange,
        mut dcx_builder: DiagnosticCxBuilder<'_, 'tcx>,
        mut f: impl FnMut(
            &mut Stack,
            &mut DiagnosticCx<'_, '_, 'tcx>,
            &mut FxHashSet<BorTag>,
        ) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        for (stack_range, stack) in self.stacks.iter_mut(range.start, range.size) {
            let mut dcx = dcx_builder.build(&mut self.history, Size::from_bytes(stack_range.start));
            f(stack, &mut dcx, &mut self.exposed_tags)?;
            dcx_builder = dcx.unbuild();
        }
        interp_ok(())
    }
}

/// Glue code to connect with Miri Machine Hooks
impl Stacks {
    pub fn new_allocation(
        id: AllocId,
        size: Size,
        state: &mut GlobalStateInner,
        kind: MemoryKind,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let (base_tag, perm) = match kind {
            // New unique borrow. This tag is not accessible by the program,
            // so it will only ever be used when using the local directly (i.e.,
            // not through a pointer). That is, whenever we directly write to a local, this will pop
            // everything else off the stack, invalidating all previous pointers,
            // and in particular, *all* raw pointers.
            MemoryKind::Stack => (state.root_ptr_tag(id, machine), Permission::Unique),
            // Everything else is shared by default.
            _ => (state.root_ptr_tag(id, machine), Permission::SharedReadWrite),
        };
        Stacks::new(size, perm, base_tag, id, machine)
    }

    #[inline(always)]
    pub fn before_memory_read<'ecx, 'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &'ecx MiriMachine<'tcx>,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'ecx,
    {
        trace!(
            "read access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::read(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!(
            "write access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::write(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_deallocation<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        size: Size,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, size.bytes());
        let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(alloc_range(Size::ZERO, size), dcx, |stack, dcx, exposed_tags| {
            stack.dealloc(tag, &state, dcx, exposed_tags)
        })?;
        interp_ok(())
    }
}

/// Retagging/reborrowing.  There is some policy in here, such as which permissions
/// to grant for which references, and when to add protectors.
impl<'tcx, 'ecx> EvalContextPrivExt<'tcx, 'ecx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the provenance that should be used henceforth.
    fn sb_reborrow(
        &mut self,
        place: &MPlaceTy<'tcx>,
        size: Size,
        new_perm: NewPermission,
        new_tag: BorTag,
        retag_info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, Option<Provenance>> {
        let this = self.eval_context_mut();
        // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
        this.check_ptr_access(place.ptr(), size, CheckInAllocMsg::InboundsTest)?;

        // It is crucial that this gets called on all code paths, to ensure we track tag creation.
        let log_creation = |this: &MiriInterpCx<'tcx>,
                            loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
         -> InterpResult<'tcx> {
            let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
            let ty = place.layout.ty;
            if global.tracked_pointer_tags.contains(&new_tag) {
                let mut kind_str = String::new();
                match new_perm {
                    NewPermission::Uniform { perm, .. } =>
                        write!(kind_str, "{perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, .. } if ty.is_freeze(*this.tcx, this.typing_env()) =>
                        write!(kind_str, "{freeze_perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, nonfreeze_perm, .. } =>
                        write!(kind_str, "{freeze_perm:?}/{nonfreeze_perm:?} permission for frozen/non-frozen parts").unwrap(),
                }
                write!(kind_str, " (pointee type {ty})").unwrap();
                this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
                    new_tag.inner(),
                    Some(kind_str),
                    loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
                ));
            }
            drop(global); // don't hold that reference any longer than we have to

            let Some((alloc_id, base_offset, orig_tag)) = loc else {
                return interp_ok(())
            };

            let alloc_kind = this.get_alloc_info(alloc_id).kind;
            match alloc_kind {
                AllocKind::LiveData => {
                    // This should have alloc_extra data, but `get_alloc_extra` can still fail
                    // if converting this alloc_id from a global to a local one
                    // uncovers a non-supported `extern static`.
                    let extra = this.get_alloc_extra(alloc_id)?;
                    let mut stacked_borrows = extra
                        .borrow_tracker_sb()
                        .borrow_mut();
                    // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
                    // FIXME: can this be done cleaner?
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
                    dcx.log_creation();
                    if new_perm.protector().is_some() {
                        dcx.log_protector();
                    }
                },
                AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
                    // No stacked borrows on these allocations.
                }
            }
            interp_ok(())
        };

        if size == Size::ZERO {
            trace!(
                "reborrow of size 0: reference {:?} derived from {:?} (pointee {})",
                new_tag,
                place.ptr(),
                place.layout.ty,
            );
            // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
            // touches no bytes so there is no stack to put this tag in.
            // However, if the pointer for this operation points at a real allocation we still
            // record where it was created so that we can issue a helpful diagnostic if there is an
            // attempt to use it for a non-zero-sized access.
            // Dangling slices are a common case here; it's valid to get their length, but with
            // raw pointers tagged (the default), all calls to `get_unchecked` on them are invalid.
            if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr(), 0)
            {
                log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
                // Still give it the new provenance, it got retagged after all.
                return interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
            } else {
                // This pointer doesn't come with an AllocId. :shrug:
                log_creation(this, None)?;
                // Provenance unchanged.
                return interp_ok(place.ptr().provenance);
            }
        }

        let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;

        trace!(
            "reborrow: reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
            new_tag,
            orig_tag,
            place.layout.ty,
            interpret::Pointer::new(alloc_id, base_offset),
            size.bytes()
        );

        if let Some(protect) = new_perm.protector() {
            // See comment in `Stack::item_invalidated` for why we store the tag twice.
            this.frame_mut()
                .extra
                .borrow_tracker
                .as_mut()
                .unwrap()
                .protected_tags
                .push((alloc_id, new_tag));
            this.machine
                .borrow_tracker
                .as_mut()
                .unwrap()
                .get_mut()
                .protected_tags
                .insert(new_tag, protect);
        }

        // Update the stacks, according to the new permission information we are given.
        match new_perm {
            NewPermission::Uniform { perm, access, protector } => {
                assert!(perm != Permission::SharedReadOnly);
                // Here we can avoid `borrow()` calls because we have mutable references.
                // Note that this asserts that the allocation is mutable -- but since we are creating a
                // mutable pointer, that seems reasonable.
                let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
                let stacked_borrows = alloc_extra.borrow_tracker_sb_mut().get_mut();
                let item = Item::new(new_tag, perm, protector.is_some());
                let range = alloc_range(base_offset, size);
                let global = machine.borrow_tracker.as_ref().unwrap().borrow();
                let dcx = DiagnosticCxBuilder::retag(
                    machine,
                    retag_info,
                    new_tag,
                    orig_tag,
                    alloc_range(base_offset, size),
                );
                stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                    stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                })?;
                drop(global);
                if let Some(access) = access {
                    assert_eq!(access, AccessKind::Write);
                    // Make sure the data race model also knows about this.
                    if let Some(data_race) = alloc_extra.data_race.as_mut() {
                        data_race.write(
                            alloc_id,
                            range,
                            NaWriteType::Retag,
                            Some(place.layout.ty),
                            machine,
                        )?;
                    }
                }
            }
            NewPermission::FreezeSensitive {
                freeze_perm,
                freeze_access,
                freeze_protector,
                nonfreeze_perm,
                nonfreeze_access,
            } => {
                // The permission is not uniform across the entire range!
                // We need a frozen-sensitive reborrow.
                // We have to use shared references to alloc/memory_extra here since
                // `visit_freeze_sensitive` needs to access the global state.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                let mut stacked_borrows = alloc_extra.borrow_tracker_sb().borrow_mut();
                this.visit_freeze_sensitive(place, size, |mut range, frozen| {
                    // Adjust range.
                    range.start += base_offset;
                    // We are only ever `SharedReadOnly` inside the frozen bits.
                    let (perm, access, protector) = if frozen {
                        (freeze_perm, freeze_access, freeze_protector)
                    } else {
                        (nonfreeze_perm, nonfreeze_access, None)
                    };
                    let item = Item::new(new_tag, perm, protector.is_some());
                    let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                        stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                    })?;
                    drop(global);
                    if let Some(access) = access {
                        assert_eq!(access, AccessKind::Read);
                        // Make sure the data race model also knows about this.
                        if let Some(data_race) = alloc_extra.data_race.as_ref() {
                            data_race.read(
                                alloc_id,
                                range,
                                NaReadType::Retag,
                                Some(place.layout.ty),
                                &this.machine,
                            )?;
                        }
                    }
                    interp_ok(())
                })?;
            }
        }

        interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
    }

    fn sb_retag_place(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();
        let size = this.size_and_align_of_mplace(place)?.map(|(size, _)| size);
        // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
        // bail out -- we cannot reasonably figure out which memory range to reborrow.
        // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
        let size = match size {
            Some(size) => size,
            None => {
                if !this.machine.sb_extern_type_warned.replace(true) {
                    this.emit_diagnostic(NonHaltingDiagnostic::ExternTypeReborrow);
                }
                return interp_ok(place.clone());
            }
        };

        // Compute new borrow.
        let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr();

        // Reborrow.
        let new_prov = this.sb_reborrow(place, size, new_perm, new_tag, info)?;

        // Adjust place.
        // (If the closure gets called, that means the old provenance was `Some`, and hence the new
        // one must also be `Some`.)
        interp_ok(place.clone().map_provenance(|_| new_prov.unwrap()))
    }

    /// Retags an individual pointer, returning the retagged version.
    /// `kind` indicates what kind of reference is being created.
    fn sb_retag_reference(
        &mut self,
        val: &ImmTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        let place = this.ref_to_mplace(val)?;
        let new_place = this.sb_retag_place(&place, new_perm, info)?;
        interp_ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn sb_retag_ptr_value(
        &mut self,
        kind: RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        let new_perm = NewPermission::from_ref_ty(val.layout.ty, kind, this);
        let cause = match kind {
            RetagKind::TwoPhase => RetagCause::TwoPhase,
            RetagKind::FnEntry => unreachable!(),
            RetagKind::Raw | RetagKind::Default => RetagCause::Normal,
        };
        this.sb_retag_reference(val, new_perm, RetagInfo { cause, in_field: false })
    }

    fn sb_retag_place_contents(
        &mut self,
        kind: RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let retag_fields = this.machine.borrow_tracker.as_mut().unwrap().get_mut().retag_fields;
        let retag_cause = match kind {
            RetagKind::TwoPhase => unreachable!(), // can only happen in `retag_ptr_value`
            RetagKind::FnEntry => RetagCause::FnEntry,
            RetagKind::Default | RetagKind::Raw => RetagCause::Normal,
        };
        let mut visitor =
            RetagVisitor { ecx: this, kind, retag_cause, retag_fields, in_field: false };
        return visitor.visit_value(place);

        // The actual visitor.
        struct RetagVisitor<'ecx, 'tcx> {
            ecx: &'ecx mut MiriInterpCx<'tcx>,
            kind: RetagKind,
            retag_cause: RetagCause,
            retag_fields: RetagFields,
            in_field: bool,
        }
        impl<'ecx, 'tcx> RetagVisitor<'ecx, 'tcx> {
            #[inline(always)] // yes this helps in our benchmarks
            fn retag_ptr_inplace(
                &mut self,
                place: &PlaceTy<'tcx>,
                new_perm: NewPermission,
            ) -> InterpResult<'tcx> {
                let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
                let val = self.ecx.sb_retag_reference(
                    &val,
                    new_perm,
                    RetagInfo { cause: self.retag_cause, in_field: self.in_field },
                )?;
                self.ecx.write_immediate(*val, place)?;
                interp_ok(())
            }
        }
        impl<'ecx, 'tcx> ValueVisitor<'tcx, MiriMachine<'tcx>> for RetagVisitor<'ecx, 'tcx> {
            type V = PlaceTy<'tcx>;

            #[inline(always)]
            fn ecx(&self) -> &MiriInterpCx<'tcx> {
                self.ecx
            }

            fn visit_box(&mut self, box_ty: Ty<'tcx>, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                // Only boxes for the global allocator get any special treatment.
                if box_ty.is_box_global(*self.ecx.tcx) {
                    // Boxes get a weak protector, since they may be deallocated.
                    let new_perm = NewPermission::from_box_ty(place.layout.ty, self.kind, self.ecx);
                    self.retag_ptr_inplace(place, new_perm)?;
                }
                interp_ok(())
            }

            fn visit_value(&mut self, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                // If this place is smaller than a pointer, we know that it can't contain any
                // pointers we need to retag, so we can stop recursion early.
                // This optimization is crucial for ZSTs, because they can contain way more fields
                // than we can ever visit.
                if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
                    return interp_ok(());
                }

                // Check the type of this value to see what to do with it (retag, or recurse).
                match place.layout.ty.kind() {
                    ty::Ref(..) | ty::RawPtr(..) => {
                        if matches!(place.layout.ty.kind(), ty::Ref(..))
                            || self.kind == RetagKind::Raw
                        {
                            let new_perm =
                                NewPermission::from_ref_ty(place.layout.ty, self.kind, self.ecx);
                            self.retag_ptr_inplace(place, new_perm)?;
                        }
                    }
                    ty::Adt(adt, _) if adt.is_box() => {
                        // Recurse for boxes, they require some tricky handling and will end up in `visit_box` above.
                        // (Yes this means we technically also recursively retag the allocator itself
                        // even if field retagging is not enabled. *shrug*)
                        self.walk_value(place)?;
                    }
                    _ => {
                        // Not a reference/pointer/box. Only recurse if configured appropriately.
                        let recurse = match self.retag_fields {
                            RetagFields::No => false,
                            RetagFields::Yes => true,
                            RetagFields::OnlyScalar => {
                                // Matching `ArgAbi::new` at the time of writing, only fields of
                                // `Scalar` and `ScalarPair` ABI are considered.
                                matches!(
                                    place.layout.backend_repr,
                                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
                                )
                            }
                        };
                        if recurse {
                            let in_field = mem::replace(&mut self.in_field, true); // remember and restore old value
                            self.walk_value(place)?;
                            self.in_field = in_field;
                        }
                    }
                }

                interp_ok(())
            }
        }
    }
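
    // A note on field retagging above: which non-pointer places get recursed into is controlled
    // by `RetagFields` (the `-Zmiri-retag-fields` flag). For example, under
    // `RetagFields::OnlyScalar` a newtype wrapper around a reference has `Scalar` ABI, so its
    // reference field is still retagged, while fields buried in larger aggregates are skipped.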

    /// Protect a place so that it cannot be used any more for the duration of the current function
    /// call.
    ///
    /// This is used to ensure soundness of in-place function argument/return passing.
    fn sb_protect_place(&mut self, place: &MPlaceTy<'tcx>) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();

        // Retag it. With protection! That is the entire point.
        let new_perm = NewPermission::Uniform {
            perm: Permission::Unique,
            access: Some(AccessKind::Write),
            protector: Some(ProtectorKind::StrongProtector),
        };
        this.sb_retag_place(
            place,
            new_perm,
            RetagInfo { cause: RetagCause::InPlaceFnPassing, in_field: false },
        )
    }

    /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
    fn sb_expose_tag(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();

        // Function pointers and dead objects don't have an alloc_extra so we ignore them.
        // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
        // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
        let kind = this.get_alloc_info(alloc_id).kind;
        match kind {
            AllocKind::LiveData => {
                // This should have alloc_extra data, but `get_alloc_extra` can still fail
                // if converting this alloc_id from a global to a local one
                // uncovers a non-supported `extern static`.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
                alloc_extra.borrow_tracker_sb().borrow_mut().exposed_tags.insert(tag);
            }
            AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
                // No stacked borrows on these allocations.
            }
        }
        interp_ok(())
    }
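
    // For intuition, a rough sketch of when this gets called: a pointer-to-integer cast such as
    //
    //     let addr = ptr.expose_provenance();   // or `ptr as usize`
    //
    // exposes `ptr`'s tag here. A later integer-to-pointer cast yields `Wildcard` provenance,
    // and accesses through such a pointer are then checked against the tags recorded in
    // `exposed_tags` above.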

    fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let alloc_extra = this.get_alloc_extra(alloc_id)?;
        let stacks = alloc_extra.borrow_tracker_sb().borrow();
        for (range, stack) in stacks.stacks.iter_all() {
            print!("{range:?}: [");
            if let Some(bottom) = stack.unknown_bottom() {
                print!(" unknown-bottom(..{bottom:?})");
            }
            for i in 0..stack.len() {
                let item = stack.get(i).unwrap();
                print!(" {:?}{:?}", item.perm(), item.tag());
            }
            println!(" ]");
        }
        interp_ok(())
    }
}