miri/borrow_tracker/stacked_borrows/mod.rs
//! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
//! for further information.
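//!
//! As a quick orientation, here is a tiny example of the kind of aliasing violation this module
//! detects (an illustrative sketch added for this write-up, not part of the upstream sources):
//!
//! ```rust,ignore
//! let mut x = 0u8;
//! let p = &mut x as *mut u8; // `p` ends up with a SharedReadWrite item on `x`'s borrow stack
//! let r = &mut x;            // retagging `r` is a write with `x`'s root tag, popping `p`'s item
//! unsafe { *p = 1 };         // UB under Stacked Borrows: `p`'s item is no longer in the stack
//! let _val = *r;
//! ```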

pub mod diagnostics;
mod item;
mod stack;

use std::fmt::Write;
use std::sync::atomic::AtomicBool;
use std::{cmp, mem};

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir::{Mutability, RetagKind};
use rustc_middle::ty::layout::HasTypingEnv;
use rustc_middle::ty::{self, Ty};

use self::diagnostics::{RetagCause, RetagInfo};
pub use self::item::{Item, Permission};
pub use self::stack::Stack;
use crate::borrow_tracker::stacked_borrows::diagnostics::{
    AllocHistory, DiagnosticCx, DiagnosticCxBuilder,
};
use crate::borrow_tracker::{AccessKind, GlobalStateInner, ProtectorKind};
use crate::concurrency::data_race::{NaReadType, NaWriteType};
use crate::*;

pub type AllocState = Stacks;

/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
    // Even reading memory can have effects on the stack, so we need a `RefCell` here.
    stacks: DedupRangeMap<Stack>,
    /// Stores past operations on this allocation
    history: AllocHistory,
    /// The set of tags that have been exposed inside this allocation.
    exposed_tags: FxHashSet<BorTag>,
}

/// Indicates which permissions to grant to the retagged pointer.
#[derive(Clone, Debug)]
enum NewPermission {
    Uniform {
        perm: Permission,
        access: Option<AccessKind>,
        protector: Option<ProtectorKind>,
    },
    FreezeSensitive {
        freeze_perm: Permission,
        freeze_access: Option<AccessKind>,
        freeze_protector: Option<ProtectorKind>,
        nonfreeze_perm: Permission,
        nonfreeze_access: Option<AccessKind>,
        // nonfreeze_protector must always be None
    },
}

impl NewPermission {
    /// A key function: determine the permissions to grant at a retag for the given kind of
    /// reference/pointer.
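    ///
    /// A rough summary of the mapping in terms of user-level pointer types (an illustrative
    /// sketch added for this write-up, not part of the upstream sources; two-phase borrows are
    /// omitted):
    ///
    /// ```rust,ignore
    /// fn f(a: &mut u8, b: &u8, c: *mut u8, d: *const u8) {
    ///     // `a`: `Unique`, the retag counts as a write access; strongly protected on FnEntry.
    ///     // `b`: `SharedReadOnly` outside `UnsafeCell` (with a read access), `SharedReadWrite`
    ///     //      inside `UnsafeCell`; only the frozen part gets a protector on FnEntry.
    ///     // `c`: `SharedReadWrite`, no access, never protected.
    ///     // `d`: like `b`, but never protected.
    /// }
    /// ```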
    fn from_ref_ty<'tcx>(ty: Ty<'tcx>, kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        let protector = (kind == RetagKind::FnEntry).then_some(ProtectorKind::StrongProtector);
        match ty.kind() {
            ty::Ref(_, pointee, Mutability::Mut) => {
                if kind == RetagKind::TwoPhase {
                    // We mostly just give up on 2phase-borrows, and treat these exactly like raw pointers.
                    assert!(protector.is_none()); // RetagKind can't be both FnEntry and TwoPhase.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                } else if pointee.is_unpin(*cx.tcx, cx.typing_env())
                    && pointee.is_unsafe_unpin(*cx.tcx, cx.typing_env())
                {
                    // A regular full mutable reference. On `FnEntry` this is `noalias` and `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::Unique,
                        access: Some(AccessKind::Write),
                        protector,
                    }
                } else {
                    // `!Unpin` dereferences do not get `noalias` nor `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                }
            }
            ty::RawPtr(_, Mutability::Mut) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // Mutable raw pointer. No access, not protected.
                NewPermission::Uniform {
                    perm: Permission::SharedReadWrite,
                    access: None,
                    protector: None,
                }
            }
            ty::Ref(_, _pointee, Mutability::Not) => {
                // Shared references. If frozen, these get `noalias` and `dereferenceable`; otherwise neither.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: protector,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    // Inside UnsafeCell, this does *not* count as an access, as there
                    // might actually be mutable references further up the stack that
                    // we have to keep alive.
                    nonfreeze_access: None,
                    // We do not protect inside UnsafeCell.
                    // This fixes https://github.com/rust-lang/rust/issues/55005.
                }
            }
            ty::RawPtr(_, Mutability::Not) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // `*const T`, when freshly created, are read-only in the frozen part.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: None,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    nonfreeze_access: None,
                }
            }
            _ => unreachable!(),
        }
    }

    fn from_box_ty<'tcx>(ty: Ty<'tcx>, kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        // `ty` is not the `Box` but the field of the Box with this pointer (due to allocator handling).
        let pointee = ty.builtin_deref(true).unwrap();
        if pointee.is_unpin(*cx.tcx, cx.typing_env())
            && pointee.is_unsafe_unpin(*cx.tcx, cx.typing_env())
        {
            // A regular box. On `FnEntry` this is `noalias`, but not `dereferenceable` (hence only
            // a weak protector).
            NewPermission::Uniform {
                perm: Permission::Unique,
                access: Some(AccessKind::Write),
                protector: (kind == RetagKind::FnEntry).then_some(ProtectorKind::WeakProtector),
            }
        } else {
            // `!Unpin` boxes do not get `noalias` nor `dereferenceable`.
            NewPermission::Uniform {
                perm: Permission::SharedReadWrite,
                access: None,
                protector: None,
            }
        }
    }

    fn protector(&self) -> Option<ProtectorKind> {
        match self {
            NewPermission::Uniform { protector, .. } => *protector,
            NewPermission::FreezeSensitive { freeze_protector, .. } => *freeze_protector,
        }
    }
}

// # Stacked Borrows Core Begin

/// We need to make at least the following things true:
///
/// U1: After creating a `Uniq`, it is at the top.
/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
///
/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
/// F2: If a write access happens, it pops the `SharedReadOnly`. This has two pieces:
///     F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
///          gets popped.
///     F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
/// F3: If an access happens with an `&` outside `UnsafeCell`,
///     it requires the `SharedReadOnly` to still be in the stack.
///
/// Core relation on `Permission` to define which accesses are allowed
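///
/// As an illustration of these rules on user-level code (a sketch added for this write-up, not
/// part of the upstream sources):
///
/// ```rust,ignore
/// let mut x = 0u8;
/// let raw = &mut x as *mut u8;  // U1: a fresh `Uniq` for the reference, then a SharedReadWrite for `raw`
/// let r = unsafe { &mut *raw }; // U1: a new `Uniq` for `r` is pushed on top
/// unsafe { *raw = 1 };          // U2: the write through `raw` removes `r`'s `Uniq` above it
/// // let _val = *r;             // would be UB: U3 requires `r`'s `Uniq` to still be in the stack
/// ```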
impl Permission {
    /// This defines for a given permission, whether it permits the given kind of access.
    fn grants(self, access: AccessKind) -> bool {
        // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
        self != Permission::Disabled
            && (access == AccessKind::Read || self != Permission::SharedReadOnly)
    }
}

/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
#[derive(Copy, Clone, Debug)]
enum ItemInvalidationCause {
    Conflict,
    Dealloc,
}

/// Core per-location operations: access, dealloc, reborrow.
impl<'tcx> Stack {
    /// Find the first write-incompatible item above the given one --
    /// i.e., find the height to which the stack will be truncated when writing to `granting`.
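    ///
    /// For example (illustrative):
    ///
    /// ```text
    /// stack (bottom..top): [SharedReadWrite, Unique, SharedReadWrite, SharedReadWrite, SharedReadOnly]
    /// granting = 1 (Unique)          => 2: everything above a Unique is write-incompatible
    /// granting = 2 (SharedReadWrite) => 4: the adjacent SharedReadWrite at index 3 is still
    ///                                      compatible, the SharedReadOnly on top is not
    /// ```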
    fn find_first_write_incompatible(&self, granting: usize) -> usize {
        let perm = self.get(granting).unwrap().perm();
        match perm {
            Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
            Permission::Disabled => bug!("Cannot use Disabled for anything"),
            Permission::Unique => {
                // On a write, everything above us is incompatible.
                granting + 1
            }
            Permission::SharedReadWrite => {
                // The SharedReadWrite *just* above us are compatible, so we skip those.
                let mut idx = granting + 1;
                while let Some(item) = self.get(idx) {
                    if item.perm() == Permission::SharedReadWrite {
                        // Go on.
                        idx += 1;
                    } else {
                        // Found first incompatible!
                        break;
                    }
                }
                idx
            }
        }
    }

    /// The given item was invalidated -- check its protectors for whether that will cause UB.
    fn item_invalidated(
        item: &Item,
        global: &GlobalStateInner,
        dcx: &DiagnosticCx<'_, '_, 'tcx>,
        cause: ItemInvalidationCause,
    ) -> InterpResult<'tcx> {
        if !global.tracked_pointer_tags.is_empty() {
            dcx.check_tracked_tag_popped(item, global);
        }

        if !item.protected() {
            return interp_ok(());
        }

        // We store tags twice, once in global.protected_tags and once in each call frame.
        // We do this because consulting a single global set in this function is faster
        // than attempting to search all call frames in the program for the `FrameExtra`
        // (if any) which is protecting the popped tag.
        //
        // This duplication trades off making `end_call` slower to make this function faster. This
        // trade-off is profitable in practice for a combination of two reasons.
        // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
        //    Therefore, adding overhead in function call/return is profitable even if it only
        //    saves a little work in this function.
        // 2. Most frames protect only one or two tags. So this duplicative global turns a search
        //    which ends up about linear in the number of protected tags in the program into a
        //    constant time check (and a slow linear, because the tags in the frames aren't contiguous).
        if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
            // The only way this is okay is if the protector is weak and we are deallocating with
            // the right pointer.
            let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
                && matches!(protector_kind, ProtectorKind::WeakProtector);
            if !allowed {
                return Err(dcx.protector_error(item, protector_kind)).into();
            }
        }
        interp_ok(())
    }

    /// Test if a memory `access` using pointer tagged `tag` is granted.
    /// If yes, return the index of the item that granted it.
    /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
    /// allocation that we are currently checking.
    fn access(
        &mut self,
        access: AccessKind,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Two main steps: Find granting item, remove incompatible items above.

        // Step 1: Find granting item.
        let granting_idx =
            self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;

        // Step 2: Remove incompatible items above them. Make sure we do not remove protected
        // items. Behavior differs for reads and writes.
        // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
        if access == AccessKind::Write {
            // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
            // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would remove more
                // things. Even if this is a Unique and the lower idx is an SRW (which removes
                // less), there is an SRW group boundary here so strictly more would get removed.
                self.find_first_write_incompatible(granting_idx)
            } else {
                // We are writing to something in the unknown part.
                // There is a SRW group boundary between the unknown and the known, so everything is incompatible.
                0
            };
            self.pop_items_after(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        } else {
            // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
            // The reason this is not following the stack discipline (by removing the first Unique and
            // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
            // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
            // `SharedReadWrite` for `raw`.
            // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
            // reference and use that.
            // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
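            // An illustrative sketch of that pattern and the resulting stack (added for this
            // write-up, not from the upstream sources), with `x: &mut i32`:
            //
            //     let raw = &mut *x as *mut i32; // stack: [.., Unique(x), Unique(tmp), SharedReadWrite(raw)]
            //     let _val = *x;                 // read with `x`'s tag: `Unique(tmp)` above it is
            //                                    // *disabled*, but `SharedReadWrite(raw)` stays,
            //     unsafe { *raw = 1 };           // so this later write through `raw` is still allowed.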
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would disable more things.
                granting_idx + 1
            } else {
                // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
                0
            };
            self.disable_uniques_starting_at(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        }

        // If this was an approximate action, we now collapse everything into an unknown.
        if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
            // Compute the upper bound of the items that remain.
            // (This is why we did all the work above: to reduce the items we have to consider here.)
            let mut max = BorTag::one();
            for i in 0..self.len() {
                let item = self.get(i).unwrap();
                // Skip disabled items, they cannot be matched anyway.
                if !matches!(item.perm(), Permission::Disabled) {
                    // We are looking for a strict upper bound, so add 1 to this tag.
                    max = cmp::max(item.tag().succ().unwrap(), max);
                }
            }
            if let Some(unk) = self.unknown_bottom() {
                max = cmp::max(unk, max);
            }
            // Use `max` as new strict upper bound for everything.
            trace!(
                "access: forgetting stack to upper bound {max} due to wildcard or unknown access",
                max = max.get(),
            );
            self.set_unknown_bottom(max);
        }

        // Done.
        interp_ok(())
    }

    /// Deallocate a location: Like a write access, but also there must be no
    /// active protectors at all because we will remove all items.
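    ///
    /// For example (an illustrative sketch added for this write-up, not from the upstream
    /// sources), deallocating memory that is strongly protected by an active call frame is UB,
    /// while a weak protector still permits deallocation through the protected pointer itself:
    ///
    /// ```rust,ignore
    /// fn f(x: &mut u8, b: Box<u8>) {
    ///     drop(b); // ok: `b` only carries a weak protector, and we deallocate through `b` itself
    ///     // drop(unsafe { Box::from_raw(x as *mut u8) }); // UB: `x` is strongly protected while `f` runs
    /// }
    /// ```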
    fn dealloc(
        &mut self,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Step 1: Make a write access.
        // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
        self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;

        // Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
        for idx in (0..self.len()).rev() {
            let item = self.get(idx).unwrap();
            Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
        }

        interp_ok(())
    }

    /// Derive a new pointer from one with the given tag.
    ///
    /// `access` indicates which kind of memory access this retag itself should correspond to.
    fn grant(
        &mut self,
        derived_from: ProvenanceExtra,
        new: Item,
        access: Option<AccessKind>,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        dcx.start_grant(new.perm());

        // Compute where to put the new item.
        // Either way, we ensure that we insert the new item in a way such that between
        // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
        let new_idx = if let Some(access) = access {
            // Simple case: We are just a regular memory access, and then push our thing on top,
            // like a regular stack.
            // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
            self.access(access, derived_from, global, dcx, exposed_tags)?;

            // We insert "as far up as possible": We know only compatible items are remaining
            // on top of `derived_from`, and we want the new item at the top so that we
            // get the strongest possible guarantees.
            // This ensures U1 and F1.
            self.len()
        } else {
            // The tricky case: creating a new SRW permission without actually being an access.
            assert!(new.perm() == Permission::SharedReadWrite);

            // First we figure out which item grants our parent (`derived_from`) this kind of access.
            // We use that to determine where to put the new item.
            let granting_idx = self
                .find_granting(AccessKind::Write, derived_from, exposed_tags)
                .map_err(|()| dcx.grant_error(self))?;

            let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from)
            else {
                // The parent is a wildcard pointer or matched the unknown bottom.
                // This is approximate. Nobody knows what happened, so forget everything.
                // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
                // (for all we know, it might join an SRW group inside the unknown).
                trace!(
                    "reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown"
                );
                self.set_unknown_bottom(global.next_ptr_tag);
                return interp_ok(());
            };

            // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
            // access. Instead of popping the stack, we insert the item at the place the stack would
            // be popped to (i.e., we insert it above all the write-compatible items).
            // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
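            // An illustrative sketch (added for this write-up, not from the upstream sources):
            // with a stack `[.., Unique(parent), SharedReadOnly(s)]`, granting a new
            // SharedReadWrite derived from `parent` inserts it in the middle, yielding
            // `[.., Unique(parent), SharedReadWrite(new), SharedReadOnly(s)]`, so the existing
            // shared reference `s` remains usable.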
            self.find_first_write_incompatible(granting_idx)
        };

        // Put the new item there.
        trace!("reborrow: adding item {:?}", new);
        self.insert(new_idx, new);
        interp_ok(())
    }
}
// # Stacked Borrows Core End

/// Integration with the BorTag garbage collector
impl Stacks {
    pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
        for (_stack_range, stack) in self.stacks.iter_mut_all() {
            stack.retain(live_tags);
        }
        self.history.retain(live_tags);
    }
}

impl VisitProvenance for Stacks {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        for tag in self.exposed_tags.iter().copied() {
            visit(None, Some(tag));
        }
    }
}

/// Map per-stack operations to higher-level per-location-range operations.
impl<'tcx> Stacks {
    /// Creates a new stack with an initial tag. For diagnostic purposes, we also need to know
    /// the [`AllocId`] of the allocation this is associated with.
    fn new(
        size: Size,
        perm: Permission,
        tag: BorTag,
        id: AllocId,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let item = Item::new(tag, perm, false);
        let stack = Stack::new(item);

        Stacks {
            stacks: DedupRangeMap::new(size, stack),
            history: AllocHistory::new(id, item, machine),
            exposed_tags: FxHashSet::default(),
        }
    }

    /// Call `f` on every stack in the range.
    fn for_each(
        &mut self,
        range: AllocRange,
        mut dcx_builder: DiagnosticCxBuilder<'_, 'tcx>,
        mut f: impl FnMut(
            &mut Stack,
            &mut DiagnosticCx<'_, '_, 'tcx>,
            &mut FxHashSet<BorTag>,
        ) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        for (stack_range, stack) in self.stacks.iter_mut(range.start, range.size) {
            let mut dcx = dcx_builder.build(&mut self.history, Size::from_bytes(stack_range.start));
            f(stack, &mut dcx, &mut self.exposed_tags)?;
            dcx_builder = dcx.unbuild();
        }
        interp_ok(())
    }
}

/// Glue code to connect with Miri Machine Hooks
impl Stacks {
    pub fn new_allocation(
        id: AllocId,
        size: Size,
        state: &mut GlobalStateInner,
        kind: MemoryKind,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let (base_tag, perm) = match kind {
            // New unique borrow. This tag is not accessible by the program,
            // so it will only ever be used when using the local directly (i.e.,
            // not through a pointer). That is, whenever we directly write to a local, this will pop
            // everything else off the stack, invalidating all previous pointers,
            // and in particular, *all* raw pointers.
            MemoryKind::Stack => (state.root_ptr_tag(id, machine), Permission::Unique),
            // Everything else is shared by default.
            _ => (state.root_ptr_tag(id, machine), Permission::SharedReadWrite),
        };
        Stacks::new(size, perm, base_tag, id, machine)
    }

    #[inline(always)]
    pub fn before_memory_read<'ecx, 'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &'ecx MiriMachine<'tcx>,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'ecx,
    {
        trace!(
            "read access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::read(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!(
            "write access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::write(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_deallocation<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        size: Size,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, size.bytes());
        let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(alloc_range(Size::ZERO, size), dcx, |stack, dcx, exposed_tags| {
            stack.dealloc(tag, &state, dcx, exposed_tags)
        })?;
        interp_ok(())
    }
}

/// Retagging/reborrowing. There is some policy in here, such as which permissions
/// to grant for which references, and when to add protectors.
impl<'tcx, 'ecx> EvalContextPrivExt<'tcx, 'ecx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the provenance that should be used henceforth.
    fn sb_reborrow(
        &mut self,
        place: &MPlaceTy<'tcx>,
        size: Size,
        new_perm: NewPermission,
        new_tag: BorTag,
        retag_info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, Option<Provenance>> {
        let this = self.eval_context_mut();
        // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
        this.check_ptr_access(place.ptr(), size, CheckInAllocMsg::Dereferenceable)?;

        // It is crucial that this gets called on all code paths, to ensure we track tag creation.
        let log_creation = |this: &MiriInterpCx<'tcx>,
                            loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
         -> InterpResult<'tcx> {
            let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
            let ty = place.layout.ty;
            if global.tracked_pointer_tags.contains(&new_tag) {
                let mut kind_str = String::new();
                match new_perm {
                    NewPermission::Uniform { perm, .. } =>
                        write!(kind_str, "{perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, .. } if ty.is_freeze(*this.tcx, this.typing_env()) =>
                        write!(kind_str, "{freeze_perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, nonfreeze_perm, .. } =>
                        write!(kind_str, "{freeze_perm:?}/{nonfreeze_perm:?} permission for frozen/non-frozen parts").unwrap(),
                }
                write!(kind_str, " (pointee type {ty})").unwrap();
                this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
                    new_tag.inner(),
                    Some(kind_str),
                    loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
                ));
            }
            drop(global); // don't hold that reference any longer than we have to

            let Some((alloc_id, base_offset, orig_tag)) = loc else {
                return interp_ok(())
            };

            let alloc_kind = this.get_alloc_info(alloc_id).kind;
            match alloc_kind {
                AllocKind::LiveData => {
                    // This should have alloc_extra data, but `get_alloc_extra` can still fail
                    // if converting this alloc_id from a global to a local one
                    // uncovers a non-supported `extern static`.
                    let extra = this.get_alloc_extra(alloc_id)?;
                    let mut stacked_borrows = extra
                        .borrow_tracker_sb()
                        .borrow_mut();
                    // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
                    // FIXME: can this be done cleaner?
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
                    dcx.log_creation();
                    if new_perm.protector().is_some() {
                        dcx.log_protector();
                    }
                },
                AllocKind::Function | AllocKind::VTable | AllocKind::TypeId | AllocKind::Dead | AllocKind::VaList => {
                    // No stacked borrows on these allocations.
                }
            }
            interp_ok(())
        };

        if size == Size::ZERO {
            trace!(
                "reborrow of size 0: reference {:?} derived from {:?} (pointee {})",
                new_tag,
                place.ptr(),
                place.layout.ty,
            );
            // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
            // touches no bytes so there is no stack to put this tag in.
            // However, if the pointer for this operation points at a real allocation we still
            // record where it was created so that we can issue a helpful diagnostic if there is an
            // attempt to use it for a non-zero-sized access.
            // Dangling slices are a common case here; it's valid to get their length but with raw
            // pointer tagging for example all calls to get_unchecked on them are invalid.
            if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr(), 0)
            {
                log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
                // Still give it the new provenance, it got retagged after all. If this was a
                // wildcard pointer, this will fix the AllocId and make future accesses with this
                // reference to other allocations UB, but that's fine: due to subobject provenance,
                // *all* future accesses with this reference should be UB!
                return interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
            } else {
                // This pointer doesn't come with an AllocId. :shrug:
                log_creation(this, None)?;
                // Provenance unchanged. Ideally we'd make this pointer UB to use like above,
                // but there's no easy way to do that.
                return interp_ok(place.ptr().provenance);
            }
        }

        // The pointer *must* have a valid AllocId to continue, so we want to resolve this to
        // a concrete ID even for wildcard pointers.
        let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;

        trace!(
            "reborrow: reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
            new_tag,
            orig_tag,
            place.layout.ty,
            interpret::Pointer::new(alloc_id, base_offset),
            size.bytes()
        );

        if let Some(protect) = new_perm.protector() {
            // See comment in `Stack::item_invalidated` for why we store the tag twice.
            this.frame_mut()
                .extra
                .borrow_tracker
                .as_mut()
                .unwrap()
                .protected_tags
                .push((alloc_id, new_tag));
            this.machine
                .borrow_tracker
                .as_mut()
                .unwrap()
                .get_mut()
                .protected_tags
                .insert(new_tag, protect);
        }

        // Update the stacks, according to the new permission information we are given.
        match new_perm {
            NewPermission::Uniform { perm, access, protector } => {
                assert!(perm != Permission::SharedReadOnly);
                // Here we can avoid `borrow()` calls because we have mutable references.
                // Note that this asserts that the allocation is mutable -- but since we are creating a
                // mutable pointer, that seems reasonable.
                let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
                let stacked_borrows = alloc_extra.borrow_tracker_sb_mut().get_mut();
                let item = Item::new(new_tag, perm, protector.is_some());
                let range = alloc_range(base_offset, size);
                let global = machine.borrow_tracker.as_ref().unwrap().borrow();
                let dcx = DiagnosticCxBuilder::retag(
                    machine,
                    retag_info,
                    new_tag,
                    orig_tag,
                    alloc_range(base_offset, size),
                );
                stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                    stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                })?;
                drop(global);
                if let Some(access) = access {
                    assert_eq!(access, AccessKind::Write);
                    // Make sure the data race model also knows about this.
                    // FIXME(genmc): Ensure this is still done in GenMC mode. Check for other places where GenMC may need to be informed.
                    if let Some(data_race) = alloc_extra.data_race.as_vclocks_mut() {
                        data_race.write_non_atomic(
                            alloc_id,
                            range,
                            NaWriteType::Retag,
                            Some(place.layout.ty),
                            machine,
                        )?;
                    }
                }
            }
            NewPermission::FreezeSensitive {
                freeze_perm,
                freeze_access,
                freeze_protector,
                nonfreeze_perm,
                nonfreeze_access,
            } => {
                // The permission is not uniform across the entire range!
                // We need a frozen-sensitive reborrow.
                // We have to use shared references to alloc/memory_extra here since
                // `visit_freeze_sensitive` needs to access the global state.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                let mut stacked_borrows = alloc_extra.borrow_tracker_sb().borrow_mut();
                this.visit_freeze_sensitive(place, size, |mut range, frozen| {
                    // Adjust range.
                    range.start += base_offset;
                    // We are only ever `SharedReadOnly` inside the frozen bits.
                    let (perm, access, protector) = if frozen {
                        (freeze_perm, freeze_access, freeze_protector)
                    } else {
                        (nonfreeze_perm, nonfreeze_access, None)
                    };
                    let item = Item::new(new_tag, perm, protector.is_some());
                    let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                        stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                    })?;
                    drop(global);
                    if let Some(access) = access {
                        assert_eq!(access, AccessKind::Read);
                        // Make sure the data race model also knows about this.
                        if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
                            data_race.read_non_atomic(
                                alloc_id,
                                range,
                                NaReadType::Retag,
                                Some(place.layout.ty),
                                &this.machine,
                            )?;
                        }
                    }
                    interp_ok(())
                })?;
            }
        }

        interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
    }

    fn sb_retag_place(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();
        let size = this.size_and_align_of_val(place)?.map(|(size, _)| size);
        // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
        // bail out -- we cannot reasonably figure out which memory range to reborrow.
        // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
        let Some(size) = size else {
            static DEDUP: AtomicBool = AtomicBool::new(false);
            if !DEDUP.swap(true, std::sync::atomic::Ordering::Relaxed) {
                this.emit_diagnostic(NonHaltingDiagnostic::ExternTypeReborrow);
            }
            return interp_ok(place.clone());
        };

        // Compute new borrow.
        let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr();

        // Reborrow.
        let new_prov = this.sb_reborrow(place, size, new_perm, new_tag, info)?;

        // Adjust place.
        // (If the closure gets called, that means the old provenance was `Some`, and hence the new
        // one must also be `Some`.)
        interp_ok(place.clone().map_provenance(|_| new_prov.unwrap()))
    }

    /// Retags an individual pointer, returning the retagged version.
    /// `kind` indicates what kind of reference is being created.
    fn sb_retag_reference(
        &mut self,
        val: &ImmTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        let place = this.ref_to_mplace(val)?;
        let new_place = this.sb_retag_place(&place, new_perm, info)?;
        interp_ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn sb_retag_ptr_value(
        &mut self,
        kind: RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        let new_perm = NewPermission::from_ref_ty(val.layout.ty, kind, this);
        let cause = match kind {
            RetagKind::TwoPhase => RetagCause::TwoPhase,
            RetagKind::FnEntry => unreachable!(),
            RetagKind::Raw | RetagKind::Default => RetagCause::Normal,
        };
        this.sb_retag_reference(val, new_perm, RetagInfo { cause, in_field: false })
    }

    fn sb_retag_place_contents(
        &mut self,
        kind: RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let retag_cause = match kind {
            RetagKind::TwoPhase => unreachable!(), // can only happen in `retag_ptr_value`
            RetagKind::FnEntry => RetagCause::FnEntry,
            RetagKind::Default | RetagKind::Raw => RetagCause::Normal,
        };
        let mut visitor = RetagVisitor { ecx: this, kind, retag_cause, in_field: false };
        return visitor.visit_value(place);

        // The actual visitor.
        struct RetagVisitor<'ecx, 'tcx> {
            ecx: &'ecx mut MiriInterpCx<'tcx>,
            kind: RetagKind,
            retag_cause: RetagCause,
            in_field: bool,
        }
        impl<'ecx, 'tcx> RetagVisitor<'ecx, 'tcx> {
            #[inline(always)] // yes this helps in our benchmarks
            fn retag_ptr_inplace(
                &mut self,
                place: &PlaceTy<'tcx>,
                new_perm: NewPermission,
            ) -> InterpResult<'tcx> {
                let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
                let val = self.ecx.sb_retag_reference(
                    &val,
                    new_perm,
                    RetagInfo { cause: self.retag_cause, in_field: self.in_field },
                )?;
                self.ecx.write_immediate(*val, place)?;
                interp_ok(())
            }
        }
        impl<'ecx, 'tcx> ValueVisitor<'tcx, MiriMachine<'tcx>> for RetagVisitor<'ecx, 'tcx> {
            type V = PlaceTy<'tcx>;

            #[inline(always)]
            fn ecx(&self) -> &MiriInterpCx<'tcx> {
                self.ecx
            }

            fn visit_box(&mut self, box_ty: Ty<'tcx>, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                // Only boxes for the global allocator get any special treatment.
                if box_ty.is_box_global(*self.ecx.tcx) {
                    // Boxes get a weak protector, since they may be deallocated.
                    let new_perm = NewPermission::from_box_ty(place.layout.ty, self.kind, self.ecx);
                    self.retag_ptr_inplace(place, new_perm)?;
                }
                interp_ok(())
            }

            fn visit_value(&mut self, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                // If this place is smaller than a pointer, we know that it can't contain any
                // pointers we need to retag, so we can stop recursion early.
                // This optimization is crucial for ZSTs, because they can contain way more fields
                // than we can ever visit.
                if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
                    return interp_ok(());
                }

                // Check the type of this value to see what to do with it (retag, or recurse).
                match place.layout.ty.kind() {
                    ty::Ref(..) | ty::RawPtr(..) => {
                        if matches!(place.layout.ty.kind(), ty::Ref(..))
                            || self.kind == RetagKind::Raw
                        {
                            let new_perm =
                                NewPermission::from_ref_ty(place.layout.ty, self.kind, self.ecx);
                            self.retag_ptr_inplace(place, new_perm)?;
                        }
                    }
                    ty::Adt(adt, _) if adt.is_box() => {
                        // Recurse for boxes, they require some tricky handling and will end up in `visit_box` above.
                        // (Yes this means we technically also recursively retag the allocator itself
                        // even if field retagging is not enabled. *shrug*)
                        self.walk_value(place)?;
                    }
                    _ => {
                        // Not a reference/pointer/box. Recurse.
                        let in_field = mem::replace(&mut self.in_field, true); // remember and restore old value
                        self.walk_value(place)?;
                        self.in_field = in_field;
                    }
                }

                interp_ok(())
            }
        }
    }

    /// Protect a place so that it cannot be used any more for the duration of the current function
    /// call.
    ///
    /// This is used to ensure soundness of in-place function argument/return passing.
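    ///
    /// An illustrative sketch of the situation this guards against (added for this write-up, not
    /// from the upstream sources; whether an argument is actually passed in place depends on the
    /// ABI and on optimizations, and `callee` is any function taking a `Box<u8>` by value):
    ///
    /// ```rust,ignore
    /// let b = Box::new(0u8);
    /// let p = &raw const *b;
    /// callee(b); // if `b`'s memory is passed in place, it is protected for the whole call,
    ///            // so any use of the stale pointer `p` before `callee` returns is a
    ///            // protector violation.
    /// ```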
    fn sb_protect_place(&mut self, place: &MPlaceTy<'tcx>) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();

        // Retag it. With protection! That is the entire point.
        let new_perm = NewPermission::Uniform {
            perm: Permission::Unique,
            access: Some(AccessKind::Write),
            protector: Some(ProtectorKind::StrongProtector),
        };
        this.sb_retag_place(
            place,
            new_perm,
            RetagInfo { cause: RetagCause::InPlaceFnPassing, in_field: false },
        )
    }

    /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
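    ///
    /// For example (an illustrative sketch added for this write-up, not from the upstream
    /// sources), a pointer-to-integer cast exposes the pointer's tag, which a later
    /// integer-to-pointer cast (wildcard provenance) may then be matched against:
    ///
    /// ```rust,ignore
    /// let x = 42u8;
    /// let p = &x as *const u8;
    /// let addr = p as usize;     // exposes `p`'s tag in `x`'s allocation
    /// let q = addr as *const u8; // `q` has wildcard provenance; an access through it may use
    /// let _val = unsafe { *q };  // any exposed tag, including `p`'s
    /// ```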
    fn sb_expose_tag(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();

        // Function pointers and dead objects don't have an alloc_extra so we ignore them.
        // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
        // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
        let kind = this.get_alloc_info(alloc_id).kind;
        match kind {
            AllocKind::LiveData => {
                // This should have alloc_extra data, but `get_alloc_extra` can still fail
                // if converting this alloc_id from a global to a local one
                // uncovers a non-supported `extern static`.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
                alloc_extra.borrow_tracker_sb().borrow_mut().exposed_tags.insert(tag);
            }
            AllocKind::Function
            | AllocKind::VTable
            | AllocKind::TypeId
            | AllocKind::Dead
            | AllocKind::VaList => {
                // No stacked borrows on these allocations.
            }
        }
        interp_ok(())
    }

    fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let alloc_extra = this.get_alloc_extra(alloc_id)?;
        let stacks = alloc_extra.borrow_tracker_sb().borrow();
        for (range, stack) in stacks.stacks.iter_all() {
            print!("{range:?}: [");
            if let Some(bottom) = stack.unknown_bottom() {
                print!(" unknown-bottom(..{bottom:?})");
            }
            for i in 0..stack.len() {
                let item = stack.get(i).unwrap();
                print!(" {:?}{:?}", item.perm(), item.tag());
            }
            println!(" ]");
        }
        interp_ok(())
    }
}