miri/borrow_tracker/stacked_borrows/mod.rs
//! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
//! for further information.

pub mod diagnostics;
mod item;
mod stack;

use std::fmt::Write;
use std::sync::atomic::AtomicBool;
use std::{cmp, mem};

use rustc_abi::{BackendRepr, Size};
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir::{Mutability, RetagKind};
use rustc_middle::ty::layout::HasTypingEnv;
use rustc_middle::ty::{self, Ty};

use self::diagnostics::{RetagCause, RetagInfo};
pub use self::item::{Item, Permission};
pub use self::stack::Stack;
use crate::borrow_tracker::stacked_borrows::diagnostics::{
    AllocHistory, DiagnosticCx, DiagnosticCxBuilder,
};
use crate::borrow_tracker::{GlobalStateInner, ProtectorKind};
use crate::concurrency::data_race::{NaReadType, NaWriteType};
use crate::*;

pub type AllocState = Stacks;

/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
    // Even reading memory can have effects on the stack, so we need a `RefCell` here.
    stacks: DedupRangeMap<Stack>,
    /// Stores past operations on this allocation
    history: AllocHistory,
    /// The set of tags that have been exposed inside this allocation.
    exposed_tags: FxHashSet<BorTag>,
}

/// Indicates which permissions to grant to the retagged pointer.
#[derive(Clone, Debug)]
enum NewPermission {
    Uniform {
        perm: Permission,
        access: Option<AccessKind>,
        protector: Option<ProtectorKind>,
    },
    FreezeSensitive {
        freeze_perm: Permission,
        freeze_access: Option<AccessKind>,
        freeze_protector: Option<ProtectorKind>,
        nonfreeze_perm: Permission,
        nonfreeze_access: Option<AccessKind>,
        // nonfreeze_protector must always be None
    },
}

impl NewPermission {
    /// A key function: determine the permissions to grant at a retag for the given kind of
    /// reference/pointer.
    fn from_ref_ty<'tcx>(ty: Ty<'tcx>, kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        let protector = (kind == RetagKind::FnEntry).then_some(ProtectorKind::StrongProtector);
        match ty.kind() {
            ty::Ref(_, pointee, Mutability::Mut) => {
                if kind == RetagKind::TwoPhase {
                    // We mostly just give up on two-phase borrows, and treat these exactly like raw pointers.
                    assert!(protector.is_none()); // RetagKind can't be both FnEntry and TwoPhase.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                } else if pointee.is_unpin(*cx.tcx, cx.typing_env()) {
                    // A regular full mutable reference. On `FnEntry` this is `noalias` and `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::Unique,
                        access: Some(AccessKind::Write),
                        protector,
                    }
                } else {
                    // `!Unpin` dereferences do not get `noalias` nor `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                }
            }
            ty::RawPtr(_, Mutability::Mut) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // Mutable raw pointer. No access, not protected.
                NewPermission::Uniform {
                    perm: Permission::SharedReadWrite,
                    access: None,
                    protector: None,
                }
            }
            ty::Ref(_, _pointee, Mutability::Not) => {
                // Shared references. If frozen, these get `noalias` and `dereferenceable`; otherwise neither.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: protector,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    // Inside UnsafeCell, this does *not* count as an access, as there
                    // might actually be mutable references further up the stack that
                    // we have to keep alive.
                    nonfreeze_access: None,
                    // We do not protect inside UnsafeCell.
                    // This fixes https://github.com/rust-lang/rust/issues/55005.
                }
            }
            ty::RawPtr(_, Mutability::Not) => {
                assert!(protector.is_none()); // RetagKind can't be both FnEntry and Raw.
                // `*const T`, when freshly created, are read-only in the frozen part.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: None,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    nonfreeze_access: None,
                }
            }
            _ => unreachable!(),
        }
    }
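    // For illustration only (not exhaustive), `from_ref_ty` roughly maps:
    //   `&mut i32`   -> Uniform { perm: Unique, access: Some(Write), protector on FnEntry }
    //   `&i32`       -> FreezeSensitive; since `i32` is freeze, the whole range gets
    //                   SharedReadOnly with a read access
    //   `&Cell<i32>` -> FreezeSensitive; the `UnsafeCell` bytes get SharedReadWrite with
    //                   no access and no protector
    //   `*mut i32`   -> Uniform { perm: SharedReadWrite, access: None, protector: None }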

    fn from_box_ty<'tcx>(ty: Ty<'tcx>, kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        // `ty` is not the `Box` but the field of the Box with this pointer (due to allocator handling).
        let pointee = ty.builtin_deref(true).unwrap();
        if pointee.is_unpin(*cx.tcx, cx.typing_env()) {
            // A regular box. On `FnEntry` this is `noalias`, but not `dereferenceable` (hence only
            // a weak protector).
            NewPermission::Uniform {
                perm: Permission::Unique,
                access: Some(AccessKind::Write),
                protector: (kind == RetagKind::FnEntry).then_some(ProtectorKind::WeakProtector),
            }
        } else {
            // `!Unpin` boxes do not get `noalias` nor `dereferenceable`.
            NewPermission::Uniform {
                perm: Permission::SharedReadWrite,
                access: None,
                protector: None,
            }
        }
    }

    fn protector(&self) -> Option<ProtectorKind> {
        match self {
            NewPermission::Uniform { protector, .. } => *protector,
            NewPermission::FreezeSensitive { freeze_protector, .. } => *freeze_protector,
        }
    }
}

// # Stacked Borrows Core Begin

/// We need to make at least the following things true:
///
/// U1: After creating a `Uniq`, it is at the top.
/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
///
/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
/// F2: If a write access happens, it pops the `SharedReadOnly`. This has two pieces:
/// F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
///      gets popped.
/// F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
/// F3: If an access happens with an `&` outside `UnsafeCell`,
///     it requires the `SharedReadOnly` to still be in the stack.
///
/// Core relation on `Permission` to define which accesses are allowed
impl Permission {
    /// This defines for a given permission, whether it permits the given kind of access.
    fn grants(self, access: AccessKind) -> bool {
        // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
        self != Permission::Disabled
            && (access == AccessKind::Read || self != Permission::SharedReadOnly)
    }
}
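// A few concrete consequences of `grants` and the invariants above (illustrative only):
//
//   Permission::Unique.grants(AccessKind::Write)         == true
//   Permission::SharedReadOnly.grants(AccessKind::Read)  == true
//   Permission::SharedReadOnly.grants(AccessKind::Write) == false
//   Permission::Disabled.grants(AccessKind::Read)        == false
//
// At the surface-language level, a sketch of F1/F2a/F3 (which retags fire depends on the
// exact MIR, so take this as approximate):
//
//   let mut x = 0i32;
//   let raw = &raw mut x;       // raw pointers get SharedReadWrite (see `from_ref_ty`)
//   let r = unsafe { &*raw };   // SharedReadOnly for `r` is pushed on top (F1)
//   unsafe { *raw = 1 };        // a write below `r` pops its SharedReadOnly (F2a)
//   let _val = *r;              // UB under Stacked Borrows: `r` is gone (F3)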

/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
#[derive(Copy, Clone, Debug)]
enum ItemInvalidationCause {
    Conflict,
    Dealloc,
}

/// Core per-location operations: access, dealloc, reborrow.
impl<'tcx> Stack {
    /// Find the first write-incompatible item above the given one --
    /// i.e., find the height to which the stack will be truncated when writing to `granting`.
    fn find_first_write_incompatible(&self, granting: usize) -> usize {
        let perm = self.get(granting).unwrap().perm();
        match perm {
            Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
            Permission::Disabled => bug!("Cannot use Disabled for anything"),
            Permission::Unique => {
                // On a write, everything above us is incompatible.
                granting + 1
            }
            Permission::SharedReadWrite => {
                // The SharedReadWrite *just* above us are compatible; skip over those.
                let mut idx = granting + 1;
                while let Some(item) = self.get(idx) {
                    if item.perm() == Permission::SharedReadWrite {
                        // Go on.
                        idx += 1;
                    } else {
                        // Found first incompatible!
                        break;
                    }
                }
                idx
            }
        }
    }
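    // For illustration (made-up indices): given a stack
    //   [ Unique, SharedReadWrite, SharedReadWrite, SharedReadOnly ]
    // `find_first_write_incompatible(0)` returns 1 (everything above a `Unique` is incompatible),
    // while `find_first_write_incompatible(1)` returns 3 (the adjacent `SharedReadWrite` at
    // index 2 is skipped; the `SharedReadOnly` is the first write-incompatible item).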

    /// The given item was invalidated -- check its protectors for whether that will cause UB.
    fn item_invalidated(
        item: &Item,
        global: &GlobalStateInner,
        dcx: &DiagnosticCx<'_, '_, 'tcx>,
        cause: ItemInvalidationCause,
    ) -> InterpResult<'tcx> {
        if !global.tracked_pointer_tags.is_empty() {
            dcx.check_tracked_tag_popped(item, global);
        }

        if !item.protected() {
            return interp_ok(());
        }

        // We store tags twice, once in global.protected_tags and once in each call frame.
        // We do this because consulting a single global set in this function is faster
        // than attempting to search all call frames in the program for the `FrameExtra`
        // (if any) which is protecting the popped tag.
        //
        // This duplication trades off making `end_call` slower to make this function faster. This
        // trade-off is profitable in practice for a combination of two reasons.
        // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
        //    Therefore, adding overhead in function call/return is profitable even if it only
        //    saves a little work in this function.
        // 2. Most frames protect only one or two tags. So this duplicative global turns a search
        //    which ends up about linear in the number of protected tags in the program into a
        //    constant time check (and a slow linear, because the tags in the frames aren't contiguous).
        if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
            // The only way this is okay is if the protector is weak and we are deallocating with
            // the right pointer.
            let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
                && matches!(protector_kind, ProtectorKind::WeakProtector);
            if !allowed {
                return Err(dcx.protector_error(item, protector_kind)).into();
            }
        }
        interp_ok(())
    }

    /// Test if a memory `access` using pointer tagged `tag` is granted.
    /// If yes, return the index of the item that granted it.
    /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
    /// allocation that we are currently checking.
    fn access(
        &mut self,
        access: AccessKind,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Two main steps: Find granting item, remove incompatible items above.

        // Step 1: Find granting item.
        let granting_idx =
            self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;

        // Step 2: Remove incompatible items above them. Make sure we do not remove protected
        // items. Behavior differs for reads and writes.
        // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
        if access == AccessKind::Write {
            // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
            // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would remove more
                // things. Even if this is a Unique and the lower idx is an SRW (which removes
                // less), there is an SRW group boundary here so strictly more would get removed.
                self.find_first_write_incompatible(granting_idx)
            } else {
                // We are writing to something in the unknown part.
                // There is a SRW group boundary between the unknown and the known, so everything is incompatible.
                0
            };
            self.pop_items_after(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        } else {
            // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
            // The reason this is not following the stack discipline (by removing the first Unique and
            // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
            // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
            // `SharedReadWrite` for `raw`.
            // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
            // reference and use that.
            // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would disable more things.
                granting_idx + 1
            } else {
                // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
                0
            };
            self.disable_uniques_starting_at(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        }

        // If this was an approximate action, we now collapse everything into an unknown.
        if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
            // Compute the upper bound of the items that remain.
            // (This is why we did all the work above: to reduce the items we have to consider here.)
            let mut max = BorTag::one();
            for i in 0..self.len() {
                let item = self.get(i).unwrap();
                // Skip disabled items, they cannot be matched anyway.
                if !matches!(item.perm(), Permission::Disabled) {
                    // We are looking for a strict upper bound, so add 1 to this tag.
                    max = cmp::max(item.tag().succ().unwrap(), max);
                }
            }
            if let Some(unk) = self.unknown_bottom() {
                max = cmp::max(unk, max);
            }
            // Use `max` as new strict upper bound for everything.
            trace!(
                "access: forgetting stack to upper bound {max} due to wildcard or unknown access",
                max = max.get(),
            );
            self.set_unknown_bottom(max);
        }

        // Done.
        interp_ok(())
    }

    /// Deallocate a location: Like a write access, but also there must be no
    /// active protectors at all because we will remove all items.
    fn dealloc(
        &mut self,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Step 1: Make a write access.
        // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
        self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;

        // Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
        for idx in (0..self.len()).rev() {
            let item = self.get(idx).unwrap();
            Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
        }

        interp_ok(())
    }
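    // Illustrative sketch of the protector check on deallocation (made-up user code; Miri
    // would report the error at the `Box::from_raw` drop, roughly like this):
    //
    //   fn evil(x: &mut i32) {
    //       // `x` received a strong protector at function entry (FnEntry retag).
    //       drop(unsafe { Box::from_raw(x as *mut i32) }); // deallocates `*x` -> UB reported
    //   }
    //   evil(Box::leak(Box::new(0)));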

    /// Derive a new pointer from one with the given tag.
    ///
    /// `access` indicates which kind of memory access this retag itself should correspond to.
    fn grant(
        &mut self,
        derived_from: ProvenanceExtra,
        new: Item,
        access: Option<AccessKind>,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        dcx.start_grant(new.perm());

        // Compute where to put the new item.
        // Either way, we ensure that we insert the new item in a way such that between
        // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
        let new_idx = if let Some(access) = access {
            // Simple case: We are just a regular memory access, and then push our thing on top,
            // like a regular stack.
            // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
            self.access(access, derived_from, global, dcx, exposed_tags)?;

            // We insert "as far up as possible": We know only compatible items are remaining
            // on top of `derived_from`, and we want the new item at the top so that we
            // get the strongest possible guarantees.
            // This ensures U1 and F1.
            self.len()
        } else {
            // The tricky case: creating a new SRW permission without actually being an access.
            assert!(new.perm() == Permission::SharedReadWrite);

            // First we figure out which item grants our parent (`derived_from`) this kind of access.
            // We use that to determine where to put the new item.
            let granting_idx = self
                .find_granting(AccessKind::Write, derived_from, exposed_tags)
                .map_err(|()| dcx.grant_error(self))?;

            let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from)
            else {
                // The parent is a wildcard pointer or matched the unknown bottom.
                // This is approximate. Nobody knows what happened, so forget everything.
                // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
                // (for all we know, it might join an SRW group inside the unknown).
                trace!(
                    "reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown"
                );
                self.set_unknown_bottom(global.next_ptr_tag);
                return interp_ok(());
            };

            // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
            // access. Instead of popping the stack, we insert the item at the place the stack would
            // be popped to (i.e., we insert it above all the write-compatible items).
            // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
            self.find_first_write_incompatible(granting_idx)
        };

        // Put the new item there.
        trace!("reborrow: adding item {:?}", new);
        self.insert(new_idx, new);
        interp_ok(())
    }
}
// # Stacked Borrows Core End
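
// Putting the core operations together, a single location's borrow stack evolves roughly
// like this (illustrative only; tags are made up and the exact retags depend on the MIR):
//
//   let mut x = 42u8;        // stack: [ Unique<1> ]                   (the local's base tag)
//   let r1 = &mut x;         // retag: [ Unique<1>, Unique<2> ]         (U1: new Unique on top)
//   let raw = &raw mut *r1;  // retag: [ Unique<1>, Unique<2>, SharedReadWrite<3> ]
//   unsafe { *raw = 7 };     // write via <3>: it is already at the top, nothing to pop
//   *r1 = 0;                 // write via <2>: everything above it is popped, removing <3> (U2)
//   unsafe { *raw = 1 };     // UB: <3> was popped and is no longer in the stack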

/// Integration with the BorTag garbage collector
impl Stacks {
    pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
        for (_stack_range, stack) in self.stacks.iter_mut_all() {
            stack.retain(live_tags);
        }
        self.history.retain(live_tags);
    }
}

impl VisitProvenance for Stacks {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        for tag in self.exposed_tags.iter().copied() {
            visit(None, Some(tag));
        }
    }
}

/// Map per-stack operations to higher-level per-location-range operations.
impl<'tcx> Stacks {
    /// Creates a new stack with an initial tag. For diagnostic purposes, we also need to know
    /// the [`AllocId`] of the allocation this is associated with.
    fn new(
        size: Size,
        perm: Permission,
        tag: BorTag,
        id: AllocId,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let item = Item::new(tag, perm, false);
        let stack = Stack::new(item);

        Stacks {
            stacks: DedupRangeMap::new(size, stack),
            history: AllocHistory::new(id, item, machine),
            exposed_tags: FxHashSet::default(),
        }
    }

    /// Call `f` on every stack in the range.
    fn for_each(
        &mut self,
        range: AllocRange,
        mut dcx_builder: DiagnosticCxBuilder<'_, 'tcx>,
        mut f: impl FnMut(
            &mut Stack,
            &mut DiagnosticCx<'_, '_, 'tcx>,
            &mut FxHashSet<BorTag>,
        ) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        for (stack_range, stack) in self.stacks.iter_mut(range.start, range.size) {
            let mut dcx = dcx_builder.build(&mut self.history, Size::from_bytes(stack_range.start));
            f(stack, &mut dcx, &mut self.exposed_tags)?;
            dcx_builder = dcx.unbuild();
        }
        interp_ok(())
    }
}
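// Note that stacks are tracked per byte (deduplicated over ranges), so an operation only
// touches the stacks of the bytes it covers. For example (illustrative):
//
//   let mut buf = [0u8; 4];
//   let r = &mut buf[1..3];  // this retag only runs on the stacks for bytes 1 and 2;
//                            // bytes 0 and 3 keep their stacks unchanged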

/// Glue code to connect with Miri Machine Hooks
impl Stacks {
    pub fn new_allocation(
        id: AllocId,
        size: Size,
        state: &mut GlobalStateInner,
        kind: MemoryKind,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let (base_tag, perm) = match kind {
            // New unique borrow. This tag is not accessible by the program,
            // so it will only ever be used when using the local directly (i.e.,
            // not through a pointer). That is, whenever we directly write to a local, this will pop
            // everything else off the stack, invalidating all previous pointers,
            // and in particular, *all* raw pointers.
            MemoryKind::Stack => (state.root_ptr_tag(id, machine), Permission::Unique),
            // Everything else is shared by default.
            _ => (state.root_ptr_tag(id, machine), Permission::SharedReadWrite),
        };
        Stacks::new(size, perm, base_tag, id, machine)
    }

    #[inline(always)]
    pub fn before_memory_read<'ecx, 'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &'ecx MiriMachine<'tcx>,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'ecx,
    {
        trace!(
            "read access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::read(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!(
            "write access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::write(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_deallocation<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        size: Size,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, size.bytes());
        let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(alloc_range(Size::ZERO, size), dcx, |stack, dcx, exposed_tags| {
            stack.dealloc(tag, &state, dcx, exposed_tags)
        })?;
        interp_ok(())
    }
}

/// Retagging/reborrowing. There is some policy in here, such as which permissions
/// to grant for which references, and when to add protectors.
impl<'tcx, 'ecx> EvalContextPrivExt<'tcx, 'ecx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the provenance that should be used henceforth.
    fn sb_reborrow(
        &mut self,
        place: &MPlaceTy<'tcx>,
        size: Size,
        new_perm: NewPermission,
        new_tag: BorTag,
        retag_info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, Option<Provenance>> {
        let this = self.eval_context_mut();
        // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
        this.check_ptr_access(place.ptr(), size, CheckInAllocMsg::Dereferenceable)?;

        // It is crucial that this gets called on all code paths, to ensure we track tag creation.
        let log_creation = |this: &MiriInterpCx<'tcx>,
                            loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
         -> InterpResult<'tcx> {
            let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
            let ty = place.layout.ty;
            if global.tracked_pointer_tags.contains(&new_tag) {
                let mut kind_str = String::new();
                match new_perm {
                    NewPermission::Uniform { perm, .. } =>
                        write!(kind_str, "{perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, .. } if ty.is_freeze(*this.tcx, this.typing_env()) =>
                        write!(kind_str, "{freeze_perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, nonfreeze_perm, .. } =>
                        write!(kind_str, "{freeze_perm:?}/{nonfreeze_perm:?} permission for frozen/non-frozen parts").unwrap(),
                }
                write!(kind_str, " (pointee type {ty})").unwrap();
                this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
                    new_tag.inner(),
                    Some(kind_str),
                    loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
                ));
            }
            drop(global); // don't hold that reference any longer than we have to

            let Some((alloc_id, base_offset, orig_tag)) = loc else {
                return interp_ok(())
            };

            let alloc_kind = this.get_alloc_info(alloc_id).kind;
            match alloc_kind {
                AllocKind::LiveData => {
                    // This should have alloc_extra data, but `get_alloc_extra` can still fail
                    // if converting this alloc_id from a global to a local one
                    // uncovers a non-supported `extern static`.
                    let extra = this.get_alloc_extra(alloc_id)?;
                    let mut stacked_borrows = extra
                        .borrow_tracker_sb()
                        .borrow_mut();
                    // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
                    // FIXME: can this be done cleaner?
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
                    dcx.log_creation();
                    if new_perm.protector().is_some() {
                        dcx.log_protector();
                    }
                },
                AllocKind::Function | AllocKind::VTable | AllocKind::TypeId | AllocKind::Dead => {
                    // No stacked borrows on these allocations.
                }
            }
            interp_ok(())
        };

        if size == Size::ZERO {
            trace!(
                "reborrow of size 0: reference {:?} derived from {:?} (pointee {})",
                new_tag,
                place.ptr(),
                place.layout.ty,
            );
            // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
            // touches no bytes so there is no stack to put this tag in.
            // However, if the pointer for this operation points at a real allocation we still
            // record where it was created so that we can issue a helpful diagnostic if there is an
            // attempt to use it for a non-zero-sized access.
            // Dangling slices are a common case here; it's valid to get their length, but with
            // raw pointer tagging, for example, all calls to `get_unchecked` on them are invalid.
            if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr(), 0)
            {
                log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
                // Still give it the new provenance, it got retagged after all.
                return interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
            } else {
                // This pointer doesn't come with an AllocId. :shrug:
                log_creation(this, None)?;
                // Provenance unchanged.
                return interp_ok(place.ptr().provenance);
            }
        }

        let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;

        trace!(
            "reborrow: reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
            new_tag,
            orig_tag,
            place.layout.ty,
            interpret::Pointer::new(alloc_id, base_offset),
            size.bytes()
        );

        if let Some(protect) = new_perm.protector() {
            // See comment in `Stack::item_invalidated` for why we store the tag twice.
            this.frame_mut()
                .extra
                .borrow_tracker
                .as_mut()
                .unwrap()
                .protected_tags
                .push((alloc_id, new_tag));
            this.machine
                .borrow_tracker
                .as_mut()
                .unwrap()
                .get_mut()
                .protected_tags
                .insert(new_tag, protect);
        }

        // Update the stacks, according to the new permission information we are given.
        match new_perm {
            NewPermission::Uniform { perm, access, protector } => {
                assert!(perm != Permission::SharedReadOnly);
                // Here we can avoid `borrow()` calls because we have mutable references.
                // Note that this asserts that the allocation is mutable -- but since we are creating a
                // mutable pointer, that seems reasonable.
                let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
                let stacked_borrows = alloc_extra.borrow_tracker_sb_mut().get_mut();
                let item = Item::new(new_tag, perm, protector.is_some());
                let range = alloc_range(base_offset, size);
                let global = machine.borrow_tracker.as_ref().unwrap().borrow();
                let dcx = DiagnosticCxBuilder::retag(
                    machine,
                    retag_info,
                    new_tag,
                    orig_tag,
                    alloc_range(base_offset, size),
                );
                stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                    stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                })?;
                drop(global);
                if let Some(access) = access {
                    assert_eq!(access, AccessKind::Write);
                    // Make sure the data race model also knows about this.
                    // FIXME(genmc): Ensure this is still done in GenMC mode. Check for other places where GenMC may need to be informed.
                    if let Some(data_race) = alloc_extra.data_race.as_vclocks_mut() {
                        data_race.write(
                            alloc_id,
                            range,
                            NaWriteType::Retag,
                            Some(place.layout.ty),
                            machine,
                        )?;
                    }
                }
            }
            NewPermission::FreezeSensitive {
                freeze_perm,
                freeze_access,
                freeze_protector,
                nonfreeze_perm,
                nonfreeze_access,
            } => {
                // The permission is not uniform across the entire range!
                // We need a frozen-sensitive reborrow.
                // We have to use shared references to alloc/memory_extra here since
                // `visit_freeze_sensitive` needs to access the global state.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                let mut stacked_borrows = alloc_extra.borrow_tracker_sb().borrow_mut();
                this.visit_freeze_sensitive(place, size, |mut range, frozen| {
                    // Adjust range.
                    range.start += base_offset;
                    // We are only ever `SharedReadOnly` inside the frozen bits.
                    let (perm, access, protector) = if frozen {
                        (freeze_perm, freeze_access, freeze_protector)
                    } else {
                        (nonfreeze_perm, nonfreeze_access, None)
                    };
                    let item = Item::new(new_tag, perm, protector.is_some());
                    let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                        stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                    })?;
                    drop(global);
                    if let Some(access) = access {
                        assert_eq!(access, AccessKind::Read);
                        // Make sure the data race model also knows about this.
                        if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
                            data_race.read(
                                alloc_id,
                                range,
                                NaReadType::Retag,
                                Some(place.layout.ty),
                                &this.machine,
                            )?;
                        }
                    }
                    interp_ok(())
                })?;
            }
        }

        interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
    }

    fn sb_retag_place(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();
        let size = this.size_and_align_of_val(place)?.map(|(size, _)| size);
        // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
        // bail out -- we cannot reasonably figure out which memory range to reborrow.
        // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
        let size = match size {
            Some(size) => size,
            None => {
                static DEDUP: AtomicBool = AtomicBool::new(false);
                if !DEDUP.swap(true, std::sync::atomic::Ordering::Relaxed) {
                    this.emit_diagnostic(NonHaltingDiagnostic::ExternTypeReborrow);
                }
                return interp_ok(place.clone());
            }
        };

        // Compute new borrow.
        let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr();

        // Reborrow.
        let new_prov = this.sb_reborrow(place, size, new_perm, new_tag, info)?;

        // Adjust place.
        // (If the closure gets called, that means the old provenance was `Some`, and hence the new
        // one must also be `Some`.)
        interp_ok(place.clone().map_provenance(|_| new_prov.unwrap()))
    }

    /// Retags an individual pointer, returning the retagged version.
    /// `kind` indicates what kind of reference is being created.
    fn sb_retag_reference(
        &mut self,
        val: &ImmTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        let place = this.ref_to_mplace(val)?;
        let new_place = this.sb_retag_place(&place, new_perm, info)?;
        interp_ok(ImmTy::from_immediate(new_place.to_ref(this), val.layout))
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn sb_retag_ptr_value(
        &mut self,
        kind: RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        let new_perm = NewPermission::from_ref_ty(val.layout.ty, kind, this);
        let cause = match kind {
            RetagKind::TwoPhase => RetagCause::TwoPhase,
            RetagKind::FnEntry => unreachable!(),
            RetagKind::Raw | RetagKind::Default => RetagCause::Normal,
        };
        this.sb_retag_reference(val, new_perm, RetagInfo { cause, in_field: false })
    }

    fn sb_retag_place_contents(
        &mut self,
        kind: RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let retag_fields = this.machine.borrow_tracker.as_mut().unwrap().get_mut().retag_fields;
        let retag_cause = match kind {
            RetagKind::TwoPhase => unreachable!(), // can only happen in `retag_ptr_value`
            RetagKind::FnEntry => RetagCause::FnEntry,
            RetagKind::Default | RetagKind::Raw => RetagCause::Normal,
        };
        let mut visitor =
            RetagVisitor { ecx: this, kind, retag_cause, retag_fields, in_field: false };
        return visitor.visit_value(place);

        // The actual visitor.
        struct RetagVisitor<'ecx, 'tcx> {
            ecx: &'ecx mut MiriInterpCx<'tcx>,
            kind: RetagKind,
            retag_cause: RetagCause,
            retag_fields: RetagFields,
            in_field: bool,
        }
        impl<'ecx, 'tcx> RetagVisitor<'ecx, 'tcx> {
            #[inline(always)] // yes this helps in our benchmarks
            fn retag_ptr_inplace(
                &mut self,
                place: &PlaceTy<'tcx>,
                new_perm: NewPermission,
            ) -> InterpResult<'tcx> {
                let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
                let val = self.ecx.sb_retag_reference(
                    &val,
                    new_perm,
                    RetagInfo { cause: self.retag_cause, in_field: self.in_field },
                )?;
                self.ecx.write_immediate(*val, place)?;
                interp_ok(())
            }
        }
        impl<'ecx, 'tcx> ValueVisitor<'tcx, MiriMachine<'tcx>> for RetagVisitor<'ecx, 'tcx> {
            type V = PlaceTy<'tcx>;

            #[inline(always)]
            fn ecx(&self) -> &MiriInterpCx<'tcx> {
                self.ecx
            }

            fn visit_box(&mut self, box_ty: Ty<'tcx>, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                // Only boxes for the global allocator get any special treatment.
                if box_ty.is_box_global(*self.ecx.tcx) {
                    // Boxes get a weak protector, since they may be deallocated.
                    let new_perm = NewPermission::from_box_ty(place.layout.ty, self.kind, self.ecx);
                    self.retag_ptr_inplace(place, new_perm)?;
                }
                interp_ok(())
            }

            fn visit_value(&mut self, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                // If this place is smaller than a pointer, we know that it can't contain any
                // pointers we need to retag, so we can stop recursion early.
                // This optimization is crucial for ZSTs, because they can contain way more fields
                // than we can ever visit.
                if place.layout.is_sized() && place.layout.size < self.ecx.pointer_size() {
                    return interp_ok(());
                }

                // Check the type of this value to see what to do with it (retag, or recurse).
                match place.layout.ty.kind() {
                    ty::Ref(..) | ty::RawPtr(..) => {
                        if matches!(place.layout.ty.kind(), ty::Ref(..))
                            || self.kind == RetagKind::Raw
                        {
                            let new_perm =
                                NewPermission::from_ref_ty(place.layout.ty, self.kind, self.ecx);
                            self.retag_ptr_inplace(place, new_perm)?;
                        }
                    }
                    ty::Adt(adt, _) if adt.is_box() => {
                        // Recurse for boxes, they require some tricky handling and will end up in `visit_box` above.
                        // (Yes this means we technically also recursively retag the allocator itself
                        // even if field retagging is not enabled. *shrug*)
                        self.walk_value(place)?;
                    }
                    _ => {
                        // Not a reference/pointer/box. Only recurse if configured appropriately.
                        let recurse = match self.retag_fields {
                            RetagFields::No => false,
                            RetagFields::Yes => true,
                            RetagFields::OnlyScalar => {
                                // Matching `ArgAbi::new` at the time of writing, only fields of
                                // `Scalar` and `ScalarPair` ABI are considered.
                                matches!(
                                    place.layout.backend_repr,
                                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
                                )
                            }
                        };
                        if recurse {
                            let in_field = mem::replace(&mut self.in_field, true); // remember and restore old value
                            self.walk_value(place)?;
                            self.in_field = in_field;
                        }
                    }
                }

                interp_ok(())
            }
        }
    }

    /// Protect a place so that it cannot be used any more for the duration of the current function
    /// call.
    ///
    /// This is used to ensure soundness of in-place function argument/return passing.
    fn sb_protect_place(&mut self, place: &MPlaceTy<'tcx>) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();

        // Retag it. With protection! That is the entire point.
        let new_perm = NewPermission::Uniform {
            perm: Permission::Unique,
            access: Some(AccessKind::Write),
            protector: Some(ProtectorKind::StrongProtector),
        };
        this.sb_retag_place(
            place,
            new_perm,
            RetagInfo { cause: RetagCause::InPlaceFnPassing, in_field: false },
        )
    }

    /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
    fn sb_expose_tag(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();

        // Function pointers and dead objects don't have an alloc_extra so we ignore them.
        // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
        // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
        let kind = this.get_alloc_info(alloc_id).kind;
        match kind {
            AllocKind::LiveData => {
                // This should have alloc_extra data, but `get_alloc_extra` can still fail
                // if converting this alloc_id from a global to a local one
                // uncovers a non-supported `extern static`.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
                alloc_extra.borrow_tracker_sb().borrow_mut().exposed_tags.insert(tag);
            }
            AllocKind::Function | AllocKind::VTable | AllocKind::TypeId | AllocKind::Dead => {
                // No stacked borrows on these allocations.
            }
        }
        interp_ok(())
    }
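    // Illustrative: tags typically become exposed through pointer-to-integer casts, e.g.
    //
    //   let x = 0u8;
    //   let addr = &x as *const u8 as usize;  // exposes the tag of this borrow of `x`
    //   let p = addr as *const u8;            // `p` carries wildcard provenance
    //   let _v = unsafe { *p };               // the access may match any exposed tag
    //
    // Accesses through such wildcard pointers are why `Stack::find_granting` has to consult
    // `exposed_tags`.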

    fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let alloc_extra = this.get_alloc_extra(alloc_id)?;
        let stacks = alloc_extra.borrow_tracker_sb().borrow();
        for (range, stack) in stacks.stacks.iter_all() {
            print!("{range:?}: [");
            if let Some(bottom) = stack.unknown_bottom() {
                print!(" unknown-bottom(..{bottom:?})");
            }
            for i in 0..stack.len() {
                let item = stack.get(i).unwrap();
                print!(" {:?}{:?}", item.perm(), item.tag());
            }
            println!(" ]");
        }
        interp_ok(())
    }
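
    // The output of `print_stacks` looks roughly like this (illustrative only; exact
    // formatting depends on the `Debug` impls of the range, permission, and tag types):
    //
    //   0..4: [ SharedReadWrite<3> Unique<4> SharedReadOnly<5> ]
    //   4..8: [ unknown-bottom(..<7>) SharedReadWrite<6> ]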
}