miri/borrow_tracker/tree_borrows/tree.rs
1//! In this file we handle the "Tree" part of Tree Borrows, i.e. all tree
2//! traversal functions, optimizations to trim branches, and keeping track of
3//! the relative position of the access to each node being updated. This of course
4//! also includes the definition of the tree structure.
5//!
6//! Functions here manipulate permissions but are oblivious to them: as
7//! the internals of `Permission` are private, the update process is a black
8//! box. All we need to know here are
9//! - the fact that updates depend only on the old state, the status of protectors,
10//! and the relative position of the access;
11//! - idempotency properties asserted in `perms.rs` (for optimizations)
12
13use std::{fmt, mem};
14
15use rustc_abi::Size;
16use rustc_data_structures::fx::FxHashSet;
17use rustc_span::Span;
18use smallvec::SmallVec;
19
20use crate::borrow_tracker::tree_borrows::Permission;
21use crate::borrow_tracker::tree_borrows::diagnostics::{
22 self, NodeDebugInfo, TbError, TransitionError,
23};
24use crate::borrow_tracker::tree_borrows::foreign_access_skipping::IdempotentForeignAccess;
25use crate::borrow_tracker::tree_borrows::perms::PermTransition;
26use crate::borrow_tracker::tree_borrows::unimap::{UniEntry, UniIndex, UniKeyMap, UniValMap};
27use crate::borrow_tracker::{GlobalState, ProtectorKind};
28use crate::*;
29
30mod tests;
31
32/// Data for a single *location*.
33#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
34pub(super) struct LocationState {
35 /// A location is initialized when it is child-accessed for the first time (and the initial
36 /// retag initializes the location for the range covered by the type), and it then stays
37 /// initialized forever.
38 /// For initialized locations, "permission" is the current permission. However, for
39 /// uninitialized locations, we still need to track the "future initial permission": this will
40 /// start out to be `default_initial_perm`, but foreign accesses need to be taken into account.
41 /// Crucially however, while transitions to `Disabled` would usually be UB if this location is
42 /// protected, that is *not* the case for uninitialized locations. Instead we just have a latent
43 /// "future initial permission" of `Disabled`, causing UB only if an access is ever actually
44 /// performed.
45 /// Note that the tree root is also always initialized, as if the allocation was a write access.
46 initialized: bool,
47 /// This pointer's current permission / future initial permission.
48 permission: Permission,
49 /// See `foreign_access_skipping.rs`.
50 /// Stores an idempotent foreign access for this location and its children.
51 /// For correctness, this must not be too strong, and the recorded idempotent foreign access
52 /// of all children must be at least as strong as this. For performance, it should be as strong as possible.
53 idempotent_foreign_access: IdempotentForeignAccess,
54}
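
// Illustrative sketch (not part of the implementation) of the two flavors of
// `LocationState` built by `new_uninit` and `new_init` below; `perm` stands for
// some initial (non-`Active`) `Permission`, and the SIFA is left at
// `IdempotentForeignAccess::None` (no foreign access known to be skippable).
//
//     // Lazily initialized: `perm` is only the *future initial* permission,
//     // it takes effect on the first child access.
//     let lazy = LocationState::new_uninit(perm, IdempotentForeignAccess::None);
//     assert!(!lazy.is_initialized());
//     // Eagerly initialized, as used for the root and for freshly retagged ranges:
//     let eager = LocationState::new_init(perm, IdempotentForeignAccess::None);
//     assert!(eager.is_initialized());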
55
56impl LocationState {
57 /// Constructs a new initial state. It has neither been accessed, nor been subjected
58 /// to any foreign access yet.
59 /// The permission is not allowed to be `Active`.
60 /// `sifa` is the (strongest) idempotent foreign access, see `foreign_access_skipping.rs`
61 fn new_uninit(permission: Permission, sifa: IdempotentForeignAccess) -> Self {
62 assert!(permission.is_initial() || permission.is_disabled());
63 Self { permission, initialized: false, idempotent_foreign_access: sifa }
64 }
65
66 /// Constructs a new initial state. It has not yet been subjected
67 /// to any foreign access. However, it is already marked as having been accessed.
68 /// `sifa` is the (strongest) idempotent foreign access, see `foreign_access_skipping.rs`
69 fn new_init(permission: Permission, sifa: IdempotentForeignAccess) -> Self {
70 Self { permission, initialized: true, idempotent_foreign_access: sifa }
71 }
72
73 /// Check if the location has been initialized, i.e. if it has
74 /// ever been accessed through a child pointer.
75 pub fn is_initialized(&self) -> bool {
76 self.initialized
77 }
78
79 /// Check if the state can exist as the initial permission of a pointer.
80 ///
81 /// Do not confuse this with `is_initialized`: the two are almost orthogonal.
82 /// Apart from `Active`, which is never initial and must be initialized,
83 /// every other permission can occur in any combination of being
84 /// initial/initialized.
85 /// FIXME: when the corresponding `assert` in `tree_borrows/mod.rs` finally
86 /// passes and can be uncommented, remove this `#[allow(dead_code)]`.
87 #[cfg_attr(not(test), allow(dead_code))]
88 pub fn is_initial(&self) -> bool {
89 self.permission.is_initial()
90 }
91
92 pub fn permission(&self) -> Permission {
93 self.permission
94 }
95
96 /// Apply the effect of an access to one location, including
97 /// - applying `Permission::perform_access` to the inner `Permission`,
98 /// - emitting protector UB if the location is initialized,
99 /// - updating the initialized status (child accesses produce initialized locations).
100 fn perform_access(
101 &mut self,
102 access_kind: AccessKind,
103 rel_pos: AccessRelatedness,
104 protected: bool,
105 ) -> Result<PermTransition, TransitionError> {
106 let old_perm = self.permission;
107 let transition = Permission::perform_access(access_kind, rel_pos, old_perm, protected)
108 .ok_or(TransitionError::ChildAccessForbidden(old_perm))?;
109 self.initialized |= !rel_pos.is_foreign();
110 self.permission = transition.applied(old_perm).unwrap();
111 // Why do only initialized locations cause protector errors?
112 // Consider two mutable references `x`, `y` into disjoint parts of
113 // the same allocation. A priori, these may actually both be used to
114 // access the entire allocation, as long as only reads occur. However,
115 // a write to `y` needs to somehow record that `x` can no longer be used
116 // on that location at all. For these uninitialized locations (i.e., locations
117 // that haven't been accessed with `x` yet), we track the "future initial state":
118 // it defaults to whatever the initial state of the tag is,
119 // but the access to `y` moves that "future initial state" of `x` to `Disabled`.
120 // However, usually a `Reserved -> Disabled` transition would be UB due to the protector!
121 // So clearly protectors shouldn't fire for such "future initial state" transitions.
122 //
123 // See the test `two_mut_protected_same_alloc` in `tests/pass/tree_borrows/tree-borrows.rs`
124 // for an example of safe code that would be UB if we forgot to check `self.initialized`.
125 if protected && self.initialized && transition.produces_disabled() {
126 return Err(TransitionError::ProtectedDisabled(old_perm));
127 }
128 Ok(transition)
129 }
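
// A sketch of the kind of safe code that the `self.initialized` check above
// protects (cf. `two_mut_protected_same_alloc` in
// `tests/pass/tree_borrows/tree-borrows.rs`; this is an illustration, not the
// literal test):
//
//     fn write_both(x: &mut u8, y: &mut u8) {
//         // Both `x` and `y` are protected during this call. The write to `*y`
//         // is a foreign write for `x` on `y`'s byte, but `x` has never accessed
//         // that byte: only its "future initial permission" there becomes
//         // `Disabled`, so no protector UB is raised.
//         *y = 1;
//         *x = 2;
//     }
//     let mut pair = (0u8, 0u8);
//     write_both(&mut pair.0, &mut pair.1);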
130
131 /// Like `perform_access`, but ignores the concrete error cause and also uses state-passing
132 /// rather than a mutable reference. As such, it returns `Some(x)` if the transition succeeded,
133 /// or `None` if there was an error.
134 #[cfg(test)]
135 fn perform_access_no_fluff(
136 mut self,
137 access_kind: AccessKind,
138 rel_pos: AccessRelatedness,
139 protected: bool,
140 ) -> Option<Self> {
141 match self.perform_access(access_kind, rel_pos, protected) {
142 Ok(_) => Some(self),
143 Err(_) => None,
144 }
145 }
146
147 /// Tree traversal optimizations. See `foreign_access_skipping.rs`.
148 /// This checks whether a given foreign access can be skipped.
149 fn skip_if_known_noop(
150 &self,
151 access_kind: AccessKind,
152 rel_pos: AccessRelatedness,
153 ) -> ContinueTraversal {
154 if rel_pos.is_foreign() {
155 let happening_now = IdempotentForeignAccess::from_foreign(access_kind);
156 let mut new_access_noop =
157 self.idempotent_foreign_access.can_skip_foreign_access(happening_now);
158 if self.permission.is_disabled() {
159 // A foreign access to a `Disabled` tag will have almost no observable effect.
160 // It's a theorem that `Disabled` nodes have no protected initialized children,
161 // and so this foreign access will never trigger any protector.
162 // (Intuition: you're either protected and initialized, and thus can't become Disabled,
163 // or you're already Disabled and protected but not initialized, and then you can't
164 // become initialized since that requires a child access, which Disabled blocks.)
165 // Further, the children will never be able to read or write again, since they
166 // have a `Disabled` parent. So this only affects diagnostics, such that the
167 // blocking write will still be identified directly, just at a different tag.
168 new_access_noop = true;
169 }
170 if self.permission.is_frozen() && access_kind == AccessKind::Read {
171 // A foreign read to a `Frozen` tag will have almost no observable effect.
172 // It's a theorem that `Frozen` nodes have no active children, so all children
173 // already survive foreign reads. Foreign reads in general have almost no
174 // effect, the only further thing they could do is make protected `Reserved`
175 // nodes become conflicted, i.e. make them reject child writes for the further
176 // duration of their protector. But such a child write is already rejected
177 // because this node is frozen. So this only affects diagnostics, but the
178 // blocking read will still be identified directly, just at a different tag.
179 new_access_noop = true;
180 }
181 if new_access_noop {
182 // Abort traversal if the new access is indeed guaranteed
183 // to be noop.
184 // No need to update `self.idempotent_foreign_access`:
185 // whether the current streak of foreign accesses is read-only
186 // or contains at least one write has not changed.
187 ContinueTraversal::SkipSelfAndChildren
188 } else {
189 // Otherwise propagate this time, and also record the
190 // access that just occurred so that we can skip the propagation
191 // next time.
192 ContinueTraversal::Recurse
193 }
194 } else {
195 // A child access occurred, this breaks the streak of foreign
196 // accesses in a row and the sequence since the previous child access
197 // is now empty.
198 ContinueTraversal::Recurse
199 }
200 }
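
// Illustrative sketch of the fast paths above, assuming a `Permission::new_frozen`
// constructor as in `perms.rs`: a foreign read of a `Frozen` location can always
// be skipped, while a foreign write here still recurses since no idempotent
// foreign write has been recorded for this location.
//
//     let loc = LocationState::new_init(Permission::new_frozen(), IdempotentForeignAccess::None);
//     assert!(matches!(
//         loc.skip_if_known_noop(AccessKind::Read, AccessRelatedness::CousinAccess),
//         ContinueTraversal::SkipSelfAndChildren
//     ));
//     assert!(matches!(
//         loc.skip_if_known_noop(AccessKind::Write, AccessRelatedness::CousinAccess),
//         ContinueTraversal::Recurse
//     ));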
201
202 /// Records a new access, so that future accesses can potentially be skipped
203 /// by `skip_if_known_noop`. This must be called on child accesses, and should also
204 /// be called on foreign accesses for increased performance. It should not be called
205 /// when `skip_if_known_noop` indicated skipping, since it is a no-op in that case.
206 /// See `foreign_access_skipping.rs`.
207 fn record_new_access(&mut self, access_kind: AccessKind, rel_pos: AccessRelatedness) {
208 debug_assert!(matches!(
209 self.skip_if_known_noop(access_kind, rel_pos),
210 ContinueTraversal::Recurse
211 ));
212 self.idempotent_foreign_access
213 .record_new(IdempotentForeignAccess::from_acc_and_rel(access_kind, rel_pos));
214 }
215}
216
217impl fmt::Display for LocationState {
218 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
219 write!(f, "{}", self.permission)?;
220 if !self.initialized {
221 write!(f, "?")?;
222 }
223 Ok(())
224 }
225}
226
227/// Tree structure with both parents and children since we want to be
228/// able to traverse the tree efficiently in both directions.
229#[derive(Clone, Debug)]
230pub struct Tree {
231 /// Mapping from tags to keys. The key obtained can then be used in
232 /// any of the `UniValMap` relative to this allocation, i.e. both the
233 /// `nodes` and `rperms` of the same `Tree`.
234 /// The parent-child relationship in `Node` is encoded in terms of these same
235 /// keys, so traversing the entire tree needs exactly one access to
236 /// `tag_mapping`.
237 pub(super) tag_mapping: UniKeyMap<BorTag>,
238 /// All nodes of this tree.
239 pub(super) nodes: UniValMap<Node>,
240 /// Maps a tag and a location to a perm, with possible lazy
241 /// initialization.
242 ///
243 /// NOTE: not all tags registered in `nodes` are necessarily in all
244 /// ranges of `rperms`, because `rperms` is in part lazily initialized.
245 /// Just because `nodes.get(key)` is `Some(_)` does not mean you can safely
246 /// `unwrap` any `perm.get(key)`.
247 ///
248 /// We do uphold the invariant that `keys(perms)` is a subset of `keys(nodes)`.
249 pub(super) rperms: RangeMap<UniValMap<LocationState>>,
250 /// The index of the root node.
251 pub(super) root: UniIndex,
252}
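
// Because `rperms` is lazily populated, lookups must be prepared for a missing
// entry and fall back to the node's default state, as the traversal code below
// does. A sketch of the pattern, with `perms` being one range of `rperms` and
// `idx` a key that is present in `nodes`:
//
//     let state = perms
//         .get(idx)
//         .copied()
//         .unwrap_or_else(|| node.default_location_state());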
253
254/// A node in the borrow tree. Each node is uniquely identified by a tag via
255/// the `nodes` map of `Tree`.
256#[derive(Clone, Debug)]
257pub(super) struct Node {
258 /// The tag of this node.
259 pub tag: BorTag,
260 /// All tags except the root have a parent tag.
261 pub parent: Option<UniIndex>,
262 /// If the pointer was reborrowed, it has children.
263 // FIXME: bench to compare this to FxHashSet and to other SmallVec sizes
264 pub children: SmallVec<[UniIndex; 4]>,
265 /// Either `Reserved`, `Frozen`, or `Disabled`, it is the permission this tag will
266 /// lazily be initialized to on the first access.
267 /// It is only ever `Disabled` for a tree root, since the root is initialized to `Active` by
268 /// its own separate mechanism.
269 default_initial_perm: Permission,
270 /// The default initial (strongest) idempotent foreign access.
271 /// This participates in the invariant for `LocationState::idempotent_foreign_access`
272 /// in cases where there is no location state yet. See `foreign_access_skipping.rs`,
273 /// and `LocationState::idempotent_foreign_access` for more information
274 default_initial_idempotent_foreign_access: IdempotentForeignAccess,
275 /// Some extra information useful only for debugging purposes
276 pub debug_info: NodeDebugInfo,
277}
278
279/// Data given to the transition function
280struct NodeAppArgs<'node> {
281 /// Node on which the transition is currently being applied
282 node: &'node mut Node,
283 /// Mutable access to its permissions
284 perm: UniEntry<'node, LocationState>,
285 /// Relative position of the access
286 rel_pos: AccessRelatedness,
287}
288/// Data given to the error handler
289struct ErrHandlerArgs<'node, InErr> {
290 /// Kind of error that occurred
291 error_kind: InErr,
292 /// Tag that triggered the error (not the tag that was accessed,
293 /// rather the parent tag that had insufficient permissions or the
294 /// non-parent tag that had a protector).
295 conflicting_info: &'node NodeDebugInfo,
296 /// Information about the tag that was accessed just before the
297 /// error was triggered.
298 accessed_info: &'node NodeDebugInfo,
299}
300/// Internal contents of `Tree` with the minimum of mutable access for
301/// the purposes of the tree traversal functions: the permissions (`perms`) can be
302/// updated but not the tree structure (`tag_mapping` and `nodes`)
303struct TreeVisitor<'tree> {
304 tag_mapping: &'tree UniKeyMap<BorTag>,
305 nodes: &'tree mut UniValMap<Node>,
306 perms: &'tree mut UniValMap<LocationState>,
307}
308
309/// Whether to continue exploring the children recursively or not.
310enum ContinueTraversal {
311 Recurse,
312 SkipSelfAndChildren,
313}
314
315#[derive(Clone, Copy)]
316pub enum ChildrenVisitMode {
317 VisitChildrenOfAccessed,
318 SkipChildrenOfAccessed,
319}
320
321enum RecursionState {
322 BeforeChildren,
323 AfterChildren,
324}
325
326/// Stack of nodes left to explore in a tree traversal.
327/// See the docs of `traverse_this_parents_children_other` for details on the
328/// traversal order.
329struct TreeVisitorStack<NodeContinue, NodeApp, ErrHandler> {
330 /// Identifier of the original access.
331 initial: UniIndex,
332 /// Function describing whether to continue at a tag.
333 /// This is only invoked for foreign accesses.
334 f_continue: NodeContinue,
335 /// Function to apply to each tag.
336 f_propagate: NodeApp,
337 /// Handler to add the required context to diagnostics.
338 err_builder: ErrHandler,
339 /// Mutable state of the visit: the tags left to handle.
340 /// Every tag pushed should eventually be handled,
341 /// and the precise order is relevant for diagnostics.
342 /// Since the traversal is piecewise bottom-up, we need to
343 /// remember whether we're here initially, or after visiting all children.
344 /// The `RecursionState` component of each entry tracks this.
345 /// This is just an artifact of how you hand-roll recursion,
346 /// it does not have a deeper meaning otherwise.
347 stack: Vec<(UniIndex, AccessRelatedness, RecursionState)>,
348}
349
350impl<NodeContinue, NodeApp, InnErr, OutErr, ErrHandler>
351 TreeVisitorStack<NodeContinue, NodeApp, ErrHandler>
352where
353 NodeContinue: Fn(&NodeAppArgs<'_>) -> ContinueTraversal,
354 NodeApp: Fn(NodeAppArgs<'_>) -> Result<(), InnErr>,
355 ErrHandler: Fn(ErrHandlerArgs<'_, InnErr>) -> OutErr,
356{
357 fn should_continue_at(
358 &self,
359 this: &mut TreeVisitor<'_>,
360 idx: UniIndex,
361 rel_pos: AccessRelatedness,
362 ) -> ContinueTraversal {
363 let node = this.nodes.get_mut(idx).unwrap();
364 let args = NodeAppArgs { node, perm: this.perms.entry(idx), rel_pos };
365 (self.f_continue)(&args)
366 }
367
368 fn propagate_at(
369 &mut self,
370 this: &mut TreeVisitor<'_>,
371 idx: UniIndex,
372 rel_pos: AccessRelatedness,
373 ) -> Result<(), OutErr> {
374 let node = this.nodes.get_mut(idx).unwrap();
375 (self.f_propagate)(NodeAppArgs { node, perm: this.perms.entry(idx), rel_pos }).map_err(
376 |error_kind| {
377 (self.err_builder)(ErrHandlerArgs {
378 error_kind,
379 conflicting_info: &this.nodes.get(idx).unwrap().debug_info,
380 accessed_info: &this.nodes.get(self.initial).unwrap().debug_info,
381 })
382 },
383 )
384 }
385
386 fn go_upwards_from_accessed(
387 &mut self,
388 this: &mut TreeVisitor<'_>,
389 accessed_node: UniIndex,
390 visit_children: ChildrenVisitMode,
391 ) -> Result<(), OutErr> {
392 // We want to visit the accessed node's children first.
393 // However, we will below walk up our parents and push their children (our cousins)
394 // onto the stack. To ensure correct iteration order, this method thus finishes
395 // by reversing the stack. This only works if the stack is empty initially.
396 assert!(self.stack.is_empty());
397 // First, handle accessed node. A bunch of things need to
398 // be handled differently here compared to the further parents
399 // of `accessed_node`.
400 {
401 self.propagate_at(this, accessed_node, AccessRelatedness::This)?;
402 if matches!(visit_children, ChildrenVisitMode::VisitChildrenOfAccessed) {
403 let accessed_node = this.nodes.get(accessed_node).unwrap();
404 // We `rev()` here because we reverse the entire stack later.
405 for &child in accessed_node.children.iter().rev() {
406 self.stack.push((
407 child,
408 AccessRelatedness::AncestorAccess,
409 RecursionState::BeforeChildren,
410 ));
411 }
412 }
413 }
414 // Then, handle the accessed node's parents. Here, we need to
415 // make sure we only mark the "cousin" subtrees for later visitation,
416 // not the subtree that contains the accessed node.
417 let mut last_node = accessed_node;
418 while let Some(current) = this.nodes.get(last_node).unwrap().parent {
419 self.propagate_at(this, current, AccessRelatedness::StrictChildAccess)?;
420 let node = this.nodes.get(current).unwrap();
421 // We `rev()` here because we reverse the entire stack later.
422 for &child in node.children.iter().rev() {
423 if last_node == child {
424 continue;
425 }
426 self.stack.push((
427 child,
428 AccessRelatedness::CousinAccess,
429 RecursionState::BeforeChildren,
430 ));
431 }
432 last_node = current;
433 }
434 // Reverse the stack, as discussed above.
435 self.stack.reverse();
436 Ok(())
437 }
438
439 fn finish_foreign_accesses(&mut self, this: &mut TreeVisitor<'_>) -> Result<(), OutErr> {
440 while let Some((idx, rel_pos, step)) = self.stack.last_mut() {
441 let idx = *idx;
442 let rel_pos = *rel_pos;
443 match *step {
444 // How to do bottom-up traversal, 101: Before you handle a node, you handle all children.
445 // For this, you must first find the children, which is what this code here does.
446 RecursionState::BeforeChildren => {
447 // Next time we come back will be when all the children are handled.
448 *step = RecursionState::AfterChildren;
449 // Now push the children, except if we are told to skip this subtree.
450 let handle_children = self.should_continue_at(this, idx, rel_pos);
451 match handle_children {
452 ContinueTraversal::Recurse => {
453 let node = this.nodes.get(idx).unwrap();
454 for &child in node.children.iter() {
455 self.stack.push((child, rel_pos, RecursionState::BeforeChildren));
456 }
457 }
458 ContinueTraversal::SkipSelfAndChildren => {
459 // skip self
460 self.stack.pop();
461 continue;
462 }
463 }
464 }
465 // All the children are handled, let's actually visit this node
466 RecursionState::AfterChildren => {
467 self.stack.pop();
468 self.propagate_at(this, idx, rel_pos)?;
469 }
470 }
471 }
472 Ok(())
473 }
474
475 fn new(
476 initial: UniIndex,
477 f_continue: NodeContinue,
478 f_propagate: NodeApp,
479 err_builder: ErrHandler,
480 ) -> Self {
481 Self { initial, f_continue, f_propagate, err_builder, stack: Vec::new() }
482 }
483}
484
485impl<'tree> TreeVisitor<'tree> {
486 /// Applies `f_propagate` to every vertex of the tree in a piecewise bottom-up way: first visit
487 /// `start` and all its ancestors (going upwards), then the children of `start`, then the rest,
488 /// going bottom-up within each of these "pieces" / sections.
489 /// This ensures that errors are triggered in the following order
490 /// - first invalid accesses with insufficient permissions, closest to the accessed node first,
491 /// - then protector violations, bottom-up, starting with the children of the accessed node, and then
492 /// going upwards and outwards.
493 ///
494 /// The following graphic visualizes it, with numbers indicating visitation order and `start` being
495 /// the node that is visited first ("1"):
496 ///
497 /// ```text
498 /// 3
499 /// /|
500 /// / |
501 /// 9 2
502 /// | |\
503 /// | | \
504 /// 8 1 7
505 /// / \
506 /// 4 6
507 /// |
508 /// 5
509 /// ```
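///
/// In terms of `AccessRelatedness`, each node sees this access as follows:
///
/// ```text
/// 1       => This
/// 2, 3    => StrictChildAccess  (the access happened below them)
/// 4, 5, 6 => AncestorAccess     (the access happened above them)
/// 7, 8, 9 => CousinAccess       (the access happened in a side branch)
/// ```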
510 ///
511 /// `f_propagate` should follow the following format: for a given `Node` it updates its
512 /// `Permission` depending on the position relative to `start` (given by an
513 /// `AccessRelatedness`).
514 /// `f_continue` is called earlier on foreign nodes, and describes whether to even start
515 /// visiting the subtree at that node. If it e.g. returns `SkipSelfAndChildren` on node 6
516 /// above, then nodes 5 _and_ 6 would not be visited by `f_propagate`. It is not used for
517 /// nodes undergoing a child access (nodes 1, 2, 3).
518 ///
519 /// Finally, remember that the iteration order is not relevant for UB, it only affects
520 /// diagnostics. It also affects tree traversal optimizations built on top of this, so
521 /// those need to be reviewed carefully as well whenever this changes.
522 fn traverse_this_parents_children_other<InnErr, OutErr>(
523 mut self,
524 start: BorTag,
525 f_continue: impl Fn(&NodeAppArgs<'_>) -> ContinueTraversal,
526 f_propagate: impl Fn(NodeAppArgs<'_>) -> Result<(), InnErr>,
527 err_builder: impl Fn(ErrHandlerArgs<'_, InnErr>) -> OutErr,
528 ) -> Result<(), OutErr> {
529 let start_idx = self.tag_mapping.get(&start).unwrap();
530 let mut stack = TreeVisitorStack::new(start_idx, f_continue, f_propagate, err_builder);
531 // Visits the accessed node itself, and all its parents, i.e. all nodes
532 // undergoing a child access. Also pushes the children and the other
533 // cousin nodes (i.e. all nodes undergoing a foreign access) to the stack
534 // to be processed later.
535 stack.go_upwards_from_accessed(
536 &mut self,
537 start_idx,
538 ChildrenVisitMode::VisitChildrenOfAccessed,
539 )?;
540 // Now visit all the foreign nodes we remembered earlier.
541 // For this we go bottom-up, but also allow f_continue to skip entire
542 // subtrees from being visited if it would be a NOP.
543 stack.finish_foreign_accesses(&mut self)
544 }
545
546 /// Like `traverse_this_parents_children_other`, but skips the children of `start`.
547 fn traverse_nonchildren<InnErr, OutErr>(
548 mut self,
549 start: BorTag,
550 f_continue: impl Fn(&NodeAppArgs<'_>) -> ContinueTraversal,
551 f_propagate: impl Fn(NodeAppArgs<'_>) -> Result<(), InnErr>,
552 err_builder: impl Fn(ErrHandlerArgs<'_, InnErr>) -> OutErr,
553 ) -> Result<(), OutErr> {
554 let start_idx = self.tag_mapping.get(&start).unwrap();
555 let mut stack = TreeVisitorStack::new(start_idx, f_continue, f_propagate, err_builder);
556 // Visits the accessed node itself, and all its parents, i.e. all nodes
557 // undergoing a child access. Also pushes the other cousin nodes to the
558 // stack, but not the children of the accessed node.
559 stack.go_upwards_from_accessed(
560 &mut self,
561 start_idx,
562 ChildrenVisitMode::SkipChildrenOfAccessed,
563 )?;
564 // Now visit all the foreign nodes we remembered earlier.
565 // For this we go bottom-up, but also allow f_continue to skip entire
566 // subtrees from being visited if it would be a NOP.
567 stack.finish_foreign_accesses(&mut self)
568 }
569}
570
571impl Tree {
572 /// Create a new tree, with only a root pointer.
573 pub fn new(root_tag: BorTag, size: Size, span: Span) -> Self {
574 // The root has `Disabled` as the default permission,
575 // so that any access out of bounds is invalid.
576 let root_default_perm = Permission::new_disabled();
577 let mut tag_mapping = UniKeyMap::default();
578 let root_idx = tag_mapping.insert(root_tag);
579 let nodes = {
580 let mut nodes = UniValMap::<Node>::default();
581 let mut debug_info = NodeDebugInfo::new(root_tag, root_default_perm, span);
582 // name the root so that all allocations contain one named pointer
583 debug_info.add_name("root of the allocation");
584 nodes.insert(
585 root_idx,
586 Node {
587 tag: root_tag,
588 parent: None,
589 children: SmallVec::default(),
590 default_initial_perm: root_default_perm,
591 // The root may never be skipped: every access to the allocation is a child access for it.
592 default_initial_idempotent_foreign_access: IdempotentForeignAccess::None,
593 debug_info,
594 },
595 );
596 nodes
597 };
598 let rperms = {
599 let mut perms = UniValMap::default();
600 // We manually set it to `Active` on all in-bounds positions.
601 // We also ensure that it is initialized, so that no `Active` but
602 // not yet initialized nodes exist. Essentially, we pretend there
603 // was a write that initialized these to `Active`.
604 perms.insert(
605 root_idx,
606 LocationState::new_init(Permission::new_active(), IdempotentForeignAccess::None),
607 );
608 RangeMap::new(size, perms)
609 };
610 Self { root: root_idx, nodes, rperms, tag_mapping }
611 }
612}
613
614impl<'tcx> Tree {
615 /// Insert a new tag in the tree
616 pub fn new_child(
617 &mut self,
618 parent_tag: BorTag,
619 new_tag: BorTag,
620 default_initial_perm: Permission,
621 reborrow_range: AllocRange,
622 span: Span,
623 prot: bool,
624 ) -> InterpResult<'tcx> {
625 assert!(!self.tag_mapping.contains_key(&new_tag));
626 let idx = self.tag_mapping.insert(new_tag);
627 let parent_idx = self.tag_mapping.get(&parent_tag).unwrap();
628 let strongest_idempotent = default_initial_perm.strongest_idempotent_foreign_access(prot);
629 // Create the node
630 self.nodes.insert(
631 idx,
632 Node {
633 tag: new_tag,
634 parent: Some(parent_idx),
635 children: SmallVec::default(),
636 default_initial_perm,
637 default_initial_idempotent_foreign_access: strongest_idempotent,
638 debug_info: NodeDebugInfo::new(new_tag, default_initial_perm, span),
639 },
640 );
641 // Register new_tag as a child of parent_tag
642 self.nodes.get_mut(parent_idx).unwrap().children.push(idx);
643 // Initialize perms
644 let perm = LocationState::new_init(default_initial_perm, strongest_idempotent);
645 for (_perms_range, perms) in self.rperms.iter_mut(reborrow_range.start, reborrow_range.size)
646 {
647 perms.insert(idx, perm);
648 }
649
650 // Inserting the new perms might have broken the SIFA invariant (see `foreign_access_skipping.rs`).
651 // We now weaken the recorded SIFA for our parents, until the invariant is restored.
652 // We could weaken them all to `LocalAccess`, but it is more efficient to compute the SIFA
653 // for the new permission statically, and use that.
654 self.update_last_accessed_after_retag(parent_idx, strongest_idempotent);
655
656 interp_ok(())
657 }
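
// Sketch of how a retag reaches `new_child` (illustrative only: the initial
// permission and protector flag are computed by the retag logic in `mod.rs`,
// and `x_tag`, `y_tag`, `retag_range` are placeholder names):
//
//     // Surface code: reborrowing `x` creates a fresh tag for `y`, registered
//     // as a child of `x`'s tag in this allocation's tree.
//     let y = &mut *x;
//     // Roughly corresponds to:
//     //     tree.new_child(x_tag, y_tag, initial_perm, retag_range, span, prot)?;
//     // where `initial_perm` is some `Reserved` flavor for a `&mut` reborrow and
//     // `retag_range` covers the pointee type; locations inside that range are
//     // eagerly initialized here, the rest of the allocation stays lazy.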
658
659 /// Restores the SIFA "children are stronger" invariant after a retag.
660 /// See `foreign_access_skipping` and `new_child`.
661 fn update_last_accessed_after_retag(
662 &mut self,
663 mut current: UniIndex,
664 strongest_allowed: IdempotentForeignAccess,
665 ) {
666 // We walk the tree upwards, until the invariant is restored
667 loop {
668 let current_node = self.nodes.get_mut(current).unwrap();
669 // Call `ensure_no_stronger_than` on all SIFAs for this node: the per-location SIFA, as well
670 // as the default SIFA for not-yet-initialized locations.
671 // Record whether we did any change; if not, the invariant is restored and we can stop the traversal.
672 let mut any_change = false;
673 for (_, map) in self.rperms.iter_mut_all() {
674 // Check if this node has a state for this location (or range of locations).
675 if let Some(perm) = map.get_mut(current) {
676 // Update the per-location SIFA, recording if it changed.
677 any_change |=
678 perm.idempotent_foreign_access.ensure_no_stronger_than(strongest_allowed);
679 }
680 }
681 // Now update `default_initial_idempotent_foreign_access`, which stores the default SIFA for not-yet-initialized locations.
682 any_change |= current_node
683 .default_initial_idempotent_foreign_access
684 .ensure_no_stronger_than(strongest_allowed);
685
686 if any_change {
687 let Some(next) = self.nodes.get(current).unwrap().parent else {
688 // We have arrived at the root.
689 break;
690 };
691 current = next;
692 continue;
693 } else {
694 break;
695 }
696 }
697 }
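
// Sketch of the invariant restored above (see `foreign_access_skipping.rs` for
// the ordering on `IdempotentForeignAccess`, weakest to strongest): for every
// parent/child edge of the tree,
//
//     SIFA(parent) <= SIFA(child)
//
// The new child starts at `strongest_idempotent`, so every ancestor is clamped
// to at most that value. Once some ancestor needed no change, the invariant
// guarantees that all of its own ancestors are already weak enough, which is
// why the loop may stop early.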
698
699 /// Deallocation requires
700 /// - a pointer that permits write accesses
701 /// - the absence of Strong Protectors anywhere in the allocation
702 pub fn dealloc(
703 &mut self,
704 tag: BorTag,
705 access_range: AllocRange,
706 global: &GlobalState,
707 alloc_id: AllocId, // diagnostics
708 span: Span, // diagnostics
709 ) -> InterpResult<'tcx> {
710 self.perform_access(
711 tag,
712 Some((access_range, AccessKind::Write, diagnostics::AccessCause::Dealloc)),
713 global,
714 alloc_id,
715 span,
716 )?;
717 for (perms_range, perms) in self.rperms.iter_mut(access_range.start, access_range.size) {
718 TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, perms }
719 .traverse_this_parents_children_other(
720 tag,
721 // visit all children, skipping none
722 |_| ContinueTraversal::Recurse,
723 |args: NodeAppArgs<'_>| -> Result<(), TransitionError> {
724 let NodeAppArgs { node, .. } = args;
725 if global.borrow().protected_tags.get(&node.tag)
726 == Some(&ProtectorKind::StrongProtector)
727 {
728 Err(TransitionError::ProtectedDealloc)
729 } else {
730 Ok(())
731 }
732 },
733 |args: ErrHandlerArgs<'_, TransitionError>| -> InterpErrorKind<'tcx> {
734 let ErrHandlerArgs { error_kind, conflicting_info, accessed_info } = args;
735 TbError {
736 conflicting_info,
737 access_cause: diagnostics::AccessCause::Dealloc,
738 alloc_id,
739 error_offset: perms_range.start,
740 error_kind,
741 accessed_info,
742 }
743 .build()
744 },
745 )?;
746 }
747 interp_ok(())
748 }
749
750 /// Map the per-node and per-location `LocationState::perform_access`
751 /// to each location of the first component of `access_range_and_kind`,
752 /// on every tag of the allocation.
753 ///
754 /// If `access_range_and_kind` is `None`, this is interpreted as the special
755 /// access that is applied on protector release:
756 /// - the access will be applied only to initialized locations of the allocation,
757 /// - it will not be visible to children,
758 /// - it will be recorded as a `FnExit` diagnostic access
759 /// - and it will be a read except if the location is `Active`, i.e. has been written to,
760 /// in which case it will be a write.
761 ///
762 /// `LocationState::perform_access` will take care of raising transition
763 /// errors and updating the `initialized` status of each location,
764 /// this traversal adds to that:
765 /// - inserting into the map locations that do not exist yet,
766 /// - trimming the traversal,
767 /// - recording the history.
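///
/// A sketch of the two call shapes (`range`, `cause`, etc. are placeholders,
/// not the real call sites):
/// ```rust,ignore
/// // Normal access through `tag`, applied to `range` of the allocation:
/// tree.perform_access(tag, Some((range, AccessKind::Write, cause)), &global, alloc_id, span)?;
/// // Implicit access performed when a protector on `tag` is released:
/// tree.perform_access(tag, None, &global, alloc_id, span)?;
/// ```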
768 pub fn perform_access(
769 &mut self,
770 tag: BorTag,
771 access_range_and_kind: Option<(AllocRange, AccessKind, diagnostics::AccessCause)>,
772 global: &GlobalState,
773 alloc_id: AllocId, // diagnostics
774 span: Span, // diagnostics
775 ) -> InterpResult<'tcx> {
776 use std::ops::Range;
777 // Performs the per-node work:
778 // - insert the permission if it does not exist
779 // - perform the access
780 // - record the transition
781 // to which some optimizations are added:
782 // - skip the traversal of the children in some cases
783 // - do not record noop transitions
784 //
785 // `perms_range` is only for diagnostics (it is the range of
786 // the `RangeMap` on which we are currently working).
787 let node_skipper = |access_kind: AccessKind, args: &NodeAppArgs<'_>| -> ContinueTraversal {
788 let NodeAppArgs { node, perm, rel_pos } = args;
789
790 let old_state = perm.get().copied().unwrap_or_else(|| node.default_location_state());
791 old_state.skip_if_known_noop(access_kind, *rel_pos)
792 };
793 let node_app = |perms_range: Range<u64>,
794 access_kind: AccessKind,
795 access_cause: diagnostics::AccessCause,
796 args: NodeAppArgs<'_>|
797 -> Result<(), TransitionError> {
798 let NodeAppArgs { node, mut perm, rel_pos } = args;
799
800 let old_state = perm.or_insert(node.default_location_state());
801
802 // Call `record_new_access` here: the contract of
803 // `traverse_this_parents_children_other` ensures it only runs when
804 // `skip_if_known_noop` returned `Recurse`.
805 old_state.record_new_access(access_kind, rel_pos);
806
807 let protected = global.borrow().protected_tags.contains_key(&node.tag);
808 let transition = old_state.perform_access(access_kind, rel_pos, protected)?;
809 // Record the event as part of the history
810 if !transition.is_noop() {
811 node.debug_info.history.push(diagnostics::Event {
812 transition,
813 is_foreign: rel_pos.is_foreign(),
814 access_cause,
815 access_range: access_range_and_kind.map(|x| x.0),
816 transition_range: perms_range,
817 span,
818 });
819 }
820 Ok(())
821 };
822
823 // Error handler in case `node_app` goes wrong.
824 // Wraps the faulty transition in more context for diagnostics.
825 let err_handler = |perms_range: Range<u64>,
826 access_cause: diagnostics::AccessCause,
827 args: ErrHandlerArgs<'_, TransitionError>|
828 -> InterpErrorKind<'tcx> {
829 let ErrHandlerArgs { error_kind, conflicting_info, accessed_info } = args;
830 TbError {
831 conflicting_info,
832 access_cause,
833 alloc_id,
834 error_offset: perms_range.start,
835 error_kind,
836 accessed_info,
837 }
838 .build()
839 };
840
841 if let Some((access_range, access_kind, access_cause)) = access_range_and_kind {
842 // Default branch: this is a "normal" access through a known range.
843 // We iterate over affected locations and traverse the tree for each of them.
844 for (perms_range, perms) in self.rperms.iter_mut(access_range.start, access_range.size)
845 {
846 TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, perms }
847 .traverse_this_parents_children_other(
848 tag,
849 |args| node_skipper(access_kind, args),
850 |args| node_app(perms_range.clone(), access_kind, access_cause, args),
851 |args| err_handler(perms_range.clone(), access_cause, args),
852 )?;
853 }
854 } else {
855 // This is a special access through the entire allocation.
856 // It actually only affects `initialized` locations, so we need
857 // to filter on those before initiating the traversal.
858 //
859 // In addition this implicit access should not be visible to children,
860 // thus the use of `traverse_nonchildren`.
861 // See the test case `returned_mut_is_usable` from
862 // `tests/pass/tree_borrows/tree-borrows.rs` for an example of
863 // why this is important.
864 for (perms_range, perms) in self.rperms.iter_mut_all() {
865 let idx = self.tag_mapping.get(&tag).unwrap();
866 // Only visit initialized permissions
867 if let Some(p) = perms.get(idx)
868 && p.initialized
869 {
870 let access_kind =
871 if p.permission.is_active() { AccessKind::Write } else { AccessKind::Read };
872 let access_cause = diagnostics::AccessCause::FnExit(access_kind);
873 TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, perms }
874 .traverse_nonchildren(
875 tag,
876 |args| node_skipper(access_kind, args),
877 |args| node_app(perms_range.clone(), access_kind, access_cause, args),
878 |args| err_handler(perms_range.clone(), access_cause, args),
879 )?;
880 }
881 }
882 }
883 interp_ok(())
884 }
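
// Sketch of why the implicit protector-release access must not be visible to the
// children of the protected tag (cf. `returned_mut_is_usable` in
// `tests/pass/tree_borrows/tree-borrows.rs`; illustration only):
//
//     fn id(x: &mut u8) -> &mut u8 { x }   // `x` is protected during the call
//
//     let mut v = 0u8;
//     let y = id(&mut v);   // `y` derives from `x`, so its tag sits at or below `x`'s
//     *y = 1;               // must still be allowed after `x`'s protector ends
//
// If the release access at `x` were propagated into `x`'s subtree, it would count
// as a foreign access for such descendants and could disable them, breaking the
// safe code above; hence the use of `traverse_nonchildren`.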
885}
886
887/// Integration with the BorTag garbage collector
888impl Tree {
889 pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
890 self.remove_useless_children(self.root, live_tags);
891 // Right after the GC runs is a good moment to check if we can
892 // merge some adjacent ranges that were made equal by the removal of some
893 // tags (this does not necessarily mean that they have identical internal representations,
894 // see the `PartialEq` impl for `UniValMap`)
895 self.rperms.merge_adjacent_thorough();
896 }
897
898 /// Checks if a node is useless and should be GC'ed.
899 /// A node is useless if it has no children and also the tag is no longer live.
900 fn is_useless(&self, idx: UniIndex, live: &FxHashSet<BorTag>) -> bool {
901 let node = self.nodes.get(idx).unwrap();
902 node.children.is_empty() && !live.contains(&node.tag)
903 }
904
905 /// Checks whether a node can be replaced by its only child.
906 /// If so, returns the index of said only child.
907 /// If not, returns `None`.
908 fn can_be_replaced_by_single_child(
909 &self,
910 idx: UniIndex,
911 live: &FxHashSet<BorTag>,
912 ) -> Option<UniIndex> {
913 let node = self.nodes.get(idx).unwrap();
914
915 let [child_idx] = node.children[..] else { return None };
916
917 // We never want to replace the root node, as it is also kept in `root_ptr_tags`.
918 if live.contains(&node.tag) || node.parent.is_none() {
919 return None;
920 }
921 // Since protected nodes are never GC'd (see `borrow_tracker::FrameExtra::visit_provenance`),
922 // we know that `node` is not protected because otherwise `live` would
923 // have contained `node.tag`.
924 let child = self.nodes.get(child_idx).unwrap();
925 // Check that for that one child, `can_be_replaced_by_child` holds for the permission
926 // on all locations.
927 for (_, data) in self.rperms.iter_all() {
928 let parent_perm =
929 data.get(idx).map(|x| x.permission).unwrap_or_else(|| node.default_initial_perm);
930 let child_perm = data
931 .get(child_idx)
932 .map(|x| x.permission)
933 .unwrap_or_else(|| child.default_initial_perm);
934 if !parent_perm.can_be_replaced_by_child(child_perm) {
935 return None;
936 }
937 }
938
939 Some(child_idx)
940 }
941
942 /// Properly removes a node.
943 /// The node to be removed should not otherwise be usable. It also
944 /// should have no children, but this is not checked, so that nodes
945 /// whose children were rotated somewhere else can be deleted without
946 /// having to first modify them to clear that array.
947 fn remove_useless_node(&mut self, this: UniIndex) {
948 // Due to the API of UniMap we must make sure to call
949 // `UniValMap::remove` for the key of this node on *all* maps that used it
950 // (which are `self.nodes` and every range of `self.rperms`)
951 // before we can safely apply `UniKeyMap::remove` to truly remove
952 // this tag from the `tag_mapping`.
953 let node = self.nodes.remove(this).unwrap();
954 for (_perms_range, perms) in self.rperms.iter_mut_all() {
955 perms.remove(this);
956 }
957 self.tag_mapping.remove(&node.tag);
958 }
959
960 /// Traverses the entire tree looking for useless tags.
961 /// Removes from the tree all useless child nodes of root.
962 /// It will not delete the root itself.
963 ///
964 /// NOTE: This leaves in the middle of the tree tags that are unreachable but have
965 /// reachable children. There is a potential for compacting the tree by reassigning
966 /// children of dead tags to the nearest live parent, but it must be done with care
967 /// not to remove UB.
968 ///
969 /// Example: Consider the tree `root - parent - child`, with `parent: Frozen` and
970 /// `child: Reserved`. This tree can exist. If we blindly delete `parent` and reassign
971 /// `child` to be a direct child of `root`, then writes to `child` are now permitted
972 /// whereas they were not when `parent` was still there.
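///
/// ```text
/// Before:  root -- parent (Frozen) -- child (Reserved)    write via `child`: UB
/// After:   root -- child (Reserved)                       write via `child`: allowed
/// ```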
973 fn remove_useless_children(&mut self, root: UniIndex, live: &FxHashSet<BorTag>) {
974 // To avoid stack overflows, we roll our own stack.
975 // Each element in the stack consists of the current tag, and the number of the
976 // next child to be processed.
977
978 // The other functions are written using the `TreeVisitorStack`, but that does not work here
979 // since we need to 1) do a post-traversal and 2) remove nodes from the tree.
980 // Since we do a post-traversal (by deleting nodes only after handling all children),
981 // we also need to be a bit smarter than "pop node, push all children."
982 let mut stack = vec![(root, 0)];
983 while let Some((tag, nth_child)) = stack.last_mut() {
984 let node = self.nodes.get(*tag).unwrap();
985 if *nth_child < node.children.len() {
986 // Visit the child by pushing it to the stack.
987 // Also increase `nth_child` so that when we come back to the `tag` node, we
988 // look at the next child.
989 let next_child = node.children[*nth_child];
990 *nth_child += 1;
991 stack.push((next_child, 0));
992 continue;
993 } else {
994 // We have processed all children of `node`, so now it is time to process `node` itself.
995 // First, get the current children of `node`. To appease the borrow checker,
996 // we have to temporarily move the list out of the node, and then put the
997 // list of remaining children back in.
998 let mut children_of_node =
999 mem::take(&mut self.nodes.get_mut(*tag).unwrap().children);
1000 // Remove all useless children.
1001 children_of_node.retain_mut(|idx| {
1002 if self.is_useless(*idx, live) {
1003 // Delete `idx` node everywhere else.
1004 self.remove_useless_node(*idx);
1005 // And delete it from children_of_node.
1006 false
1007 } else {
1008 if let Some(nextchild) = self.can_be_replaced_by_single_child(*idx, live) {
1009 // `nextchild` is our grandchild, and will become our direct child.
1010 // Delete the in-between node, `idx`.
1011 self.remove_useless_node(*idx);
1012 // Set the new child's parent.
1013 self.nodes.get_mut(nextchild).unwrap().parent = Some(*tag);
1014 // Save the new child in children_of_node.
1015 *idx = nextchild;
1016 }
1017 // retain it
1018 true
1019 }
1020 });
1021 // Put back the now-filtered vector.
1022 self.nodes.get_mut(*tag).unwrap().children = children_of_node;
1023
1024 // We are done, the parent can continue.
1025 stack.pop();
1026 continue;
1027 }
1028 }
1029 }
1030}
1031
1032impl Node {
1033 pub fn default_location_state(&self) -> LocationState {
1034 LocationState::new_uninit(
1035 self.default_initial_perm,
1036 self.default_initial_idempotent_foreign_access,
1037 )
1038 }
1039}
1040
1041impl VisitProvenance for Tree {
1042 fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
1043 // To ensure that the root never gets removed, we visit it
1044 // (the `root` node of `Tree` is not an `Option<_>`)
1045 visit(None, Some(self.nodes.get(self.root).unwrap().tag))
1046 }
1047}
1048
1049/// Relative position of the access
1050#[derive(Clone, Copy, Debug, PartialEq, Eq)]
1051pub enum AccessRelatedness {
1052 /// The accessed pointer is the current one
1053 This,
1054 /// The accessed pointer is a (transitive) child of the current one.
1055 // Current pointer is excluded (unlike in some other places of this module
1056 // where "child" is inclusive).
1057 StrictChildAccess,
1058 /// The accessed pointer is a (transitive) parent of the current one.
1059 // Current pointer is excluded.
1060 AncestorAccess,
1061 /// The accessed pointer is neither of the above.
1062 // It's a cousin/uncle/etc., something in a side branch.
1063 CousinAccess,
1064}
1065
1066impl AccessRelatedness {
1067 /// Check whether the access is foreign, i.e. `AncestorAccess` or `CousinAccess`:
1068 /// the accessed pointer is neither the current one nor any of its transitive children.
1069 pub fn is_foreign(self) -> bool {
1070 matches!(self, AccessRelatedness::AncestorAccess | AccessRelatedness::CousinAccess)
1071 }
1072
1073 /// Given the AccessRelatedness for the parent node, compute the AccessRelatedness
1074 /// for the child node. This function assumes that we propagate away from the initial
1075 /// access.
1076 pub fn for_child(self) -> Self {
1077 use AccessRelatedness::*;
1078 match self {
1079 AncestorAccess | This => AncestorAccess,
1080 StrictChildAccess | CousinAccess => CousinAccess,
1081 }
1082 }
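
// Sketch: propagating away from the access, children of the accessed node see
// an `AncestorAccess`, while children of a node on the path above the access
// (which are necessarily on side branches) see a `CousinAccess`:
//
//     use AccessRelatedness::*;
//     assert_eq!(This.for_child(), AncestorAccess);
//     assert_eq!(StrictChildAccess.for_child(), CousinAccess);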
1083}