miri/borrow_tracker/stacked_borrows/stack.rs
#[cfg(feature = "stack-cache")]
use std::ops::Range;

use rustc_data_structures::fx::FxHashSet;
use tracing::trace;

use crate::borrow_tracker::stacked_borrows::{Item, Permission};
use crate::borrow_tracker::{AccessKind, BorTag};
use crate::{InterpResult, ProvenanceExtra, interp_ok};

/// Exactly what cache size we should use is a difficult trade-off. There will always be some
/// workload which has a `BorTag` working set which exceeds the size of the cache, and ends up
/// falling back to linear searches of the borrow stack very often.
/// The cost of making this value too large is that the loop in `Stack::insert` which ensures the
/// entries in the cache stay correct after an insert becomes expensive.
#[cfg(feature = "stack-cache")]
const CACHE_LEN: usize = 32;

/// Extra per-location state.
#[derive(Clone, Debug)]
pub struct Stack {
    /// Used *mostly* as a stack; never empty.
    /// Invariants:
    /// * Above a `SharedReadOnly` there can only be more `SharedReadOnly`.
    /// * Except for `Untagged`, no tag occurs in the stack more than once.
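    /// For intuition, a plausible (hypothetical) stack for one location could look like
    /// `[Unique(root), SharedReadWrite(raw), Unique(&mut), SharedReadOnly(&)]`: the
    /// shared-read-only reborrow sits on top, and every tag appears only once.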
    borrows: Vec<Item>,
    /// If this is `Some(id)`, then the actual current stack is unknown. This can happen when
    /// wildcard pointers are used to access this location. What we do know is that the items in
    /// `borrows` are at the top of the stack, and below them are arbitrarily many items whose
    /// `tag` is strictly less than `id`.
    /// When the bottom is unknown, `borrows` always has a `SharedReadOnly` or `Unique` at the bottom;
    /// we never have the unknown-to-known boundary in an SRW group.
    unknown_bottom: Option<BorTag>,

    /// A small LRU cache of searches of the borrow stack.
    #[cfg(feature = "stack-cache")]
    cache: StackCache,
    /// On a read, we need to disable all `Unique` above the granting item. We can avoid most of
    /// this scan by keeping track of the region of the borrow stack that may contain `Unique`s.
    #[cfg(feature = "stack-cache")]
    unique_range: Range<usize>,
}

impl Stack {
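    /// Garbage-collect this stack: drop items that are no longer needed, as determined by the
    /// per-permission rules in the `match` below (roughly, items whose tag is not in `tags` and
    /// which are not needed to delimit a `SharedReadWrite` block).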
    pub fn retain(&mut self, tags: &FxHashSet<BorTag>) {
        let mut first_removed = None;

        // We never consider removing the bottom-most tag. For stacks without an unknown
        // bottom this preserves the root tag.
        // Note that the algorithm below is based on considering the tag at read_idx - 1,
        // so precisely considering the tag at index 0 for removal when we have an unknown
        // bottom would complicate the implementation. The simplification of not considering
        // it does not have a significant impact on the degree to which the GC mitigates
        // memory growth.
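        // The loop below is a standard two-pointer, in-place compaction: `read_idx` visits every
        // item, `write_idx` marks where the next retained item goes, and dropped items are simply
        // overwritten. As a small illustration (permissions elided), if the items at indices 2 and
        // 4 are dropped, `[a, b, c, d, e, f]` is compacted to `[a, b, d, f]`.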
        let mut read_idx = 1;
        let mut write_idx = read_idx;
        while read_idx < self.borrows.len() {
            let left = self.borrows[read_idx - 1];
            let this = self.borrows[read_idx];
            let should_keep = match this.perm() {
                // SharedReadWrite is the simplest case: if it's unreachable we can just remove it.
                Permission::SharedReadWrite => tags.contains(&this.tag()),
                // Only retain a Disabled tag if it is terminating a SharedReadWrite block.
                Permission::Disabled => left.perm() == Permission::SharedReadWrite,
                // Unique and SharedReadOnly can terminate a SharedReadWrite block, so only remove
                // them if they are both unreachable and not directly after a SharedReadWrite.
                Permission::Unique | Permission::SharedReadOnly =>
                    left.perm() == Permission::SharedReadWrite || tags.contains(&this.tag()),
            };

            if should_keep {
                if read_idx != write_idx {
                    self.borrows[write_idx] = self.borrows[read_idx];
                }
                write_idx += 1;
            } else if first_removed.is_none() {
                first_removed = Some(read_idx);
            }

            read_idx += 1;
        }
        self.borrows.truncate(write_idx);

        #[cfg(not(feature = "stack-cache"))]
        let _unused = first_removed; // This is only needed for the stack-cache

        #[cfg(feature = "stack-cache")]
        if let Some(first_removed) = first_removed {
            // Either end of unique_range may have shifted; all we really know is that we can't
            // have introduced a new Unique.
            if !self.unique_range.is_empty() {
                self.unique_range = 0..self.len();
            }

            // Replace any Items which have been collected with the root item, a known-good value.
            for i in 0..CACHE_LEN {
                if self.cache.idx[i] >= first_removed {
                    self.cache.items[i] = self.borrows[0];
                    self.cache.idx[i] = 0;
                }
            }
        }
    }
}

/// A very small cache of searches of a borrow stack, mapping `Item`s to their position in said stack.
///
/// It may seem like maintaining this cache is a waste for small stacks, but
/// (a) iterating over small fixed-size arrays is super fast, and (b) empirically this helps *a lot*,
/// probably because runtime is dominated by large stacks.
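///
/// The two arrays are parallel: when the cache is valid, `items[i]` is a copy of the item stored
/// at `borrows[idx[i]]` in the owning `Stack` (this is exactly what `verify_cache_consistency`
/// asserts for full caches). Slot 0 is the most recently used entry.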
#[cfg(feature = "stack-cache")]
#[derive(Clone, Debug)]
struct StackCache {
    items: [Item; CACHE_LEN], // Hot in find_granting
    idx: [usize; CACHE_LEN],  // Hot in grant
}

#[cfg(feature = "stack-cache")]
impl StackCache {
    /// When a tag is used, we call this function to add or refresh it in the cache.
    ///
    /// We use the position in the cache to represent how recently a tag was used; the first position
    /// is the most recently used tag. So an add shifts every element towards the end, and inserts
    /// the new element at the start. We lose the last element.
    /// This strategy is effective at keeping the most-accessed items in the cache, but it costs a
    /// linear shift across the entire cache when we add a new tag.
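    ///
    /// As a small illustration (with a hypothetical `CACHE_LEN` of 4 rather than the real value):
    /// adding `idx: 9, item: E` to `items = [A, B, C, D]`, `idx = [3, 1, 7, 2]` shifts everything
    /// right and yields `items = [E, A, B, C]`, `idx = [9, 3, 1, 7]`; `D` and `2` fall off the end.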
    fn add(&mut self, idx: usize, item: Item) {
        self.items.copy_within(0..CACHE_LEN - 1, 1);
        self.items[0] = item;
        self.idx.copy_within(0..CACHE_LEN - 1, 1);
        self.idx[0] = idx;
    }
}

impl PartialEq for Stack {
    fn eq(&self, other: &Self) -> bool {
        let Stack {
            borrows,
            unknown_bottom,
            // The cache is ignored for comparison.
            #[cfg(feature = "stack-cache")]
            cache: _,
            #[cfg(feature = "stack-cache")]
            unique_range: _,
        } = self;
        *borrows == other.borrows && *unknown_bottom == other.unknown_bottom
    }
}

impl Eq for Stack {}

impl<'tcx> Stack {
    /// Panics if any of the caching mechanisms are broken:
    /// - a `StackCache` index does not refer to the item at that position in the stack,
    /// - a `Unique` item exists outside of `unique_range`.
    #[cfg(feature = "stack-cache-consistency-check")]
    fn verify_cache_consistency(&self) {
        // Only a full cache needs to be valid. Also see the comments in find_granting_cache
        // and set_unknown_bottom.
        if self.borrows.len() >= CACHE_LEN {
            for (tag, stack_idx) in self.cache.items.iter().zip(self.cache.idx.iter()) {
                assert_eq!(self.borrows[*stack_idx], *tag);
            }
        }

        // Check that all Unique items fall within unique_range.
        for (idx, item) in self.borrows.iter().enumerate() {
            if item.perm() == Permission::Unique {
                assert!(
                    self.unique_range.contains(&idx),
                    "{:?} {:?}",
                    self.unique_range,
                    self.borrows
                );
            }
        }

        // Check that the unique_range is a valid index into the borrow stack.
        // This asserts that the unique_range's start <= end.
        let _uniques = &self.borrows[self.unique_range.clone()];

        // We cannot assert that the unique range is precise.
        // Both ends may shift around when `Stack::retain` is called. Additionally,
        // when we pop items within the unique range, setting the end of the range precisely
        // requires doing a linear search of the borrow stack, which is exactly the kind of
        // operation that all this caching exists to avoid.
    }

    /// Find the item granting the given kind of access to the given tag, and return where
    /// it is on the stack. For wildcard tags, the given index is approximate, but if *no*
    /// index is given it means the match was *not* in the known part of the stack.
    /// `Ok(None)` indicates it matched the "unknown" part of the stack.
    /// `Err` indicates it was not found.
    pub(super) fn find_granting(
        &mut self,
        access: AccessKind,
        tag: ProvenanceExtra,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> Result<Option<usize>, ()> {
        #[cfg(feature = "stack-cache-consistency-check")]
        self.verify_cache_consistency();

        let ProvenanceExtra::Concrete(tag) = tag else {
            // Handle the wildcard case.
            // Go search the stack for an exposed tag.
            if let Some(idx) = self
                .borrows
                .iter()
                .enumerate() // we also need to know *where* in the stack
                .rev() // search top-to-bottom
                .find_map(|(idx, item)| {
                    // If the item fits and *might* be this wildcard, use it.
                    if item.perm().grants(access) && exposed_tags.contains(&item.tag()) {
                        Some(idx)
                    } else {
                        None
                    }
                })
            {
                return Ok(Some(idx));
            }
            // If we couldn't find it in the stack, check the unknown bottom.
            return if self.unknown_bottom.is_some() { Ok(None) } else { Err(()) };
        };

        if let Some(idx) = self.find_granting_tagged(access, tag) {
            return Ok(Some(idx));
        }

        // Couldn't find it in the stack; but if there is an unknown bottom it might be there.
        let found = self.unknown_bottom.is_some_and(|unknown_limit| {
            tag < unknown_limit // unknown_limit is an upper bound for what can be in the unknown bottom.
        });
        if found { Ok(None) } else { Err(()) }
    }

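    /// Find the item granting `access` to the concrete tag `tag`, consulting the cache first (if
    /// enabled) and falling back to a linear scan of the stack. Returns the item's position in
    /// `borrows`, or `None` if no granting item with that tag is on the stack.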
    fn find_granting_tagged(&mut self, access: AccessKind, tag: BorTag) -> Option<usize> {
        #[cfg(feature = "stack-cache")]
        if let Some(idx) = self.find_granting_cache(access, tag) {
            return Some(idx);
        }

        // If we didn't find the tag in the cache, fall back to a linear search of the
        // whole stack, and add the tag to the cache.
        for (stack_idx, item) in self.borrows.iter().enumerate().rev() {
            if tag == item.tag() && item.perm().grants(access) {
                #[cfg(feature = "stack-cache")]
                self.cache.add(stack_idx, *item);
                return Some(stack_idx);
            }
        }
        None
    }

    #[cfg(feature = "stack-cache")]
    fn find_granting_cache(&mut self, access: AccessKind, tag: BorTag) -> Option<usize> {
        // This looks like a common-sense optimization; we're going to do a linear search of the
        // cache or the borrow stack, whichever is shorter. That optimization is minuscule;
        // what this check really ensures is that we do not access an invalid cache.
        // When a stack is created and when items are removed from the top of the borrow stack, we
        // need some valid value to populate the cache. In both cases, we try to use the bottom
        // item. But when the stack is cleared in `set_unknown_bottom` there is nothing we could
        // place in the cache that is correct. But due to the way we populate the cache in
        // `StackCache::add`, we know that when the borrow stack has grown larger than the cache,
        // every slot in the cache is valid.
        if self.borrows.len() <= CACHE_LEN {
            return None;
        }
        // Search the cache for the tag we're looking up.
        let cache_idx = self.cache.items.iter().position(|t| t.tag() == tag)?;
        let stack_idx = self.cache.idx[cache_idx];
        // If we found the tag, check whether the cached item grants the required permission.
        if self.cache.items[cache_idx].perm().grants(access) {
            // If it does, and it's not already in the most-recently-used position, re-insert it at
            // the most-recently-used position. This technically reduces the efficiency of the
            // cache by duplicating elements, but current benchmarks do not seem to benefit from
            // avoiding this duplication.
            // But if the tag is in position 1, avoiding the duplicating add is trivial: a swap
            // with position 0 does the job.
            if cache_idx == 1 {
                self.cache.items.swap(0, 1);
                self.cache.idx.swap(0, 1);
            } else if cache_idx > 1 {
                self.cache.add(stack_idx, self.cache.items[cache_idx]);
            }
            Some(stack_idx)
        } else {
            // Tag is in the cache, but it doesn't grant the required permission.
            None
        }
    }

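    /// Insert `new` at position `new_idx` in the borrow stack, keeping the lookup cache and
    /// `unique_range` (when the stack-cache feature is enabled) consistent.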
    pub fn insert(&mut self, new_idx: usize, new: Item) {
        self.borrows.insert(new_idx, new);

        #[cfg(feature = "stack-cache")]
        self.insert_cache(new_idx, new);
    }

    #[cfg(feature = "stack-cache")]
    fn insert_cache(&mut self, new_idx: usize, new: Item) {
        // Adjust the possibly-unique range if an insert occurs before or within it.
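        // For example (hypothetical values), inserting at `new_idx == 3` when `unique_range` is
        // `2..5` widens it to `2..6`: the items previously at indices 3..5 all shift up by one,
        // while the `Unique` at index 2 stays put.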
        if self.unique_range.start >= new_idx {
            self.unique_range.start += 1;
        }
        if self.unique_range.end >= new_idx {
            self.unique_range.end += 1;
        }
        if new.perm() == Permission::Unique {
            // If this is the only Unique, set the range to contain just the new item.
            if self.unique_range.is_empty() {
                self.unique_range = new_idx..new_idx + 1;
            } else {
                // We already have other Unique items, expand the range to include the new item.
                self.unique_range.start = self.unique_range.start.min(new_idx);
                self.unique_range.end = self.unique_range.end.max(new_idx + 1);
            }
        }

        // The above insert changes the meaning of every index in the cache >= new_idx, so now
        // we need to find every one of those indexes and increment it.
        // But if the insert is at the end (equivalent to a push), we can skip this step because
        // it didn't change the position of any other items.
        if new_idx != self.borrows.len() - 1 {
            for idx in &mut self.cache.idx {
                if *idx >= new_idx {
                    *idx += 1;
                }
            }
        }

        // This primes the cache for the next access, which is almost always the just-added tag.
        self.cache.add(new_idx, new);

        #[cfg(feature = "stack-cache-consistency-check")]
        self.verify_cache_consistency();
    }

    /// Construct a new `Stack` using the passed `Item` as the root tag.
    pub fn new(item: Item) -> Self {
        Stack {
            borrows: vec![item],
            unknown_bottom: None,
            #[cfg(feature = "stack-cache")]
            cache: StackCache { idx: [0; CACHE_LEN], items: [item; CACHE_LEN] },
            #[cfg(feature = "stack-cache")]
            unique_range: if item.perm() == Permission::Unique { 0..1 } else { 0..0 },
        }
    }

    pub fn get(&self, idx: usize) -> Option<Item> {
        self.borrows.get(idx).cloned()
    }

    #[expect(clippy::len_without_is_empty)] // Stacks are never empty
    pub fn len(&self) -> usize {
        self.borrows.len()
    }

    pub fn unknown_bottom(&self) -> Option<BorTag> {
        self.unknown_bottom
    }

    pub fn set_unknown_bottom(&mut self, tag: BorTag) {
        // We clear the borrow stack but the lookup cache doesn't support clearing per se. Instead,
        // there is a check explained in `find_granting_cache` which protects against accessing the
        // cache when it has been cleared and not yet refilled.
        self.borrows.clear();
        self.unknown_bottom = Some(tag);
        #[cfg(feature = "stack-cache")]
        {
            self.unique_range = 0..0;
        }
    }

    /// Find all `Unique` elements in this borrow stack at or above `disable_start`, pass a copy of
    /// them to the `visitor`, then set their `Permission` to `Disabled`.
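    /// For example (a hypothetical stack), calling this with `disable_start == 2` on
    /// `[Unique, SharedReadWrite, Unique, Unique, SharedReadOnly]` disables the `Unique`s at
    /// indices 2 and 3 and leaves the one at index 0 untouched.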
    pub fn disable_uniques_starting_at(
        &mut self,
        disable_start: usize,
        mut visitor: impl FnMut(Item) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        #[cfg(feature = "stack-cache")]
        let unique_range = self.unique_range.clone();
        #[cfg(not(feature = "stack-cache"))]
        let unique_range = 0..self.len();

        if disable_start <= unique_range.end {
            let lower = unique_range.start.max(disable_start);
            let upper = unique_range.end;
            for item in &mut self.borrows[lower..upper] {
                if item.perm() == Permission::Unique {
                    trace!("access: disabling item {:?}", item);
                    visitor(*item)?;
                    item.set_permission(Permission::Disabled);
                    // Also update all copies of this item in the cache.
                    #[cfg(feature = "stack-cache")]
                    for it in &mut self.cache.items {
                        if it.tag() == item.tag() {
                            it.set_permission(Permission::Disabled);
                        }
                    }
                }
            }
        }

        #[cfg(feature = "stack-cache")]
        if disable_start <= self.unique_range.start {
            // We disabled all Unique items
            self.unique_range.start = 0;
            self.unique_range.end = 0;
        } else {
            // Truncate the range to only include items up to the index that we started disabling
            // at.
            self.unique_range.end = self.unique_range.end.min(disable_start);
        }

        #[cfg(feature = "stack-cache-consistency-check")]
        self.verify_cache_consistency();

        interp_ok(())
    }

    /// Pop all items with index `start` or greater off the stack (from the top down), passing
    /// each removed `Item` to `visitor` before it is discarded.
    pub fn pop_items_after<V: FnMut(Item) -> InterpResult<'tcx>>(
        &mut self,
        start: usize,
        mut visitor: V,
    ) -> InterpResult<'tcx> {
        while self.borrows.len() > start {
            let item = self.borrows.pop().unwrap();
            visitor(item)?;
        }

        #[cfg(feature = "stack-cache")]
        if !self.borrows.is_empty() {
            // After we remove from the borrow stack, every aspect of our caching may be invalid, but it is
            // also possible that the whole cache is still valid. So instead of resetting the whole
            // cache to a trivially valid default state, we repair only the aspects that are now
            // invalid.
            let base_tag = self.borrows[0];
            let mut removed = 0;
            let mut cursor = 0;
            // Remove invalid entries from the cache by rotating them to the end of the cache, then
            // keep track of how many invalid elements there are and overwrite them with the root tag.
            // The root tag here serves as a harmless default value.
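            // As a small illustration (with a hypothetical `CACHE_LEN` of 4): for `idx = [5, 9, 2, 7]`
            // and `start == 6`, the entry `9` is rotated to the back (`[5, 2, 7, 9]`, `removed == 1`)
            // and the loop ends with `cursor == 2`. Note that the loop below runs only `CACHE_LEN - 1`
            // times, so one slot (at index `CACHE_LEN - removed - 1`) is never examined; the overwrite
            // loop afterwards starts one slot early to conservatively clobber it as well (in the
            // example, that slot holds the also-invalid `7`).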
            for _ in 0..CACHE_LEN - 1 {
                if self.cache.idx[cursor] >= start {
                    self.cache.idx[cursor..CACHE_LEN - removed].rotate_left(1);
                    self.cache.items[cursor..CACHE_LEN - removed].rotate_left(1);
                    removed += 1;
                } else {
                    cursor += 1;
                }
            }
            for i in CACHE_LEN - removed - 1..CACHE_LEN {
                self.cache.idx[i] = 0;
                self.cache.items[i] = base_tag;
            }

            if start <= self.unique_range.start {
                // We removed all the Unique items
                self.unique_range = 0..0;
            } else {
                // Ensure the range doesn't extend past the new top of the stack
                self.unique_range.end = self.unique_range.end.min(start);
            }
        } else {
            self.unique_range = 0..0;
        }

        #[cfg(feature = "stack-cache-consistency-check")]
        self.verify_cache_consistency();
        interp_ok(())
    }
}