miri/alloc_addresses/reuse_pool.rs
//! Manages a pool of addresses that can be reused.
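//!
//! Addresses are offered to the pool via [`ReusePool::add_addr`] and handed back out via
//! [`ReusePool::take_addr`]; both are probabilistic, gated on the configured reuse rates.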
use rand::Rng;
use rustc_abi::{Align, Size};

use crate::concurrency::VClock;
use crate::{MemoryKind, MiriConfig, ThreadId};

const MAX_POOL_SIZE: usize = 64;
/// The pool strikes a balance between exploring more possible executions and making it more likely
/// to find bugs. The hypothesis is that bugs are more likely to occur when reuse happens for
/// allocations with the same layout, since that can trigger e.g. ABA issues in a concurrent data
/// structure. Therefore we only reuse allocations when size and alignment match exactly.
#[derive(Debug)]
pub struct ReusePool {
    address_reuse_rate: f64,
    address_reuse_cross_thread_rate: f64,
    /// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable
    /// allocations as address-size pairs; the list must be sorted by size and then by thread ID.
    ///
    /// Each of these lists has at most MAX_POOL_SIZE elements, and since alignment is limited to
    /// fewer than 64 different possible values, that bounds the overall size of the pool.
    ///
    /// We also store the ID and the data-race clock of the thread that donated this pool element,
    /// to ensure synchronization with the thread that picks up this address.
    pool: Vec<Vec<(u64, Size, ThreadId, VClock)>>,
}
impl ReusePool {
    pub fn new(config: &MiriConfig) -> Self {
        ReusePool {
            address_reuse_rate: config.address_reuse_rate,
            address_reuse_cross_thread_rate: config.address_reuse_cross_thread_rate,
            pool: vec![],
        }
    }

    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> {
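        // `Align` is always a power of two, so `trailing_zeros` yields `log2(align)`, i.e. the
        // index `i` of the subpool holding allocations of alignment `2^i`. The outer vector grows
        // lazily, on first use of a given alignment.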
        let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
        if self.pool.len() <= pool_idx {
            self.pool.resize(pool_idx + 1, Vec::new());
        }
        &mut self.pool[pool_idx]
    }

    pub fn add_addr(
        &mut self,
        rng: &mut impl Rng,
        addr: u64,
        size: Size,
        align: Align,
        kind: MemoryKind,
        thread: ThreadId,
        clock: impl FnOnce() -> VClock,
    ) {
        // Let's see if we even want to remember this address.
        // We don't remember stack addresses: there are a lot of them (so the perf impact is big),
        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
        // undesired synchronization.
        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
            return;
        }
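        // The clock is passed as a closure so that it only gets computed when we actually keep the
        // address, i.e. not when we bailed out early above.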
        let clock = clock();
        // Determine the pool to add this to, and where in the pool to put it.
        let subpool = self.subpool(align);
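        // `partition_point` returns the first index whose `(size, thread)` key is not less than
        // ours, so inserting there keeps the subpool sorted by size and then by thread ID.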
        let pos = subpool.partition_point(|(_addr, other_size, other_thread, _)| {
            (*other_size, *other_thread) < (size, thread)
        });
        // Make sure the pool does not grow too big.
        if subpool.len() >= MAX_POOL_SIZE {
            // Pool full. Overwrite the element at the insertion position, or the last one if the
            // new entry would sort after all existing elements.
            let clamped_pos = pos.min(subpool.len() - 1);
            subpool[clamped_pos] = (addr, size, thread, clock);
            return;
        }
        // Add address to pool, at the right position.
        subpool.insert(pos, (addr, size, thread, clock));
    }

    /// Returns the address to use and optionally a clock we have to synchronize with.
    pub fn take_addr(
        &mut self,
        rng: &mut impl Rng,
        size: Size,
        align: Align,
        kind: MemoryKind,
        thread: ThreadId,
    ) -> Option<(u64, Option<VClock>)> {
        // Determine whether we'll even attempt a reuse. As above, we don't do reuse for stack addresses.
        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
            return None;
        }
        let cross_thread_reuse = rng.gen_bool(self.address_reuse_cross_thread_rate);
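        // If `cross_thread_reuse` is false we only hand out addresses donated by the current
        // thread; otherwise any thread's donation is eligible, at the cost of the extra
        // synchronization encoded in the returned clock.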
        // Determine the pool to take this from.
        let subpool = self.subpool(align);
        // Let's see if we can find something of the right size. We want to find the full range of
        // such items, beginning with the first, so we can't use `binary_search_by_key`. If we do
        // *not* want to consider other threads' allocations, we effectively use the lexicographic
        // order on `(size, thread)`.
        let begin = subpool.partition_point(|(_addr, other_size, other_thread, _)| {
            *other_size < size
                || (*other_size == size && !cross_thread_reuse && *other_thread < thread)
        });
        let mut end = begin;
        while let Some((_addr, other_size, other_thread, _)) = subpool.get(end) {
            if *other_size != size {
                break;
            }
            if !cross_thread_reuse && *other_thread != thread {
                // We entered the allocations of another thread.
                break;
            }
            end += 1;
        }
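        // `begin..end` now covers exactly the candidates with matching size (and matching thread,
        // if cross-thread reuse is disabled for this call).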
        if end == begin {
            // Could not find any item of the right size.
            return None;
        }
        // Pick a random element with the desired size.
        let idx = rng.gen_range(begin..end);
        // Remove it from the pool and return.
        let (chosen_addr, chosen_size, chosen_thread, clock) = subpool.remove(idx);
        debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0);
        debug_assert!(cross_thread_reuse || chosen_thread == thread);
        // No synchronization needed if we reused from the current thread.
        Some((chosen_addr, if chosen_thread == thread { None } else { Some(clock) }))
    }
}