miri/alloc_addresses/address_generator.rs

use std::ops::Range;

use rand::Rng;
use rustc_abi::{Align, Size};
use rustc_const_eval::interpret::{InterpResult, interp_ok};
use rustc_middle::{err_exhaust, throw_exhaust};

/// Rounds `addr` up to the smallest multiple of `align` that is greater than or equal to
/// `addr`, i.e., aligns `addr` to `align`.
fn align_addr(addr: u64, align: u64) -> u64 {
    match addr % align {
        0 => addr,
        rem => addr.strict_add(align) - rem,
    }
}

/// This provides the logic to generate addresses for memory allocations in a given address range.
#[derive(Debug)]
pub struct AddressGenerator {
    /// This is used as a memory address when a new pointer is cast to an integer. It
    /// is always larger than any address that was previously made part of a block.
    next_base_addr: u64,
    /// This is the last address that can be allocated.
    end: u64,
}
impl AddressGenerator {
    pub fn new(addr_range: Range<u64>) -> Self {
        Self { next_base_addr: addr_range.start, end: addr_range.end }
    }

    /// Get the remaining range where this `AddressGenerator` can still allocate addresses.
    pub fn get_remaining(&self) -> Range<u64> {
        self.next_base_addr..self.end
    }

    /// Generate a new address with the specified size and alignment, using the given `Rng` to add
    /// some randomness. The returned allocation is guaranteed not to overlap with any address
    /// range given out by this generator before. Returns an error if the allocation request
    /// cannot be fulfilled.
    pub fn generate<'tcx, R: Rng>(
        &mut self,
        size: Size,
        align: Align,
        rng: &mut R,
    ) -> InterpResult<'tcx, u64> {
        // Leave some space after the previous allocation, to give it some chance to be less aligned.
        // We ensure that `(self.next_base_addr + slack) % 16` is uniformly distributed.
        let slack = rng.random_range(0..16);
        // From `next_base_addr + slack`, round up to adjust for alignment.
        let base_addr =
            self.next_base_addr.checked_add(slack).ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
        let base_addr = align_addr(base_addr, align.bytes());

        // Remember the next base address. If this allocation is zero-sized, leave a gap of at
        // least 1 to avoid two allocations having the same base address. (The logic in
        // `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
        // need to be distinguishable!)
        self.next_base_addr = base_addr
            .checked_add(size.bytes().max(1))
            .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
        // Even if `Size` didn't overflow, we might still have filled up the address space.
        if self.next_base_addr > self.end {
            throw_exhaust!(AddressSpaceFull);
        }
        interp_ok(base_addr)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_align_addr() {
        assert_eq!(align_addr(37, 4), 40);
        assert_eq!(align_addr(44, 4), 44);
    }
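
    // Illustrative sketch, not from the original file: exercises `new`, `get_remaining`, and
    // `generate` end to end. It assumes that `rand`'s `StdRng`/`SeedableRng` (behind rand's
    // default `std_rng` feature) and `InterpResult::unwrap` are available in this build.
    #[test]
    fn test_generate_is_aligned_and_disjoint() {
        use rand::SeedableRng;
        use rand::rngs::StdRng;

        let mut rng = StdRng::seed_from_u64(42);
        let mut generator = AddressGenerator::new(0x100..0x1_0000);
        // A fresh generator still has its entire range available.
        assert_eq!(generator.get_remaining(), 0x100..0x1_0000);

        let align = Align::from_bytes(8).unwrap();
        let first = generator.generate(Size::from_bytes(24u64), align, &mut rng).unwrap();
        let second = generator.generate(Size::from_bytes(24u64), align, &mut rng).unwrap();

        // Both addresses respect the requested alignment.
        assert_eq!(first % 8, 0);
        assert_eq!(second % 8, 0);
        // The second allocation starts past the end of the first, so the two cannot overlap.
        assert!(second >= first + 24);
        // The remaining range shrinks as addresses are handed out.
        assert!(generator.get_remaining().start > second);
    }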
}