miri/alloc/isolated_alloc.rs
use std::alloc::Layout;
use std::ptr::NonNull;

use rustc_index::bit_set::DenseBitSet;

/// How many bytes of memory each bit in the bitset represents.
const COMPRESSION_FACTOR: usize = 4;

/// A dedicated allocator for interpreter memory contents, ensuring they are stored on pages of
/// their own (not mixed with Miri's own memory). This is used in native-lib mode.
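///
/// A minimal usage sketch (illustrative only, hence marked `ignore`; real call
/// sites in Miri differ):
///
/// ```ignore
/// use std::alloc::Layout;
///
/// let mut alloc = IsolatedAlloc::new();
/// let layout = Layout::from_size_align(64, 8).unwrap();
/// // SAFETY: `layout` has nonzero size.
/// let ptr = unsafe { alloc.alloc_zeroed(layout) };
/// // ... hand `ptr` to the interpreter or native code ...
/// // SAFETY: `ptr` was allocated above with this same layout and allocator.
/// unsafe { alloc.dealloc(ptr, layout) };
/// ```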
#[derive(Debug)]
pub struct IsolatedAlloc {
    /// Pointers to page-aligned memory that has been claimed by the allocator.
    /// Every pointer here must point to a page-sized allocation claimed via
    /// mmap. These pointers are used for "small" allocations.
    page_ptrs: Vec<NonNull<u8>>,
    /// Metadata about which bytes have been allocated on each page. The length
    /// of this vector must be the same as that of `page_ptrs`, and the domain
    /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
    ///
    /// Conceptually, each bit of the bitset represents the allocation status of
    /// one n-byte chunk on the corresponding element of `page_ptrs`. Thus,
    /// indexing into it should be done with a value one-nth of the corresponding
    /// offset on the matching `page_ptrs` element (n = `COMPRESSION_FACTOR`).
    page_infos: Vec<DenseBitSet<usize>>,
    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
    /// with their size stored as the second element of the vector.
    huge_ptrs: Vec<(NonNull<u8>, usize)>,
    /// The host (not emulated) page size.
    page_size: usize,
}

impl IsolatedAlloc {
    /// Creates an empty allocator.
    pub fn new() -> Self {
        Self {
            page_ptrs: Vec::new(),
            huge_ptrs: Vec::new(),
            page_infos: Vec::new(),
            // SAFETY: `sysconf(_SC_PAGESIZE)` is always safe to call at runtime.
            // See https://www.man7.org/linux/man-pages/man3/sysconf.3.html
            page_size: unsafe { libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap() },
        }
    }

    pub fn page_size(&self) -> usize {
        self.page_size
    }

    /// For simplicity, we serve small allocations in multiples of `COMPRESSION_FACTOR`
    /// bytes with at least that alignment.
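    ///
    /// For example, with `COMPRESSION_FACTOR = 4`, a request for 5 bytes at
    /// alignment 2 normalizes to 8 bytes at alignment 4: the size is rounded up
    /// to the next multiple of 4, and the alignment is raised to at least 4.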
    #[inline]
    fn normalized_layout(layout: Layout) -> Layout {
        let align =
            if layout.align() < COMPRESSION_FACTOR { COMPRESSION_FACTOR } else { layout.align() };
        let size = layout.size().next_multiple_of(COMPRESSION_FACTOR);
        Layout::from_size_align(size, align).unwrap()
    }

    /// For greater-than-page-sized allocations, returns the allocation size we need to request
    /// including the slack we need to satisfy the alignment request.
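    ///
    /// As a worked example, with a 4096-byte page size, a request for 5000 bytes
    /// at alignment 8192 rounds the size up to 8192 (2 pages) and needs 1 extra
    /// page of slack for the alignment, so we request 3 * 4096 = 12288 bytes.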
    #[inline]
    fn huge_normalized_layout(&self, layout: Layout) -> usize {
        // Allocate in page-sized chunks.
        let size = layout.size().next_multiple_of(self.page_size);
        // And make sure the alignment is at least one page.
        let align = std::cmp::max(layout.align(), self.page_size);
        // `pg_count` gives us the number of pages needed to satisfy the size. For
        // align > page_size where align = n * page_size, a sufficiently-aligned
        // address must exist somewhere in the range
        // some_page_aligned_address..some_page_aligned_address + (n-1) * page_size
        // (since if some_page_aligned_address + n * page_size is sufficiently aligned,
        // then so is some_page_aligned_address itself per the definition of n, so we
        // can avoid using that 1 extra page).
        // Thus we allocate n-1 extra pages.
        let pg_count = size.div_ceil(self.page_size);
        let extra_pages = align.strict_div(self.page_size).saturating_sub(1);

        pg_count.strict_add(extra_pages).strict_mul(self.page_size)
    }

    /// Determines whether a given normalized (size, align) should be sent to
    /// `alloc_huge` / `dealloc_huge`.
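    ///
    /// For example, with 4096-byte pages, any allocation of at least 2048 bytes,
    /// or with an alignment above 2048, is treated as huge.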
    #[inline]
    fn is_huge_alloc(&self, layout: &Layout) -> bool {
        layout.align() > self.page_size / 2 || layout.size() >= self.page_size / 2
    }

    /// Allocates memory as described in `Layout`. This memory should be deallocated
    /// by calling `dealloc` on this same allocator.
    ///
    /// SAFETY: See `alloc::alloc()`.
    pub unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 {
        // SAFETY: Upheld by caller
        unsafe { self.allocate(layout, false) }
    }

    /// Same as `alloc`, but zeroes out the memory.
    ///
    /// SAFETY: See `alloc::alloc_zeroed()`.
    pub unsafe fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
        // SAFETY: Upheld by caller
        unsafe { self.allocate(layout, true) }
    }

    /// Abstracts over the logic of `alloc_zeroed` vs `alloc`, as determined by
    /// the `zeroed` argument.
    ///
    /// SAFETY: See `alloc::alloc()`.
    unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
        let layout = IsolatedAlloc::normalized_layout(layout);
        if self.is_huge_alloc(&layout) {
            // SAFETY: Validity of `layout` upheld by caller; we checked that
            // the size and alignment are appropriate for being a huge alloc
            unsafe { self.alloc_huge(layout) }
        } else {
            for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
                // SAFETY: The value in `self.page_size` is used to allocate
                // `page`, with page alignment
                if let Some(ptr) =
                    unsafe { Self::alloc_small(self.page_size, layout, page, pinfo, zeroed) }
                {
                    return ptr;
                }
            }

            // We get here only if there's no space in our existing pages.
            let page_size = self.page_size;
            // Add another page and allocate from it; this cannot fail since the
            // new page is empty and we already checked that the request fits into a page.
            let (page, pinfo) = self.add_page();

            // SAFETY: See comment on `alloc_small` above.
            unsafe { Self::alloc_small(page_size, layout, page, pinfo, zeroed).unwrap() }
        }
    }

    /// Used internally by `allocate` to abstract over some logic.
    ///
    /// SAFETY: `page` must be a page-aligned pointer to an allocated page,
    /// where the allocation is (at least) `page_size` bytes.
    unsafe fn alloc_small(
        page_size: usize,
        layout: Layout,
        page: NonNull<u8>,
        pinfo: &mut DenseBitSet<usize>,
        zeroed: bool,
    ) -> Option<*mut u8> {
        // Check every alignment-sized block and see if there exists a `size`-sized
        // chunk of empty space, i.e. one where for every index in the chunk,
        // `!pinfo.contains(idx / COMPRESSION_FACTOR)`.
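        // (For example, with `COMPRESSION_FACTOR = 4`, a normalized 8-byte
        // allocation occupies 2 bits, and an offset of 16 bytes into the page
        // corresponds to bit index 4.)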
        for offset in (0..page_size).step_by(layout.align()) {
            let offset_pinfo = offset / COMPRESSION_FACTOR;
            let size_pinfo = layout.size() / COMPRESSION_FACTOR;
            // DenseBitSet::contains() panics if the index is out of bounds
            if pinfo.domain_size() < offset_pinfo + size_pinfo {
                break;
            }
            if !pinfo.contains_any(offset_pinfo..offset_pinfo + size_pinfo) {
                pinfo.insert_range(offset_pinfo..offset_pinfo + size_pinfo);
                // SAFETY: We checked via `domain_size` above that the page has
                // at least `offset + layout.size()` bytes, and via `contains_any`
                // that none of them are currently allocated. `page` points to
                // the start of the page, so adding `offset` stays in bounds.
                unsafe {
                    let ptr = page.add(offset);
                    if zeroed {
                        // Only write the bytes we were specifically asked to
                        // zero out, even if we allocated more
                        ptr.write_bytes(0, layout.size());
                    }
                    return Some(ptr.as_ptr());
                }
            }
        }
        None
    }

    /// Expands the available memory pool by adding one page.
    fn add_page(&mut self) -> (NonNull<u8>, &mut DenseBitSet<usize>) {
        // SAFETY: mmap is always safe to call when requesting anonymous memory
        let page_ptr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                self.page_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            )
            .cast::<u8>()
        };
        assert_ne!(page_ptr.addr(), usize::MAX, "mmap failed");
        // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
        assert!(self.page_size.is_multiple_of(COMPRESSION_FACTOR));
        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
        self.page_ptrs.push(NonNull::new(page_ptr).unwrap());
        (NonNull::new(page_ptr).unwrap(), self.page_infos.last_mut().unwrap())
    }

    /// Allocates in multiples of one page on the host system.
    /// Will always be zeroed.
    ///
    /// SAFETY: Same as `alloc()`.
    unsafe fn alloc_huge(&mut self, layout: Layout) -> *mut u8 {
        let size = self.huge_normalized_layout(layout);
        // SAFETY: mmap is always safe to call when requesting anonymous memory
        let ret = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            )
            .cast::<u8>()
        };
        assert_ne!(ret.addr(), usize::MAX, "mmap failed");
        self.huge_ptrs.push((NonNull::new(ret).unwrap(), size));
        // huge_normalized_layout ensures that we've overallocated enough space
        // for this to be valid.
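        // For example, if mmap returned 0x7000_1000 and the caller asked for
        // alignment 0x2000, the pointer is bumped to 0x7000_2000; the extra page
        // we requested above guarantees `layout.size()` bytes still fit.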
        ret.map_addr(|a| a.next_multiple_of(layout.align()))
    }

    /// Deallocates a pointer from this allocator.
    ///
    /// SAFETY: This pointer must have been allocated by calling `alloc()` (or
    /// `alloc_zeroed()`) on this same `IsolatedAlloc`, with the same layout as
    /// the one passed here.
    pub unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        let layout = IsolatedAlloc::normalized_layout(layout);

        if self.is_huge_alloc(&layout) {
            // SAFETY: Partly upheld by caller; and we checked the size and
            // align, meaning this must have been allocated via `alloc_huge`.
            unsafe {
                self.dealloc_huge(ptr, layout);
            }
        } else {
            // SAFETY: It's not a huge allocation, therefore it is a small one.
            let idx = unsafe { self.dealloc_small(ptr, layout) };

            // This may have been the last allocation on this page. If so, free the entire page.
            // FIXME: this can lead to threshold effects, we should probably add some form
            // of hysteresis.
            if self.page_infos[idx].is_empty() {
                self.page_infos.remove(idx);
                let page_ptr = self.page_ptrs.remove(idx);
                // SAFETY: We checked that there are no outstanding allocations
                // from us pointing to this page, and we know it was allocated
                // in add_page as exactly a single page.
                unsafe {
                    assert_eq!(libc::munmap(page_ptr.as_ptr().cast(), self.page_size), 0);
                }
            }
        }
    }

    /// Returns the index of the page that this was deallocated from.
    ///
    /// SAFETY: the pointer must have been allocated with `alloc_small`.
    unsafe fn dealloc_small(&mut self, ptr: *mut u8, layout: Layout) -> usize {
        // Offset of the pointer in the current page
        let offset = ptr.addr() % self.page_size;
        // And then the page's base address
        let page_addr = ptr.addr() - offset;
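        // For example, with 4096-byte pages, a pointer at address 0x7f00_1234
        // has offset 0x234 within the page starting at 0x7f00_1000.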

        // Find the page this allocation belongs to.
        // This could be made faster if the list were sorted -- the allocator isn't fully optimized at the moment.
        let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
            .enumerate()
            .find(|(_, (page, _))| page.addr().get() == page_addr);
        let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
            panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs)
        };
        // Mark this range as available in the page.
        let ptr_idx_pinfo = offset / COMPRESSION_FACTOR;
        let size_pinfo = layout.size() / COMPRESSION_FACTOR;
        for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
            pinfo.remove(idx);
        }
        idx_of_pinfo
    }

    /// SAFETY: Same as `dealloc()`, with the added requirement that `layout`
    /// must describe a huge allocation as determined by `is_huge_alloc`.
    unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) {
        let size = self.huge_normalized_layout(layout);
        // Find the huge allocation containing `ptr`.
        let idx = self
            .huge_ptrs
            .iter()
            .position(|&(pg, size)| {
                pg.addr().get() <= ptr.addr() && ptr.addr() < pg.addr().get().strict_add(size)
            })
            .expect("Freeing unallocated pages");
        // And kick it from the list
        let (un_offset_ptr, size2) = self.huge_ptrs.remove(idx);
        assert_eq!(size, size2, "got wrong layout in dealloc");
        // SAFETY: huge_ptrs contains allocations made with mmap with the size recorded there.
        unsafe {
            let ret = libc::munmap(un_offset_ptr.as_ptr().cast(), size);
            assert_eq!(ret, 0);
        }
    }

    /// Returns a list of page ranges managed by the allocator, given in terms of pointers
    /// and size (in bytes).
    pub fn pages(&self) -> impl Iterator<Item = (NonNull<u8>, usize)> {
        let pages = self.page_ptrs.iter().map(|&p| (p, self.page_size));
        pages.chain(self.huge_ptrs.iter().copied())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Helper function to assert that all bytes from `ptr` to `ptr.add(layout.size())`
    /// are zeroes.
    ///
    /// SAFETY: `ptr` must have been allocated with `layout`.
    unsafe fn assert_zeroes(ptr: *mut u8, layout: Layout) {
        // SAFETY: Caller ensures this is valid
        unsafe {
            for ofs in 0..layout.size() {
                assert_eq!(0, ptr.add(ofs).read());
            }
        }
    }

    /// Check that small (sub-pagesize) allocations are properly zeroed out.
    #[test]
    fn small_zeroes() {
        let mut alloc = IsolatedAlloc::new();
        // 256 should be less than the pagesize on *any* system
        let layout = Layout::from_size_align(256, 32).unwrap();
        // SAFETY: layout size is the constant above, not 0
        let ptr = unsafe { alloc.alloc_zeroed(layout) };
        // SAFETY: `ptr` was just allocated with `layout`
        unsafe {
            assert_zeroes(ptr, layout);
            alloc.dealloc(ptr, layout);
        }
    }

    /// Check that huge (> 1 page) allocations are properly zeroed out also.
    #[test]
    fn huge_zeroes() {
        let mut alloc = IsolatedAlloc::new();
        // 16k is about as big as pages get, e.g. on macOS aarch64
        let layout = Layout::from_size_align(16 * 1024, 128).unwrap();
        // SAFETY: layout size is the constant above, not 0
        let ptr = unsafe { alloc.alloc_zeroed(layout) };
        // SAFETY: `ptr` was just allocated with `layout`
        unsafe {
            assert_zeroes(ptr, layout);
            alloc.dealloc(ptr, layout);
        }
    }

    /// Check that repeatedly reallocating the same memory will still zero out
    /// everything properly.
    #[test]
    fn repeated_allocs() {
        let mut alloc = IsolatedAlloc::new();
        // Try both sub-pagesize allocs and those larger than / equal to a page
        for sz in (1..=(16 * 1024)).step_by(128) {
            let layout = Layout::from_size_align(sz, 1).unwrap();
            // SAFETY: all sizes in the range above are nonzero as we start from 1
            let ptr = unsafe { alloc.alloc_zeroed(layout) };
            // SAFETY: `ptr` was just allocated with `layout`, which was used
            // to bound the access size
            unsafe {
                assert_zeroes(ptr, layout);
                ptr.write_bytes(255, sz);
                alloc.dealloc(ptr, layout);
            }
        }
    }

    /// Checks that allocations of different sizes do not overlap, then checks
    /// for memory leaks that might have occurred.
    #[test]
    fn check_leaks_and_overlaps() {
        let mut alloc = IsolatedAlloc::new();

        // Some random sizes and aligns
        let mut sizes = vec![32; 10];
        sizes.append(&mut vec![15; 4]);
        sizes.append(&mut vec![256; 12]);
        // Give it some multi-page ones too
        sizes.append(&mut vec![32 * 1024; 4]);
        sizes.push(4 * 1024);

        // Matching aligns for the sizes
        let mut aligns = vec![16; 12];
        aligns.append(&mut vec![256; 2]);
        aligns.append(&mut vec![64; 12]);
        aligns.append(&mut vec![4096; 4]);
        // And one that requests align > page_size
        aligns.push(64 * 1024);

        // Make sure we didn't mess up in the test itself!
        assert_eq!(sizes.len(), aligns.len());

        // Aggregate the sizes and aligns into a vec of layouts, then allocate them
        let layouts: Vec<_> = std::iter::zip(sizes, aligns)
            .map(|(sz, al)| Layout::from_size_align(sz, al).unwrap())
            .collect();
        // SAFETY: all sizes specified in `sizes` are nonzero
        let ptrs: Vec<_> =
            layouts.iter().map(|layout| unsafe { alloc.alloc_zeroed(*layout) }).collect();

        for (&ptr, &layout) in std::iter::zip(&ptrs, &layouts) {
            // We requested zeroed allocations, so check that that's true.
            // Then write to the end of the current size, so if the allocs
            // overlap (or the zeroing is wrong) then `assert_zeroes` will panic.
            // Also check that the alignment we asked for was respected.
            assert_eq!(ptr.addr().strict_rem(layout.align()), 0);
            // SAFETY: each `ptr` was allocated with its corresponding `layout`,
            // which is used to bound the access size
            unsafe {
                assert_zeroes(ptr, layout);
                ptr.write_bytes(255, layout.size());
                alloc.dealloc(ptr, layout);
            }
        }

        // And then verify that no memory was leaked after all that
        assert!(alloc.page_ptrs.is_empty() && alloc.huge_ptrs.is_empty());
    }
}