miri/alloc_addresses/mod.rs

//! This module is responsible for managing the absolute addresses that allocations are located at,
//! and for casting between pointers and integers based on those addresses.

mod reuse_pool;

use std::cell::RefCell;
use std::cmp::max;

use rand::Rng;
use rustc_abi::{Align, Size};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};

use self::reuse_pool::ReusePool;
use crate::concurrency::VClock;
use crate::*;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProvenanceMode {
    /// We support `expose_provenance`/`with_exposed_provenance` via "wildcard" provenance.
    /// However, we warn on `with_exposed_provenance` to alert the user of the precision loss.
    Default,
    /// Like `Default`, but without the warning.
    Permissive,
    /// We error on `with_exposed_provenance`, ensuring no precision loss.
    Strict,
}
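
// Illustrative (not part of this module): the mode above governs how Miri treats
// user code that round-trips a pointer through an integer, e.g.
//
//     let x = 42u8;
//     let addr = (&raw const x).expose_provenance();
//     let p = std::ptr::with_exposed_provenance::<u8>(addr);
//
// `Default` warns on the `with_exposed_provenance` step, `Permissive` allows it
// silently, and `Strict` rejects it.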

pub type GlobalState = RefCell<GlobalStateInner>;

#[derive(Debug)]
pub struct GlobalStateInner {
    /// This is used as a map between the address of each allocation and its `AllocId`. It is always
    /// sorted by address. We cannot use a `HashMap` since we can be given an address that is offset
    /// from the base address, and we need to find the `AllocId` it belongs to. This is not the
    /// *full* inverse of `base_addr`; dead allocations have been removed.
    int_to_ptr_map: Vec<(u64, AllocId)>,
    /// The base address for each allocation. We cannot put that into
    /// `AllocExtra` because function pointers also have a base address, and
    /// they do not have an `AllocExtra`.
    /// This is the inverse of `int_to_ptr_map`.
    base_addr: FxHashMap<AllocId, u64>,
    /// Temporarily store prepared memory space for global allocations the first time their memory
    /// address is required. This is used to ensure that the memory is allocated before Miri assigns
    /// it an internal address, which is important for matching the internal address to the machine
    /// address so that FFI can read from pointers.
    prepared_alloc_bytes: FxHashMap<AllocId, MiriAllocBytes>,
    /// A pool of addresses we can reuse for future allocations.
    reuse: ReusePool,
    /// Whether an allocation has been exposed or not. This cannot be put
    /// into `AllocExtra` for the same reason as `base_addr`.
    exposed: FxHashSet<AllocId>,
    /// This is used as a memory address when a new pointer is cast to an integer. It
    /// is always larger than any address that was previously made part of a block.
    next_base_addr: u64,
    /// The provenance to use for int2ptr casts.
    provenance_mode: ProvenanceMode,
}

impl VisitProvenance for GlobalStateInner {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        let GlobalStateInner {
            int_to_ptr_map: _,
            base_addr: _,
            prepared_alloc_bytes: _,
            reuse: _,
            exposed: _,
            next_base_addr: _,
            provenance_mode: _,
        } = self;
        // Though base_addr, int_to_ptr_map, and exposed contain AllocIds, we do not want to visit them.
        // int_to_ptr_map and exposed must contain only live allocations, and those
        // are never garbage collected.
        // base_addr is only relevant if we have a pointer to an AllocId and need to look up its
        // base address; so if an AllocId is not reachable from somewhere else we can remove it
        // here.
    }
}

impl GlobalStateInner {
    pub fn new(config: &MiriConfig, stack_addr: u64) -> Self {
        GlobalStateInner {
            int_to_ptr_map: Vec::default(),
            base_addr: FxHashMap::default(),
            prepared_alloc_bytes: FxHashMap::default(),
            reuse: ReusePool::new(config),
            exposed: FxHashSet::default(),
            next_base_addr: stack_addr,
            provenance_mode: config.provenance_mode,
        }
    }

    pub fn remove_unreachable_allocs(&mut self, allocs: &LiveAllocs<'_, '_>) {
        // `exposed` and `int_to_ptr_map` are cleared immediately when an allocation
        // is freed, so `base_addr` is the only one we have to clean up based on the GC.
        self.base_addr.retain(|id, _| allocs.is_live(*id));
    }
}

/// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple
/// of `align` that is greater than or equal to `addr`.
fn align_addr(addr: u64, align: u64) -> u64 {
    match addr % align {
        0 => addr,
        rem => addr.strict_add(align) - rem,
    }
}
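
// For example, `align_addr(37, 4)` computes `37 % 4 == 1` and returns
// `37 + 4 - 1 == 40`, the next multiple of 4; an already-aligned address such
// as `44` is returned unchanged (see the tests at the bottom of this file).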

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    // Returns the exposed `AllocId` that corresponds to the specified addr,
    // or `None` if the addr is out of bounds.
    fn alloc_id_from_addr(&self, addr: u64, size: i64) -> Option<AllocId> {
        let this = self.eval_context_ref();
        let global_state = this.machine.alloc_addresses.borrow();
        assert!(global_state.provenance_mode != ProvenanceMode::Strict);

        // We always search the allocation to the right of this address. So if the size is strictly
        // negative, we have to search for `addr-1` instead.
        let addr = if size >= 0 { addr } else { addr.saturating_sub(1) };
        let pos = global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr);

        // Determine the in-bounds provenance for this pointer.
        let alloc_id = match pos {
            Ok(pos) => Some(global_state.int_to_ptr_map[pos].1),
            Err(0) => None,
            Err(pos) => {
                // This is the largest of the addresses smaller than `addr`,
                // i.e. the greatest lower bound (glb).
                let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
                // This never overflows because `addr >= glb`.
                let offset = addr - glb;
                // We require this to be strictly in-bounds of the allocation. This arm is only
                // entered for addresses that are not the base address, so even zero-sized
                // allocations will get recognized at their base address -- but all other
                // allocations will *not* be recognized at their "end" address.
                let size = this.get_alloc_info(alloc_id).size;
                if offset < size.bytes() { Some(alloc_id) } else { None }
            }
        }?;

        // We only use this provenance if it has been exposed.
        if global_state.exposed.contains(&alloc_id) {
            // This must still be live, since we remove allocations from `int_to_ptr_map` when they get freed.
            debug_assert!(this.is_alloc_live(alloc_id));
            Some(alloc_id)
        } else {
            None
        }
    }
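
    // Worked example (illustrative): if `int_to_ptr_map` is `[(0x100, A), (0x200, B)]`
    // and `A` has size 0x10, then looking up `addr = 0x108` yields `Err(1)` from the
    // binary search; the glb entry is `(0x100, A)`, and since the offset `0x8 < 0x10`,
    // `A` is returned (if exposed). Looking up `0x110`, one past the end of `A`,
    // yields `None`.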

    fn addr_from_alloc_id_uncached(
        &self,
        global_state: &mut GlobalStateInner,
        alloc_id: AllocId,
        memory_kind: MemoryKind,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let mut rng = this.machine.rng.borrow_mut();
        let info = this.get_alloc_info(alloc_id);
        // This is either called immediately after allocation (and then cached), or when
        // adjusting `tcx` pointers (which never get freed). So assert that we are looking
        // at a live allocation. This also ensures that we never re-assign an address to an
        // allocation that previously had an address, but then was freed and the address
        // information was removed.
        assert!(!matches!(info.kind, AllocKind::Dead));

        // This allocation does not have a base address yet, pick or reuse one.
        if this.machine.native_lib.is_some() {
            // In native lib mode, we use the "real" address of the bytes for this allocation.
            // This ensures the interpreted program and native code have the same view of memory.
            let base_ptr = match info.kind {
                AllocKind::LiveData => {
                    if this.tcx.try_get_global_alloc(alloc_id).is_some() {
                        // For new global allocations, we always pre-allocate the memory to be able to use the machine address directly.
                        let prepared_bytes = MiriAllocBytes::zeroed(info.size, info.align)
                            .unwrap_or_else(|| {
                                panic!("Miri ran out of memory: cannot create allocation of {size:?} bytes", size = info.size)
                            });
                        let ptr = prepared_bytes.as_ptr();
                        // Store prepared allocation space to be picked up for use later.
                        global_state
                            .prepared_alloc_bytes
                            .try_insert(alloc_id, prepared_bytes)
                            .unwrap();
                        ptr
                    } else {
                        this.get_alloc_bytes_unchecked_raw(alloc_id)?
                    }
                }
                AllocKind::Function | AllocKind::VTable => {
                    // Allocate some dummy memory to get a unique address for this function/vtable.
                    let alloc_bytes =
                        MiriAllocBytes::from_bytes(&[0u8; 1], Align::from_bytes(1).unwrap());
                    let ptr = alloc_bytes.as_ptr();
                    // Leak the underlying memory to ensure it remains unique.
                    std::mem::forget(alloc_bytes);
                    ptr
                }
                AllocKind::Dead => unreachable!(),
            };
            // Ensure this pointer's provenance is exposed, so that it can be used by FFI code.
            return interp_ok(base_ptr.expose_provenance().try_into().unwrap());
        }
        // We are not in native lib mode, so we control the addresses ourselves.
        if let Some((reuse_addr, clock)) = global_state.reuse.take_addr(
            &mut *rng,
            info.size,
            info.align,
            memory_kind,
            this.active_thread(),
        ) {
            if let Some(clock) = clock {
                this.acquire_clock(&clock);
            }
            interp_ok(reuse_addr)
        } else {
            // We have to pick a fresh address.
            // Leave some space after the previous allocation, to give it some chance to be less aligned.
            // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
            let slack = rng.random_range(0..16);
            // From next_base_addr + slack, round up to adjust for alignment.
            let base_addr = global_state
                .next_base_addr
                .checked_add(slack)
                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
            let base_addr = align_addr(base_addr, info.align.bytes());

            // Remember next base address. If this allocation is zero-sized, leave a gap of at
            // least 1 to avoid two allocations having the same base address. (The logic in
            // `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
            // need to be distinguishable!)
            global_state.next_base_addr = base_addr
                .checked_add(max(info.size.bytes(), 1))
                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
            // Even if `Size` didn't overflow, we might still have filled up the address space.
            if global_state.next_base_addr > this.target_usize_max() {
                throw_exhaust!(AddressSpaceFull);
            }

            interp_ok(base_addr)
        }
    }
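
    // Fresh-address example (illustrative): with `next_base_addr = 0x1000`, a random
    // slack of 5, and a requested alignment of 16, the base address becomes
    // `align_addr(0x1005, 16) == 0x1010`; a 3-byte allocation then advances
    // `next_base_addr` to 0x1013.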

    fn addr_from_alloc_id(
        &self,
        alloc_id: AllocId,
        memory_kind: MemoryKind,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let mut global_state = this.machine.alloc_addresses.borrow_mut();
        let global_state = &mut *global_state;

        match global_state.base_addr.get(&alloc_id) {
            Some(&addr) => interp_ok(addr),
            None => {
                // First time we're looking for the absolute address of this allocation.
                let base_addr =
                    self.addr_from_alloc_id_uncached(global_state, alloc_id, memory_kind)?;
                trace!("Assigning base address {:#x} to allocation {:?}", base_addr, alloc_id);

                // Store address in cache.
                global_state.base_addr.try_insert(alloc_id, base_addr).unwrap();

                // Also maintain the opposite mapping in `int_to_ptr_map`, ensuring we keep it sorted.
                // We have a fast-path for the common case that this address is bigger than all previous ones.
                let pos = if global_state
                    .int_to_ptr_map
                    .last()
                    .is_some_and(|(last_addr, _)| *last_addr < base_addr)
                {
                    global_state.int_to_ptr_map.len()
                } else {
                    global_state
                        .int_to_ptr_map
                        .binary_search_by_key(&base_addr, |(addr, _)| *addr)
                        .unwrap_err()
                };
                global_state.int_to_ptr_map.insert(pos, (base_addr, alloc_id));

                interp_ok(base_addr)
            }
        }
    }
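
    // Note on the fast path above (illustrative): fresh addresses from
    // `addr_from_alloc_id_uncached` are monotonically increasing, so appending at
    // `len()` is the common case; only addresses reused from the pool need the
    // binary search to find their sorted position.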
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn expose_ptr(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        let mut global_state = this.machine.alloc_addresses.borrow_mut();
        // In strict mode, we don't need this, so we can save some cycles by not tracking it.
        if global_state.provenance_mode == ProvenanceMode::Strict {
            return interp_ok(());
        }
        // Exposing a dead alloc is a no-op, because it's not possible to get a dead allocation
        // via int2ptr.
        if !this.is_alloc_live(alloc_id) {
            return interp_ok(());
        }
        trace!("Exposing allocation id {alloc_id:?}");
        global_state.exposed.insert(alloc_id);
        // Release the global state before we call `expose_tag`, which may call `get_alloc_info_extra`,
        // which may need access to the global state.
        drop(global_state);
        if this.machine.borrow_tracker.is_some() {
            this.expose_tag(alloc_id, tag)?;
        }
        interp_ok(())
    }

    fn ptr_from_addr_cast(&self, addr: u64) -> InterpResult<'tcx, Pointer> {
        trace!("Casting {:#x} to a pointer", addr);

        let this = self.eval_context_ref();
        let global_state = this.machine.alloc_addresses.borrow();

        // Potentially emit a warning.
        match global_state.provenance_mode {
            ProvenanceMode::Default => {
                // The first time this happens at a particular location, print a warning.
                let mut int2ptr_warned = this.machine.int2ptr_warned.borrow_mut();
                let first = int2ptr_warned.is_empty();
                if int2ptr_warned.insert(this.cur_span()) {
                    // Newly inserted, so first time we see this span.
                    this.emit_diagnostic(NonHaltingDiagnostic::Int2Ptr { details: first });
                }
            }
            ProvenanceMode::Strict => {
                throw_machine_stop!(TerminationInfo::Int2PtrWithStrictProvenance);
            }
            ProvenanceMode::Permissive => {}
        }

        // We do *not* look up the `AllocId` here! This is a `ptr as usize` cast, and it is
        // completely legal to do a cast and then `wrapping_offset` to another allocation and only
        // *then* do a memory access. So the allocation that the pointer happens to point to on a
        // cast is fairly irrelevant. Instead we generate this as a "wildcard" pointer, such that
        // *every time the pointer is used*, we do an `AllocId` lookup to find the (exposed)
        // allocation it might be referencing.
        interp_ok(Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)))
    }
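
    // Illustration (user-code perspective): wildcard provenance is what makes the
    // following legal under the Default/Permissive modes, even though the final
    // pointer is derived purely from an integer:
    //
    //     let a = [0u8; 8];
    //     let addr = &a as *const _ as usize; // exposing cast
    //     let p = (addr + 4) as *const u8;    // wildcard pointer
    //     unsafe { p.read() };                // AllocId lookup happens here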

    /// Convert a relative (tcx) pointer to a Miri pointer.
    fn adjust_alloc_root_pointer(
        &self,
        ptr: interpret::Pointer<CtfeProvenance>,
        tag: BorTag,
        kind: MemoryKind,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let this = self.eval_context_ref();

        let (prov, offset) = ptr.into_parts(); // offset is relative (AllocId provenance)
        let alloc_id = prov.alloc_id();

        // Get a pointer to the beginning of this allocation.
        let base_addr = this.addr_from_alloc_id(alloc_id, kind)?;
        let base_ptr = interpret::Pointer::new(
            Provenance::Concrete { alloc_id, tag },
            Size::from_bytes(base_addr),
        );
        // Add offset with the right kind of pointer-overflowing arithmetic.
        interp_ok(base_ptr.wrapping_offset(offset, this))
    }
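
    // For instance (illustrative), a tcx pointer `(alloc_id, offset 4)` into an
    // allocation whose base address resolves to 0x1000 becomes the absolute
    // pointer 0x1004, carrying `Provenance::Concrete { alloc_id, tag }`.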

    // This returns some prepared `MiriAllocBytes`, either because `addr_from_alloc_id` reserved
    // memory space in the past, or because the call below does the pre-allocation on the spot.
    fn get_global_alloc_bytes(
        &self,
        id: AllocId,
        kind: MemoryKind,
        bytes: &[u8],
        align: Align,
    ) -> InterpResult<'tcx, MiriAllocBytes> {
        let this = self.eval_context_ref();
        if this.machine.native_lib.is_some() {
            // In native lib mode, MiriAllocBytes for global allocations are handled via `prepared_alloc_bytes`.
            // This additional call ensures that some `MiriAllocBytes` are always prepared, just in case
            // this function gets called before the first time `addr_from_alloc_id` gets called.
            this.addr_from_alloc_id(id, kind)?;
            // The memory we need here will have already been allocated during an earlier call to
            // `addr_from_alloc_id` for this allocation. So don't create a new `MiriAllocBytes` here, instead
            // fetch the previously prepared bytes from `prepared_alloc_bytes`.
            let mut global_state = this.machine.alloc_addresses.borrow_mut();
            let mut prepared_alloc_bytes = global_state
                .prepared_alloc_bytes
                .remove(&id)
                .unwrap_or_else(|| panic!("alloc bytes for {id:?} have not been prepared"));
            // Sanity-check that the prepared allocation has the right size and alignment.
            assert!(prepared_alloc_bytes.as_ptr().is_aligned_to(align.bytes_usize()));
            assert_eq!(prepared_alloc_bytes.len(), bytes.len());
            // Copy allocation contents into prepared memory.
            prepared_alloc_bytes.copy_from_slice(bytes);
            interp_ok(prepared_alloc_bytes)
        } else {
            interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
        }
    }

    /// When a pointer is used for a memory access, this computes which allocation the
    /// access is going to, and the offset within that allocation.
    fn ptr_get_alloc(
        &self,
        ptr: interpret::Pointer<Provenance>,
        size: i64,
    ) -> Option<(AllocId, Size)> {
        let this = self.eval_context_ref();

        let (tag, addr) = ptr.into_parts(); // addr is absolute (Tag provenance)

        let alloc_id = if let Provenance::Concrete { alloc_id, .. } = tag {
            alloc_id
        } else {
            // A wildcard pointer.
            this.alloc_id_from_addr(addr.bytes(), size)?
        };

        // This cannot fail: since we already have a pointer with that provenance, adjust_alloc_root_pointer
        // must have been called in the past, so we can just look up the address in the map.
        let base_addr = *this.machine.alloc_addresses.borrow().base_addr.get(&alloc_id).unwrap();

        // Wrapping "addr - base_addr".
        let rel_offset = this.truncate_to_target_usize(addr.bytes().wrapping_sub(base_addr));
        Some((alloc_id, Size::from_bytes(rel_offset)))
    }
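
    // Offset example (illustrative): a pointer at absolute address 0x1004 into an
    // allocation based at 0x1000 yields `rel_offset == 4`. The subtraction is
    // wrapping because a pointer may have been moved below its allocation's base
    // address via `wrapping_offset` before this lookup happens.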
}

impl<'tcx> MiriMachine<'tcx> {
    pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
        let global_state = self.alloc_addresses.get_mut();
        let rng = self.rng.get_mut();

        // We can *not* remove this from `base_addr`, since the interpreter design requires that we
        // be able to retrieve an AllocId + offset for any memory access *before* we check if the
        // access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory
        // access to determine the allocation ID and offset -- and there can still be pointers with
        // `dead_id` that one can attempt to use for a memory access. `ptr_get_alloc` may return
        // `None` only if the pointer truly has no provenance (this ensures consistent error
        // messages).
        // However, we *can* remove it from `int_to_ptr_map`, since any wildcard pointers that exist
        // can no longer actually be accessing that address. This ensures `alloc_id_from_addr` never
        // returns a dead allocation.
        // To avoid a linear scan we first look up the address in `base_addr`, and then find it in
        // `int_to_ptr_map`.
        let addr = *global_state.base_addr.get(&dead_id).unwrap();
        let pos =
            global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
        let removed = global_state.int_to_ptr_map.remove(pos);
        assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing
        // We can also remove it from `exposed`, since this allocation can no longer be returned by
        // `alloc_id_from_addr` anyway.
        global_state.exposed.remove(&dead_id);
        // Also remember this address for future reuse.
        let thread = self.threads.active_thread();
        global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
            if let Some(data_race) = &self.data_race {
                data_race.release_clock(&self.threads, |clock| clock.clone())
            } else {
                VClock::default()
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_align_addr() {
        assert_eq!(align_addr(37, 4), 40);
        assert_eq!(align_addr(44, 4), 44);
    }
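
    // A few additional cases (sketch): these exercise the already-aligned path,
    // alignment 1 (always a no-op), and rounding up to a larger power of two.
    #[test]
    fn test_align_addr_edge_cases() {
        assert_eq!(align_addr(0, 8), 0);
        assert_eq!(align_addr(7, 1), 7);
        assert_eq!(align_addr(15, 16), 16);
        assert_eq!(align_addr(16, 16), 16);
    }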
}