miri/alloc_addresses/mod.rs

//! This module is responsible for managing the absolute addresses that allocations are located at,
//! and for casting between pointers and integers based on those addresses.
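//!
//! For orientation, the user-facing operations this module backs look roughly like this in the
//! interpreted program (an illustrative sketch only; `expose_provenance`/`with_exposed_provenance`
//! are `std` APIs, not items of this module):
//!
//! ```rust,ignore
//! let x = 42u8;
//! // Records the allocation containing `x` as "exposed" (see `expose_provenance` below).
//! let addr: usize = (&raw const x).expose_provenance();
//! // An int2ptr cast: Miri gives this pointer "wildcard" provenance (see `ptr_from_addr_cast`).
//! let p: *const u8 = std::ptr::with_exposed_provenance(addr);
//! // Only when the pointer is used does Miri look up which exposed allocation it points into.
//! let val = unsafe { *p };
//! ```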

mod address_generator;
mod reuse_pool;

use std::cell::RefCell;

use rustc_abi::{Align, Size};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::ty::TyCtxt;

pub use self::address_generator::AddressGenerator;
use self::reuse_pool::ReusePool;
use crate::alloc::MiriAllocParams;
use crate::concurrency::VClock;
use crate::diagnostics::SpanDedupDiagnostic;
use crate::*;

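/// How int-to-pointer casts (`with_exposed_provenance`) are handled.
/// (Typically selected on the command line, e.g. via `-Zmiri-permissive-provenance` or
/// `-Zmiri-strict-provenance`.)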
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProvenanceMode {
    /// We support `expose_provenance`/`with_exposed_provenance` via "wildcard" provenance.
    /// However, we warn on `with_exposed_provenance` to alert the user of the precision loss.
    Default,
    /// Like `Default`, but without the warning.
    Permissive,
    /// We error on `with_exposed_provenance`, ensuring no precision loss.
    Strict,
}

pub type GlobalState = RefCell<GlobalStateInner>;

#[derive(Debug)]
pub struct GlobalStateInner {
    /// This is used as a map between the address of each allocation and its `AllocId`. It is always
    /// sorted by address. We cannot use a `HashMap` since we can be given an address that is offset
    /// from the base address, and we need to find the `AllocId` it belongs to. This is not the
    /// *full* inverse of `base_addr`; dead allocations have been removed.
    /// Note that in GenMC mode, dead allocations are *not* removed -- and also, addresses are never
    /// reused. This lets us use the address as a cross-execution-stable identifier for an allocation.
    int_to_ptr_map: Vec<(u64, AllocId)>,
    /// The base address for each allocation. We cannot put that into
    /// `AllocExtra` because function pointers also have a base address, and
    /// they do not have an `AllocExtra`.
    /// This is the inverse of `int_to_ptr_map`.
    base_addr: FxHashMap<AllocId, u64>,
    /// The set of exposed allocations. This cannot be put
    /// into `AllocExtra` for the same reason as `base_addr`.
    exposed: FxHashSet<AllocId>,
    /// The provenance to use for int2ptr casts.
    provenance_mode: ProvenanceMode,
    /// The generator for new addresses in a given range, and a pool for address reuse. This is
    /// `None` if addresses are generated elsewhere (in native-lib mode or with GenMC).
    address_generation: Option<(AddressGenerator, ReusePool)>,
    /// Native-lib mode only: Temporarily store prepared memory space for global allocations the
    /// first time their memory address is required. This is used to ensure that the memory is
    /// allocated before Miri assigns it an internal address, which is important for matching the
    /// internal address to the machine address so FFI can read from pointers.
    prepared_alloc_bytes: Option<FxHashMap<AllocId, MiriAllocBytes>>,
}

impl VisitProvenance for GlobalStateInner {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        let GlobalStateInner {
            int_to_ptr_map: _,
            base_addr: _,
            prepared_alloc_bytes: _,
            exposed: _,
            address_generation: _,
            provenance_mode: _,
        } = self;
        // Though base_addr, int_to_ptr_map, and exposed contain AllocIds, we do not want to visit them.
        // int_to_ptr_map and exposed must contain only live allocations, and those
        // are never garbage collected.
        // base_addr is only relevant if we have a pointer to an AllocId and need to look up its
        // base address; so if an AllocId is not reachable from somewhere else we can remove it
        // here.
    }
}

impl GlobalStateInner {
    pub fn new<'tcx>(config: &MiriConfig, stack_addr: u64, tcx: TyCtxt<'tcx>) -> Self {
        GlobalStateInner {
            int_to_ptr_map: Vec::default(),
            base_addr: FxHashMap::default(),
            exposed: FxHashSet::default(),
            provenance_mode: config.provenance_mode,
            address_generation: (config.native_lib.is_empty() && config.genmc_config.is_none())
                .then(|| {
                    (
                        AddressGenerator::new(stack_addr..tcx.target_usize_max()),
                        ReusePool::new(config),
                    )
                }),
            prepared_alloc_bytes: (!config.native_lib.is_empty()).then(FxHashMap::default),
        }
    }

    pub fn remove_unreachable_allocs(&mut self, allocs: &LiveAllocs<'_, '_>) {
        // `exposed` and `int_to_ptr_map` are cleared immediately when an allocation
        // is freed, so `base_addr` is the only one we have to clean up based on the GC.
        self.base_addr.retain(|id, _| allocs.is_live(*id));
    }
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn addr_from_alloc_id_uncached(
        &self,
        global_state: &mut GlobalStateInner,
        alloc_id: AllocId,
        memory_kind: MemoryKind,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let info = this.get_alloc_info(alloc_id);

        // This is either called immediately after allocation (and then cached), or when
        // adjusting `tcx` pointers (which never get freed). So assert that we are looking
        // at a live allocation. This also ensures that we never re-assign an address to an
        // allocation that previously had an address, but then was freed and the address
        // information was removed.
        assert!(!matches!(info.kind, AllocKind::Dead));

        // TypeId allocations always have a "base address" of 0 (i.e., the relative offset is the
        // hash fragment and therefore equal to the actual integer value).
        if matches!(info.kind, AllocKind::TypeId) {
            return interp_ok(0);
        }

        // Miri's address assignment leaks state across thread boundaries, which is incompatible
        // with GenMC execution. So we instead let GenMC assign addresses to allocations.
        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let addr =
                genmc_ctx.handle_alloc(this, alloc_id, info.size, info.align, memory_kind)?;
            return interp_ok(addr);
        }

        // This allocation does not have a base address yet, pick or reuse one.
        if !this.machine.native_lib.is_empty() {
            // In native lib mode, we use the "real" address of the bytes for this allocation.
            // This ensures the interpreted program and native code have the same view of memory.
            let params = this.machine.get_default_alloc_params();
            let base_ptr = match info.kind {
                AllocKind::LiveData => {
                    if memory_kind == MiriMemoryKind::Global.into() {
                        // For new global allocations, we always pre-allocate the memory to be able
                        // to use the machine address directly.
                        let prepared_bytes = MiriAllocBytes::zeroed(info.size, info.align, params)
                            .unwrap_or_else(|| {
                                panic!("Miri ran out of memory: cannot create allocation of {size:?} bytes", size = info.size)
                            });
                        let ptr = prepared_bytes.as_ptr();
                        // Store prepared allocation to be picked up for use later.
                        global_state
                            .prepared_alloc_bytes
                            .as_mut()
                            .unwrap()
                            .try_insert(alloc_id, prepared_bytes)
                            .unwrap();
                        ptr
                    } else {
                        // Non-global allocations are already in memory at this point so
                        // we can just get a pointer to where their data is stored.
                        this.get_alloc_bytes_unchecked_raw(alloc_id)?
                    }
                }
                #[cfg(all(unix, feature = "native-lib"))]
                AllocKind::Function => {
                    if let Some(GlobalAlloc::Function { instance, .. }) =
                        this.tcx.try_get_global_alloc(alloc_id)
                    {
                        let fn_sig = this.tcx.fn_sig(instance.def_id()).skip_binder().skip_binder();
                        let fn_ptr = crate::shims::native_lib::build_libffi_closure(this, fn_sig)?;

                        #[expect(
                            clippy::as_conversions,
                            reason = "No better way to cast a function ptr to a ptr"
                        )]
                        {
                            fn_ptr as *const _
                        }
                    } else {
                        dummy_alloc(params)
                    }
                }
                #[cfg(not(all(unix, feature = "native-lib")))]
                AllocKind::Function => dummy_alloc(params),
                AllocKind::VTable => dummy_alloc(params),
                AllocKind::TypeId | AllocKind::Dead => unreachable!(),
            };
            // We don't have to expose this pointer yet, we do that in `prepare_for_native_call`.
            return interp_ok(base_ptr.addr().to_u64());
        }
        // We are not in native lib or genmc mode, so we control the addresses ourselves.
        let (addr_gen, reuse) = global_state.address_generation.as_mut().unwrap();
        let mut rng = this.machine.rng.borrow_mut();
        if let Some((reuse_addr, clock)) =
            reuse.take_addr(&mut *rng, info.size, info.align, memory_kind, this.active_thread())
        {
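            // The reuse pool recorded a release clock when this address was freed (possibly by a
            // different thread); acquire it so that reusing the address cannot introduce a data
            // race that the original program did not have.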
            if let Some(clock) = clock {
                this.acquire_clock(&clock)?;
            }
            interp_ok(reuse_addr)
        } else {
            // We have to pick a fresh address.
            let new_addr = addr_gen.generate(info.size, info.align, &mut rng)?;

            // If we filled up more than half the address space, start aggressively reusing
            // addresses to avoid running out.
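            // (The range handed to the generator ends at `tcx.target_usize_max()`, so on a 64-bit
            // target this threshold is crossed roughly once fresh addresses pass 2^63.)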
            let remaining_range = addr_gen.get_remaining();
            if remaining_range.start > remaining_range.end / 2 {
                reuse.address_space_shortage();
            }

            interp_ok(new_addr)
        }
    }
}

fn dummy_alloc(params: MiriAllocParams) -> *const u8 {
    // Allocate some dummy memory to get a unique address for this function/vtable.
    let alloc_bytes =
        MiriAllocBytes::from_bytes(&[0u8; 1][..], Align::from_bytes(1).unwrap(), params);
    let ptr = alloc_bytes.as_ptr();
    // Leak the underlying memory to ensure it remains unique.
    std::mem::forget(alloc_bytes);
    ptr
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    // Returns the `AllocId` that corresponds to the specified addr,
    // or `None` if the addr is out of bounds.
    fn alloc_id_from_addr(&self, addr: u64, size: i64) -> Option<AllocId> {
        let this = self.eval_context_ref();
        let global_state = this.machine.alloc_addresses.borrow();
        assert!(global_state.provenance_mode != ProvenanceMode::Strict);

        // We always search the allocation to the right of this address. So if the size is strictly
        // negative, we have to search for `addr-1` instead.
        let addr = if size >= 0 { addr } else { addr.saturating_sub(1) };
        let pos = global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr);

        // Determine the in-bounds provenance for this pointer.
        let alloc_id = match pos {
            Ok(pos) => Some(global_state.int_to_ptr_map[pos].1),
            Err(0) => None,
            Err(pos) => {
                // This is the largest of the addresses smaller than `int`,
                // i.e. the greatest lower bound (glb)
                let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
                // This never overflows because `addr >= glb`
                let offset = addr - glb;
                // We require this to be strict in-bounds of the allocation. This arm is only
                // entered for addresses that are not the base address, so even zero-sized
                // allocations will get recognized at their base address -- but all other
                // allocations will *not* be recognized at their "end" address.
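                // For example, an allocation of size 16 based at 0x1000 is found for addresses
                // 0x1000..=0x100f, but not for 0x1010 (one past the end).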
                let size = this.get_alloc_info(alloc_id).size;
                if offset < size.bytes() { Some(alloc_id) } else { None }
            }
        }?;

        // We only use this provenance if it has been exposed.
        if global_state.exposed.contains(&alloc_id) {
            // This must still be live, since we remove allocations from `int_to_ptr_map` when they get freed.
            debug_assert!(this.is_alloc_live(alloc_id));
            Some(alloc_id)
        } else {
            None
        }
    }

    /// Returns the base address of an allocation, or an error if no base address could be found
    ///
    /// # Panics
    /// If `memory_kind = None` and the `alloc_id` is not cached, meaning that the first call to
    /// this function per `alloc_id` must get the `memory_kind`.
    fn addr_from_alloc_id(
        &self,
        alloc_id: AllocId,
        memory_kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let mut global_state = this.machine.alloc_addresses.borrow_mut();
        let global_state = &mut *global_state;

        match global_state.base_addr.get(&alloc_id) {
            Some(&addr) => interp_ok(addr),
            None => {
                // First time we're looking for the absolute address of this allocation.
                let memory_kind =
                    memory_kind.expect("memory_kind is required since alloc_id is not cached");
                let base_addr =
                    this.addr_from_alloc_id_uncached(global_state, alloc_id, memory_kind)?;
                trace!("Assigning base address {:#x} to allocation {:?}", base_addr, alloc_id);

                // Store address in cache.
                global_state.base_addr.try_insert(alloc_id, base_addr).unwrap();

                // Also maintain the opposite mapping in `int_to_ptr_map`, ensuring we keep it
                // sorted. We have a fast-path for the common case that this address is bigger than
                // all previous ones. We skip this for allocations at address 0; those can't be
                // real, they must be TypeId "fake allocations".
                if base_addr != 0 {
                    let pos = if global_state
                        .int_to_ptr_map
                        .last()
                        .is_some_and(|(last_addr, _)| *last_addr < base_addr)
                    {
                        global_state.int_to_ptr_map.len()
                    } else {
                        global_state
                            .int_to_ptr_map
                            .binary_search_by_key(&base_addr, |(addr, _)| *addr)
                            .unwrap_err()
                    };
                    global_state.int_to_ptr_map.insert(pos, (base_addr, alloc_id));
                }

                interp_ok(base_addr)
            }
        }
    }

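    /// Marks the allocation behind `provenance` as exposed, so that wildcard pointers obtained
    /// from int-to-pointer casts may later resolve to it. This backs `expose_provenance` in the
    /// interpreted program.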
    fn expose_provenance(&self, provenance: Provenance) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        let mut global_state = this.machine.alloc_addresses.borrow_mut();

        let (alloc_id, tag) = match provenance {
            Provenance::Concrete { alloc_id, tag } => (alloc_id, tag),
            Provenance::Wildcard => {
                // No need to do anything for wildcard pointers, as their provenance
                // has already been exposed.
                return interp_ok(());
            }
        };

        // In strict mode, we don't need this, so we can save some cycles by not tracking it.
        if global_state.provenance_mode == ProvenanceMode::Strict {
            return interp_ok(());
        }
        // Exposing a dead alloc is a no-op, because it's not possible to get a dead allocation
        // via int2ptr.
        if !this.is_alloc_live(alloc_id) {
            return interp_ok(());
        }
        trace!("Exposing allocation id {alloc_id:?}");
        global_state.exposed.insert(alloc_id);
        // Release the global state before we call `expose_tag`, which may call `get_alloc_info_extra`,
        // which may need access to the global state.
        drop(global_state);
        if this.machine.borrow_tracker.is_some() {
            this.expose_tag(alloc_id, tag)?;
        }
        interp_ok(())
    }

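    /// Turns an address into a pointer with "wildcard" provenance. This backs int-to-pointer
    /// casts (and `with_exposed_provenance`) in the interpreted program.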
    fn ptr_from_addr_cast(&self, addr: u64) -> InterpResult<'tcx, Pointer> {
        trace!("Casting {:#x} to a pointer", addr);

        let this = self.eval_context_ref();
        let global_state = this.machine.alloc_addresses.borrow();

        // Potentially emit a warning.
        match global_state.provenance_mode {
            ProvenanceMode::Default => {
                // The first time this happens at a particular location, print a warning.
                static DEDUP: SpanDedupDiagnostic = SpanDedupDiagnostic::new();
                this.dedup_diagnostic(&DEDUP, |first| {
                    NonHaltingDiagnostic::Int2Ptr { details: first }
                });
            }
            ProvenanceMode::Strict => {
                throw_machine_stop!(TerminationInfo::Int2PtrWithStrictProvenance);
            }
            ProvenanceMode::Permissive => {}
        }

        // We do *not* look up the `AllocId` here! This is a `usize as ptr` cast, and it is
        // completely legal to do a cast and then `wrapping_offset` to another allocation and only
        // *then* do a memory access. So the allocation that the pointer happens to point to on a
        // cast is fairly irrelevant. Instead we generate this as a "wildcard" pointer, such that
        // *every time the pointer is used*, we do an `AllocId` lookup to find the (exposed)
        // allocation it might be referencing.
        interp_ok(Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)))
    }

    /// Convert a relative (tcx) pointer to a Miri pointer.
    fn adjust_alloc_root_pointer(
        &self,
        ptr: interpret::Pointer<CtfeProvenance>,
        tag: BorTag,
        kind: MemoryKind,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let this = self.eval_context_ref();

        let (prov, offset) = ptr.prov_and_relative_offset();
        let alloc_id = prov.alloc_id();

        // Get a pointer to the beginning of this allocation.
        let base_addr = this.addr_from_alloc_id(alloc_id, Some(kind))?;
        let base_ptr = interpret::Pointer::new(
            Provenance::Concrete { alloc_id, tag },
            Size::from_bytes(base_addr),
        );
        // Add offset with the right kind of pointer-overflowing arithmetic.
        interp_ok(base_ptr.wrapping_offset(offset, this))
    }

    // This returns some prepared `MiriAllocBytes`, either because `addr_from_alloc_id` reserved
    // memory space in the past, or by doing the pre-allocation right upon being called.
    fn get_global_alloc_bytes(
        &self,
        id: AllocId,
        bytes: &[u8],
        align: Align,
    ) -> InterpResult<'tcx, MiriAllocBytes> {
        let this = self.eval_context_ref();
        assert!(this.tcx.try_get_global_alloc(id).is_some());
        if !this.machine.native_lib.is_empty() {
            // In native lib mode, MiriAllocBytes for global allocations are handled via `prepared_alloc_bytes`.
            // This additional call ensures that some `MiriAllocBytes` are always prepared, just in case
            // this function gets called before the first time `addr_from_alloc_id` gets called.
            this.addr_from_alloc_id(id, Some(MiriMemoryKind::Global.into()))?;
            // The memory we need here will have already been allocated during an earlier call to
            // `addr_from_alloc_id` for this allocation. So don't create a new `MiriAllocBytes` here, instead
            // fetch the previously prepared bytes from `prepared_alloc_bytes`.
            let mut global_state = this.machine.alloc_addresses.borrow_mut();
            let mut prepared_alloc_bytes = global_state
                .prepared_alloc_bytes
                .as_mut()
                .unwrap()
                .remove(&id)
                .unwrap_or_else(|| panic!("alloc bytes for {id:?} have not been prepared"));
            // Sanity-check that the prepared allocation has the right size and alignment.
            assert!(prepared_alloc_bytes.as_ptr().is_aligned_to(align.bytes_usize()));
            assert_eq!(prepared_alloc_bytes.len(), bytes.len());
            // Copy allocation contents into prepared memory.
            prepared_alloc_bytes.copy_from_slice(bytes);
            interp_ok(prepared_alloc_bytes)
        } else {
            let params = this.machine.get_default_alloc_params();
            interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align, params))
        }
    }

    /// When a pointer is used for a memory access, this computes which allocation the access is
    /// going to, and the offset within that allocation.
    fn ptr_get_alloc(
        &self,
        ptr: interpret::Pointer<Provenance>,
        size: i64,
    ) -> Option<(AllocId, Size)> {
        let this = self.eval_context_ref();

        let (tag, addr) = ptr.into_raw_parts(); // addr is absolute (Miri provenance)

        let alloc_id = if let Provenance::Concrete { alloc_id, .. } = tag {
            alloc_id
        } else {
            // A wildcard pointer.
            this.alloc_id_from_addr(addr.bytes(), size)?
        };

        // This cannot fail: since we already have a pointer with that provenance, adjust_alloc_root_pointer
        // must have been called in the past, so we can just look up the address in the map.
        let base_addr = *this.machine.alloc_addresses.borrow().base_addr.get(&alloc_id).unwrap();

        // Wrapping "addr - base_addr"
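        // (This must be wrapping because `wrapping_offset` may have moved the pointer below its
        // allocation's base address; the resulting huge offset is then rejected when the access
        // itself is bounds-checked.)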
        let rel_offset = this.truncate_to_target_usize(addr.bytes().wrapping_sub(base_addr));
        Some((alloc_id, Size::from_bytes(rel_offset)))
    }

    /// Return a list of all exposed allocations.
    fn exposed_allocs(&self) -> Vec<AllocId> {
        let this = self.eval_context_ref();
        this.machine.alloc_addresses.borrow().exposed.iter().copied().collect()
    }
}

impl<'tcx> MiriMachine<'tcx> {
    pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
        let global_state = self.alloc_addresses.get_mut();
        let rng = self.rng.get_mut();

        // We can *not* remove this from `base_addr`, since the interpreter design requires that we
        // be able to retrieve an AllocId + offset for any memory access *before* we check if the
        // access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory
        // access to determine the allocation ID and offset -- and there can still be pointers with
        // `dead_id` that one can attempt to use for a memory access. `ptr_get_alloc` may return
        // `None` only if the pointer truly has no provenance (this ensures consistent error
        // messages).
        // However, we *can* remove it from `int_to_ptr_map`, since any wildcard pointers that exist
        // can no longer actually be accessing that address. This ensures `alloc_id_from_addr` never
        // returns a dead allocation.
        // To avoid a linear scan we first look up the address in `base_addr`, and then find it in
        // `int_to_ptr_map`.
        let addr = *global_state.base_addr.get(&dead_id).unwrap();
        let pos =
            global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
        let removed = global_state.int_to_ptr_map.remove(pos);
        assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing
        // We can also remove it from `exposed`, since this allocation can anyway not be returned by
        // `alloc_id_from_addr` any more.
        global_state.exposed.remove(&dead_id);
        // Also remember this address for future reuse.
        if let Some((_addr_gen, reuse)) = global_state.address_generation.as_mut() {
            let thread = self.threads.active_thread();
            reuse.add_addr(rng, addr, size, align, kind, thread, || {
                // We cannot be in GenMC mode as then `address_generation` is `None`. We cannot use
                // `self.release_clock` as `self.alloc_addresses` is borrowed.
                if let Some(data_race) = self.data_race.as_vclocks_ref() {
                    data_race.release_clock(&self.threads, |clock| clock.clone())
                } else {
                    VClock::default()
                }
            })
        }
    }
}