miri/alloc_addresses/mod.rs
//! This module is responsible for managing the absolute addresses that allocations are located at,
//! and for casting between pointers and integers based on those addresses.
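//!
//! For intuition, here is the user-facing pattern this module models (an illustrative sketch
//! using the standard exposed-provenance APIs, not code from this module):
//!
//! ```rust,ignore
//! let x = 42u8;
//! // Mark x's allocation as exposed and obtain its absolute address.
//! let addr = (&raw const x).expose_provenance();
//! // In Miri, this cast produces a "wildcard" pointer: each access through it searches
//! // the exposed allocations for one containing the address.
//! let ptr = std::ptr::with_exposed_provenance::<u8>(addr);
//! unsafe { assert_eq!(*ptr, 42) };
//! ```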

mod address_generator;
mod reuse_pool;

use std::cell::RefCell;

use rustc_abi::{Align, Size};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::ty::TyCtxt;

pub use self::address_generator::AddressGenerator;
use self::reuse_pool::ReusePool;
use crate::alloc::MiriAllocParams;
use crate::concurrency::VClock;
use crate::diagnostics::SpanDedupDiagnostic;
use crate::*;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProvenanceMode {
    /// We support `expose_provenance`/`with_exposed_provenance` via "wildcard" provenance.
    /// However, we warn on `with_exposed_provenance` to alert the user of the precision loss.
    Default,
    /// Like `Default`, but without the warning.
    Permissive,
    /// We error on `with_exposed_provenance`, ensuring no precision loss.
    Strict,
}
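
// Note: which mode is in effect is a user choice; in Miri's CLI this corresponds to the
// `-Zmiri-permissive-provenance` and `-Zmiri-strict-provenance` flags, with `Default`
// used when neither is passed.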

pub type GlobalState = RefCell<GlobalStateInner>;

#[derive(Debug)]
pub struct GlobalStateInner {
    /// This is used as a map between the address of each allocation and its `AllocId`. It is
    /// always sorted by address. We cannot use a `HashMap` since we can be given an address that
    /// is offset from the base address, and we need to find the `AllocId` it belongs to. This is
    /// not the *full* inverse of `base_addr`; dead allocations have been removed.
    /// Note that in GenMC mode, dead allocations are *not* removed -- and also, addresses are
    /// never reused. This lets us use the address as a cross-execution-stable identifier for an
    /// allocation.
    int_to_ptr_map: Vec<(u64, AllocId)>,
    /// The base address for each allocation. We cannot put that into
    /// `AllocExtra` because function pointers also have a base address, and
    /// they do not have an `AllocExtra`.
    /// This is the inverse of `int_to_ptr_map`.
    base_addr: FxHashMap<AllocId, u64>,
    /// The set of exposed allocations. This cannot be put
    /// into `AllocExtra` for the same reason as `base_addr`.
    exposed: FxHashSet<AllocId>,
    /// The provenance to use for int2ptr casts.
    provenance_mode: ProvenanceMode,
    /// The generator for new addresses in a given range, and a pool for address reuse. This is
    /// `None` if addresses are generated elsewhere (in native-lib mode or with GenMC).
    address_generation: Option<(AddressGenerator, ReusePool)>,
    /// Native-lib mode only: temporarily stores prepared memory space for global allocations the
    /// first time their memory address is required. This is used to ensure that the memory is
    /// allocated before Miri assigns it an internal address, which is important for matching the
    /// internal address to the machine address so FFI can read from pointers.
    prepared_alloc_bytes: Option<FxHashMap<AllocId, MiriAllocBytes>>,
}
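
// Illustrative invariant (maintained by this module; hypothetical addresses): if `base_addr`
// maps a live allocation A to 0x100, then `int_to_ptr_map` contains the entry (0x100, A);
// `exposed` additionally contains A once the program has exposed a pointer into A.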

impl VisitProvenance for GlobalStateInner {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        let GlobalStateInner {
            int_to_ptr_map: _,
            base_addr: _,
            prepared_alloc_bytes: _,
            exposed: _,
            address_generation: _,
            provenance_mode: _,
        } = self;
        // Though base_addr, int_to_ptr_map, and exposed contain AllocIds, we do not want to visit them.
        // int_to_ptr_map and exposed must contain only live allocations, and those
        // are never garbage collected.
        // base_addr is only relevant if we have a pointer to an AllocId and need to look up its
        // base address; so if an AllocId is not reachable from somewhere else we can remove it
        // here.
    }
}

impl GlobalStateInner {
    pub fn new<'tcx>(config: &MiriConfig, stack_addr: u64, tcx: TyCtxt<'tcx>) -> Self {
        GlobalStateInner {
            int_to_ptr_map: Vec::default(),
            base_addr: FxHashMap::default(),
            exposed: FxHashSet::default(),
            provenance_mode: config.provenance_mode,
            address_generation: (config.native_lib.is_empty() && config.genmc_config.is_none())
                .then(|| {
                    (
                        AddressGenerator::new(stack_addr..tcx.target_usize_max()),
                        ReusePool::new(config),
                    )
                }),
            prepared_alloc_bytes: (!config.native_lib.is_empty()).then(FxHashMap::default),
        }
    }

    pub fn remove_unreachable_allocs(&mut self, allocs: &LiveAllocs<'_, '_>) {
        // `exposed` and `int_to_ptr_map` are cleared immediately when an allocation
        // is freed, so `base_addr` is the only one we have to clean up based on the GC.
        self.base_addr.retain(|id, _| allocs.is_live(*id));
    }
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn addr_from_alloc_id_uncached(
        &self,
        global_state: &mut GlobalStateInner,
        alloc_id: AllocId,
        memory_kind: MemoryKind,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let info = this.get_alloc_info(alloc_id);

        // This is either called immediately after allocation (and then cached), or when
        // adjusting `tcx` pointers (which never get freed). So assert that we are looking
        // at a live allocation. This also ensures that we never re-assign an address to an
        // allocation that previously had an address, but then was freed and the address
        // information was removed.
        assert!(!matches!(info.kind, AllocKind::Dead));

        // TypeId allocations always have a "base address" of 0 (i.e., the relative offset is the
        // hash fragment and therefore equal to the actual integer value).
        if matches!(info.kind, AllocKind::TypeId) {
            return interp_ok(0);
        }

        // Miri's address assignment leaks state across thread boundaries, which is incompatible
        // with GenMC execution. So we instead let GenMC assign addresses to allocations.
        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let addr =
                genmc_ctx.handle_alloc(this, alloc_id, info.size, info.align, memory_kind)?;
            return interp_ok(addr);
        }

        // This allocation does not have a base address yet, pick or reuse one.
        if !this.machine.native_lib.is_empty() {
            // In native-lib mode, we use the "real" address of the bytes for this allocation.
            // This ensures the interpreted program and native code have the same view of memory.
            let params = this.machine.get_default_alloc_params();
            let base_ptr = match info.kind {
                AllocKind::LiveData => {
                    if memory_kind == MiriMemoryKind::Global.into() {
                        // For new global allocations, we always pre-allocate the memory to be
                        // able to use the machine address directly.
                        let prepared_bytes = MiriAllocBytes::zeroed(info.size, info.align, params)
                            .unwrap_or_else(|| {
                                panic!(
                                    "Miri ran out of memory: cannot create allocation of {size:?} bytes",
                                    size = info.size
                                )
                            });
                        let ptr = prepared_bytes.as_ptr();
                        // Store the prepared allocation to be picked up for use later.
                        global_state
                            .prepared_alloc_bytes
                            .as_mut()
                            .unwrap()
                            .try_insert(alloc_id, prepared_bytes)
                            .unwrap();
                        ptr
                    } else {
                        // Non-global allocations are already in memory at this point so
                        // we can just get a pointer to where their data is stored.
                        this.get_alloc_bytes_unchecked_raw(alloc_id)?
                    }
                }
                #[cfg(all(unix, feature = "native-lib"))]
                AllocKind::Function => {
                    if let Some(GlobalAlloc::Function { instance, .. }) =
                        this.tcx.try_get_global_alloc(alloc_id)
                    {
                        let fn_sig = this.tcx.instantiate_bound_regions_with_erased(
                            this.tcx
                                .fn_sig(instance.def_id())
                                .instantiate(*this.tcx, instance.args),
                        );
                        let fn_ptr = crate::shims::native_lib::build_libffi_closure(this, fn_sig)?;

                        #[expect(
                            clippy::as_conversions,
                            reason = "No better way to cast a function ptr to a ptr"
                        )]
                        {
                            fn_ptr as *const _
                        }
                    } else {
                        dummy_alloc(params)
                    }
                }
                #[cfg(not(all(unix, feature = "native-lib")))]
                AllocKind::Function => dummy_alloc(params),
                AllocKind::VTable | AllocKind::VaList => dummy_alloc(params),
                AllocKind::TypeId | AllocKind::Dead => unreachable!(),
            };
            // We don't have to expose this pointer yet, we do that in `prepare_for_native_call`.
            return interp_ok(base_ptr.addr().to_u64());
        }
        // We are not in native-lib or GenMC mode, so we control the addresses ourselves.
        let (addr_gen, reuse) = global_state.address_generation.as_mut().unwrap();
        let mut rng = this.machine.rng.borrow_mut();
        if let Some((reuse_addr, clock)) =
            reuse.take_addr(&mut *rng, info.size, info.align, memory_kind, this.active_thread())
        {
            if let Some(clock) = clock {
                this.acquire_clock(&clock)?;
            }
            interp_ok(reuse_addr)
        } else {
            // We have to pick a fresh address.
            let new_addr = addr_gen.generate(info.size, info.align, &mut rng)?;

            // If we filled up more than half the address space, start aggressively reusing
            // addresses to avoid running out.
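            // (Fresh addresses are handed out in increasing order, so comparing the start of the
            // remaining range against half its end is a cheap proxy for "more than half of the
            // address space is used up".)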
            let remaining_range = addr_gen.get_remaining();
            if remaining_range.start > remaining_range.end / 2 {
                reuse.address_space_shortage();
            }

            interp_ok(new_addr)
        }
    }
}

fn dummy_alloc(params: MiriAllocParams) -> *const u8 {
    // Allocate some dummy memory to get a unique address for this function/vtable.
    let alloc_bytes = MiriAllocBytes::from_bytes(&[0u8; 1], Align::from_bytes(1).unwrap(), params);
    let ptr = alloc_bytes.as_ptr();
    // Leak the underlying memory to ensure it remains unique.
    std::mem::forget(alloc_bytes);
    ptr
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    // Returns the `AllocId` that corresponds to the specified addr,
    // or `None` if the addr is out of bounds.
    fn alloc_id_from_addr(&self, addr: u64, size: i64) -> Option<AllocId> {
        let this = self.eval_context_ref();
        let global_state = this.machine.alloc_addresses.borrow();
        assert!(global_state.provenance_mode != ProvenanceMode::Strict);

        // We always search the allocation to the right of this address. So if the size is strictly
        // negative, we have to search for `addr-1` instead.
        let addr = if size >= 0 { addr } else { addr.saturating_sub(1) };
        let pos = global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr);

        // Determine the in-bounds provenance for this pointer.
        let alloc_id = match pos {
            Ok(pos) => Some(global_state.int_to_ptr_map[pos].1),
            Err(0) => None,
            Err(pos) => {
                // This is the largest of the addresses smaller than `addr`,
                // i.e. the greatest lower bound (glb).
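                // Worked example (hypothetical values): with `int_to_ptr_map` =
                // [(0x100, A), (0x200, B)] and `addr` = 0x1f0, the search yields `Err(1)`,
                // so glb = 0x100 and offset = 0xf0; A is returned only if A's size > 0xf0.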
                let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
                // This never overflows because `addr >= glb`.
                let offset = addr - glb;
                // We require this to be strict in-bounds of the allocation. This arm is only
                // entered for addresses that are not the base address, so even zero-sized
                // allocations will get recognized at their base address -- but all other
                // allocations will *not* be recognized at their "end" address.
                let size = this.get_alloc_info(alloc_id).size;
                if offset < size.bytes() { Some(alloc_id) } else { None }
            }
        }?;

        // We only use this provenance if it has been exposed.
        if global_state.exposed.contains(&alloc_id) {
            // This must still be live, since we remove allocations from `int_to_ptr_map` when they get freed.
            debug_assert!(this.is_alloc_live(alloc_id));
            Some(alloc_id)
        } else {
            None
        }
    }

    /// Returns the base address of an allocation, or an error if no base address could be found.
    ///
    /// # Panics
    /// Panics if `memory_kind` is `None` and the `alloc_id` is not yet cached; i.e., the first
    /// call to this function for a given `alloc_id` must provide the `memory_kind`.
    fn addr_from_alloc_id(
        &self,
        alloc_id: AllocId,
        memory_kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let mut global_state = this.machine.alloc_addresses.borrow_mut();
        let global_state = &mut *global_state;

        match global_state.base_addr.get(&alloc_id) {
            Some(&addr) => interp_ok(addr),
            None => {
                // First time we're looking for the absolute address of this allocation.
                let memory_kind =
                    memory_kind.expect("memory_kind is required since alloc_id is not cached");
                let base_addr =
                    this.addr_from_alloc_id_uncached(global_state, alloc_id, memory_kind)?;
                trace!("Assigning base address {:#x} to allocation {:?}", base_addr, alloc_id);

                // Store address in cache.
                global_state.base_addr.try_insert(alloc_id, base_addr).unwrap();

                // Also maintain the opposite mapping in `int_to_ptr_map`, ensuring we keep it
                // sorted. We have a fast-path for the common case that this address is bigger than
                // all previous ones. We skip this for allocations at address 0; those can't be
                // real, they must be TypeId "fake allocations".
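                // Example (hypothetical values): inserting base address 0x180 into
                // [(0x100, A), (0x200, B)] misses the fast path (0x200 is not < 0x180);
                // `binary_search_by_key` returns `Err(1)`, so the entry lands between A and B.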
                if base_addr != 0 {
                    let pos = if global_state
                        .int_to_ptr_map
                        .last()
                        .is_some_and(|(last_addr, _)| *last_addr < base_addr)
                    {
                        global_state.int_to_ptr_map.len()
                    } else {
                        global_state
                            .int_to_ptr_map
                            .binary_search_by_key(&base_addr, |(addr, _)| *addr)
                            .unwrap_err()
                    };
                    global_state.int_to_ptr_map.insert(pos, (base_addr, alloc_id));
                }

                interp_ok(base_addr)
            }
        }
    }

    fn expose_provenance(&self, provenance: Provenance) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        let mut global_state = this.machine.alloc_addresses.borrow_mut();

        let (alloc_id, tag) = match provenance {
            Provenance::Concrete { alloc_id, tag } => (alloc_id, tag),
            Provenance::Wildcard => {
                // No need to do anything for wildcard pointers, as
                // their provenance has already been exposed.
                return interp_ok(());
            }
        };

        // In strict mode, we don't need this, so we can save some cycles by not tracking it.
        if global_state.provenance_mode == ProvenanceMode::Strict {
            return interp_ok(());
        }
        // Exposing a dead alloc is a no-op, because it's not possible to get a dead allocation
        // via int2ptr.
        if !this.is_alloc_live(alloc_id) {
            return interp_ok(());
        }
        trace!("Exposing allocation id {alloc_id:?}");
        global_state.exposed.insert(alloc_id);
        // Release the global state before we call `expose_tag`, which may call `get_alloc_info_extra`,
        // which may need access to the global state.
        drop(global_state);
        if this.machine.borrow_tracker.is_some() {
            this.expose_tag(alloc_id, tag)?;
        }
        interp_ok(())
    }

    fn ptr_from_addr_cast(&self, addr: u64) -> InterpResult<'tcx, Pointer> {
        trace!("Casting {:#x} to a pointer", addr);

        let this = self.eval_context_ref();
        let global_state = this.machine.alloc_addresses.borrow();

        // Potentially emit a warning.
        match global_state.provenance_mode {
            ProvenanceMode::Default => {
                // The first time this happens at a particular location, print a warning.
                static DEDUP: SpanDedupDiagnostic = SpanDedupDiagnostic::new();
                this.dedup_diagnostic(&DEDUP, |first| {
                    NonHaltingDiagnostic::Int2Ptr { details: first }
                });
            }
            ProvenanceMode::Strict => {
                throw_machine_stop!(TerminationInfo::Int2PtrWithStrictProvenance);
            }
            ProvenanceMode::Permissive => {}
        }
        // We do *not* look up the `AllocId` here! This is a `usize as ptr` cast, and it is
        // completely legal to do a cast and then `wrapping_offset` to another allocation and only
        // *then* do a memory access. So the allocation that the pointer happens to point to on a
        // cast is fairly irrelevant. Instead we generate this as a "wildcard" pointer, such that
        // *every time the pointer is used*, we do an `AllocId` lookup to find the (exposed)
        // allocation it might be referencing.
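        // For instance (illustrative), the interpreted program may do
        //     let p = std::ptr::with_exposed_provenance::<u8>(a).wrapping_add(d);
        // and only then dereference `p` inside a *different* exposed allocation than the one
        // containing `a`; the lookup happens at access time, not at cast time.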
        interp_ok(Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)))
    }

    /// Convert a relative (tcx) pointer to a Miri pointer.
    fn adjust_alloc_root_pointer(
        &self,
        ptr: interpret::Pointer<CtfeProvenance>,
        tag: BorTag,
        kind: MemoryKind,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let this = self.eval_context_ref();

        let (prov, offset) = ptr.prov_and_relative_offset();
        let alloc_id = prov.alloc_id();

        // Get a pointer to the beginning of this allocation.
        let base_addr = this.addr_from_alloc_id(alloc_id, Some(kind))?;
        let base_ptr = interpret::Pointer::new(
            Provenance::Concrete { alloc_id, tag },
            Size::from_bytes(base_addr),
        );
        // Add offset with the right kind of pointer-overflowing arithmetic.
        interp_ok(base_ptr.wrapping_offset(offset, this))
    }

    // This returns some prepared `MiriAllocBytes`, either because `addr_from_alloc_id` reserved
    // memory space in the past, or by doing the pre-allocation right upon being called.
    fn get_global_alloc_bytes(
        &self,
        id: AllocId,
        bytes: &[u8],
        align: Align,
    ) -> InterpResult<'tcx, MiriAllocBytes> {
        let this = self.eval_context_ref();
        assert!(this.tcx.try_get_global_alloc(id).is_some());
        if !this.machine.native_lib.is_empty() {
            // In native-lib mode, `MiriAllocBytes` for global allocations are handled via `prepared_alloc_bytes`.
            // This additional call ensures that some `MiriAllocBytes` are always prepared, just in case
            // this function gets called before the first time `addr_from_alloc_id` gets called.
            this.addr_from_alloc_id(id, Some(MiriMemoryKind::Global.into()))?;
            // The memory we need here will have already been allocated during an earlier call to
            // `addr_from_alloc_id` for this allocation. So don't create a new `MiriAllocBytes`
            // here; instead, fetch the previously prepared bytes from `prepared_alloc_bytes`.
            let mut global_state = this.machine.alloc_addresses.borrow_mut();
            let mut prepared_alloc_bytes = global_state
                .prepared_alloc_bytes
                .as_mut()
                .unwrap()
                .remove(&id)
                .unwrap_or_else(|| panic!("alloc bytes for {id:?} have not been prepared"));
            // Sanity-check that the prepared allocation has the right size and alignment.
            assert!(prepared_alloc_bytes.as_ptr().is_aligned_to(align.bytes_usize()));
            assert_eq!(prepared_alloc_bytes.len(), bytes.len());
            // Copy allocation contents into prepared memory.
            prepared_alloc_bytes.copy_from_slice(bytes);
            interp_ok(prepared_alloc_bytes)
        } else {
            let params = this.machine.get_default_alloc_params();
            interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align, params))
        }
    }

    /// When a pointer is used for a memory access, this computes which allocation the access is
    /// going to, and the offset within that allocation.
    fn ptr_get_alloc(
        &self,
        ptr: interpret::Pointer<Provenance>,
        size: i64,
    ) -> Option<(AllocId, Size)> {
        let this = self.eval_context_ref();

        let (tag, addr) = ptr.into_raw_parts(); // addr is absolute (Miri provenance)

        let alloc_id = if let Provenance::Concrete { alloc_id, .. } = tag {
            alloc_id
        } else {
            // A wildcard pointer.
            this.alloc_id_from_addr(addr.bytes(), size)?
        };

        // This cannot fail: since we already have a pointer with that provenance,
        // `adjust_alloc_root_pointer` must have been called in the past, so we can just look up
        // the address in the map.
        let base_addr = *this.machine.alloc_addresses.borrow().base_addr.get(&alloc_id).unwrap();

        // Wrapping "addr - base_addr".
        let rel_offset = this.truncate_to_target_usize(addr.bytes().wrapping_sub(base_addr));
        Some((alloc_id, Size::from_bytes(rel_offset)))
    }

    /// Return a list of all exposed allocations.
    fn exposed_allocs(&self) -> Vec<AllocId> {
        let this = self.eval_context_ref();
        this.machine.alloc_addresses.borrow().exposed.iter().copied().collect()
    }
}

impl<'tcx> MiriMachine<'tcx> {
    pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
        let global_state = self.alloc_addresses.get_mut();
        let rng = self.rng.get_mut();

        // We can *not* remove this from `base_addr`, since the interpreter design requires that we
        // be able to retrieve an AllocId + offset for any memory access *before* we check if the
        // access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory
        // access to determine the allocation ID and offset -- and there can still be pointers with
        // `dead_id` that one can attempt to use for a memory access. `ptr_get_alloc` may return
        // `None` only if the pointer truly has no provenance (this ensures consistent error
        // messages).
        // However, we *can* remove it from `int_to_ptr_map`, since any wildcard pointers that exist
        // can no longer actually be accessing that address. This ensures `alloc_id_from_addr` never
        // returns a dead allocation.
        // To avoid a linear scan we first look up the address in `base_addr`, and then find it in
        // `int_to_ptr_map`.
        let addr = *global_state.base_addr.get(&dead_id).unwrap();
        let pos =
            global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
        let removed = global_state.int_to_ptr_map.remove(pos);
        assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing
        // We can also remove it from `exposed`, since this allocation can anyway not be returned by
        // `alloc_id_from_addr` any more.
        global_state.exposed.remove(&dead_id);
        // Also remember this address for future reuse.
        if let Some((_addr_gen, reuse)) = global_state.address_generation.as_mut() {
            let thread = self.threads.active_thread();
            reuse.add_addr(rng, addr, size, align, kind, thread, || {
                // We cannot be in GenMC mode as then `address_generation` is `None`. We cannot use
                // `self.release_clock` as `self.alloc_addresses` is borrowed.
                if let Some(data_race) = self.data_race.as_vclocks_ref() {
                    data_race.release_clock(&self.threads, |clock| clock.clone())
                } else {
                    VClock::default()
                }
            })
        }
    }
}