miri/
machine.rs

1//! Global machine state as well as implementation of the interpreter engine
2//! `Machine` trait.
3
4use std::any::Any;
5use std::borrow::Cow;
6use std::cell::{Cell, RefCell};
7use std::path::Path;
8use std::rc::Rc;
9use std::{fmt, process};
10
11use rand::rngs::StdRng;
12use rand::{Rng, SeedableRng};
13use rustc_abi::{Align, ExternAbi, Size};
14use rustc_apfloat::{Float, FloatConvert};
15use rustc_hir::attrs::InlineAttr;
16use rustc_data_structures::fx::{FxHashMap, FxHashSet};
17#[allow(unused)]
18use rustc_data_structures::static_assert_size;
19use rustc_middle::mir;
20use rustc_middle::query::TyCtxtAt;
21use rustc_middle::ty::layout::{
22    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
23};
24use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
25use rustc_session::config::InliningThreshold;
26use rustc_span::def_id::{CrateNum, DefId};
27use rustc_span::{Span, SpanData, Symbol};
28use rustc_target::callconv::FnAbi;
29
30use crate::alloc_addresses::EvalContextExt;
31use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
32use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
33use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory};
34use crate::*;
35
/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`).
/// 42 - 34 = 8, so we satisfy that requirement exactly.
pub const SIGRTMAX: i32 = 42;

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;
50
/// Extra data stored with each stack frame.
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped during unwinding a panic,
    /// we stop unwinding and use the `CatchUnwindData` to handle catching.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
    /// `#[track_caller]`. We compute this once on creation and store the result, as an
    /// optimization.
    /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
    pub is_user_relevant: bool,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}
75
76impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
77    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
78        // Omitting `timing`, it does not support `Debug`.
79        let FrameExtra { borrow_tracker, catch_unwind, timing: _, is_user_relevant, data_race } =
80            self;
81        f.debug_struct("FrameData")
82            .field("borrow_tracker", borrow_tracker)
83            .field("catch_unwind", catch_unwind)
84            .field("is_user_relevant", is_user_relevant)
85            .field("data_race", data_race)
86            .finish()
87    }
88}
89
90impl VisitProvenance for FrameExtra<'_> {
91    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
92        let FrameExtra {
93            catch_unwind,
94            borrow_tracker,
95            timing: _,
96            is_user_relevant: _,
97            data_race: _,
98        } = self;
99
100        catch_unwind.visit_provenance(visit);
101        borrow_tracker.visit_provenance(visit);
102    }
103}
104
105/// Extra memory kinds
/// Extra memory kinds, beyond the ones the interpreter engine knows about.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`).
    WinLocal,
    /// Memory for args, errno, env vars, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime, e.g. for readdir. Separate from `Machine` because we clean
    /// it up (or expect the user to invoke operations that clean it up) and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program.
    Mmap,
}
136
137impl From<MiriMemoryKind> for MemoryKind {
138    #[inline(always)]
139    fn from(kind: MiriMemoryKind) -> MemoryKind {
140        MemoryKind::Machine(kind)
141    }
142}
143
144impl MayLeak for MiriMemoryKind {
145    #[inline(always)]
146    fn may_leak(self) -> bool {
147        use self::MiriMemoryKind::*;
148        match self {
149            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
150            Machine | Global | ExternStatic | Tls | Mmap => true,
151        }
152    }
153}
154
155impl MiriMemoryKind {
156    /// Whether we have a useful allocation span for an allocation of this kind.
157    fn should_save_allocation_span(self) -> bool {
158        use self::MiriMemoryKind::*;
159        match self {
160            // Heap allocations are fine since the `Allocation` is created immediately.
161            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
162            // Everything else is unclear, let's not show potentially confusing spans.
163            Machine | Global | ExternStatic | Tls | Runtime => false,
164        }
165    }
166}
167
168impl fmt::Display for MiriMemoryKind {
169    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
170        use self::MiriMemoryKind::*;
171        match self {
172            Rust => write!(f, "Rust heap"),
173            Miri => write!(f, "Miri bare-metal heap"),
174            C => write!(f, "C heap"),
175            WinHeap => write!(f, "Windows heap"),
176            WinLocal => write!(f, "Windows local memory"),
177            Machine => write!(f, "machine-managed memory"),
178            Runtime => write!(f, "language runtime memory"),
179            Global => write!(f, "global (static or const)"),
180            ExternStatic => write!(f, "extern static"),
181            Tls => write!(f, "thread-local static"),
182            Mmap => write!(f, "mmap"),
183        }
184    }
185}
186
/// The interpreter's memory-kind type, instantiated with Miri's extra kinds.
pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;
188
/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait needs that because validity checking
// *might* be recursive and then it has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance, we exactly know which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot be
    /// actually implemented in Miri. So instead, we approximate this, erroring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}
221
222/// The "extra" information a pointer has over a regular AllocId.
223#[derive(Copy, Clone, PartialEq)]
224pub enum ProvenanceExtra {
225    Concrete(BorTag),
226    Wildcard,
227}
228
229#[cfg(target_pointer_width = "64")]
230static_assert_size!(StrictPointer, 24);
231// FIXME: this would with in 24bytes but layout optimizations are not smart enough
232// #[cfg(target_pointer_width = "64")]
233//static_assert_size!(Pointer, 24);
234#[cfg(target_pointer_width = "64")]
235static_assert_size!(Scalar, 32);
236
237impl fmt::Debug for Provenance {
238    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
239        match self {
240            Provenance::Concrete { alloc_id, tag } => {
241                // Forward `alternate` flag to `alloc_id` printing.
242                if f.alternate() {
243                    write!(f, "[{alloc_id:#?}]")?;
244                } else {
245                    write!(f, "[{alloc_id:?}]")?;
246                }
247                // Print Borrow Tracker tag.
248                write!(f, "{tag:?}")?;
249            }
250            Provenance::Wildcard => {
251                write!(f, "[wildcard]")?;
252            }
253        }
254        Ok(())
255    }
256}
257
258impl interpret::Provenance for Provenance {
259    /// We use absolute addresses in the `offset` of a `StrictPointer`.
260    const OFFSET_IS_ADDR: bool = true;
261
262    /// Miri implements wildcard provenance.
263    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);
264
265    fn get_alloc_id(self) -> Option<AllocId> {
266        match self {
267            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
268            Provenance::Wildcard => None,
269        }
270    }
271
272    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
273        let (prov, addr) = ptr.into_raw_parts(); // offset is absolute address
274        write!(f, "{:#x}", addr.bytes())?;
275        if f.alternate() {
276            write!(f, "{prov:#?}")?;
277        } else {
278            write!(f, "{prov:?}")?;
279        }
280        Ok(())
281    }
282
283    fn join(left: Option<Self>, right: Option<Self>) -> Option<Self> {
284        match (left, right) {
285            // If both are the *same* concrete tag, that is the result.
286            (
287                Some(Provenance::Concrete { alloc_id: left_alloc, tag: left_tag }),
288                Some(Provenance::Concrete { alloc_id: right_alloc, tag: right_tag }),
289            ) if left_alloc == right_alloc && left_tag == right_tag => left,
290            // If one side is a wildcard, the best possible outcome is that it is equal to the other
291            // one, and we use that.
292            (Some(Provenance::Wildcard), o) | (o, Some(Provenance::Wildcard)) => o,
293            // Otherwise, fall back to `None`.
294            _ => None,
295        }
296    }
297}
298
299impl fmt::Debug for ProvenanceExtra {
300    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
301        match self {
302            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
303            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
304        }
305    }
306}
307
308impl ProvenanceExtra {
309    pub fn and_then<T>(self, f: impl FnOnce(BorTag) -> Option<T>) -> Option<T> {
310        match self {
311            ProvenanceExtra::Concrete(pid) => f(pid),
312            ProvenanceExtra::Wildcard => None,
313        }
314    }
315}
316
/// Extra per-allocation data that Miri attaches to every interpreter allocation.
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// Global state of the borrow tracker, if enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Extra state for data race detection.
    ///
    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`.
    pub data_race: AllocDataRaceHandler,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists
    /// if this allocation is leakable. The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization primitives like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync: FxHashMap<Size, Box<dyn Any>>,
}
337
// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        // Reaching this indicates a bug in how the engine handles our allocations.
        panic!("our allocations should never be cloned");
    }
}
345
346impl VisitProvenance for AllocExtra<'_> {
347    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
348        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self;
349
350        borrow_tracker.visit_provenance(visit);
351        data_race.visit_provenance(visit);
352    }
353}
354
/// Precomputed layouts of primitive types that are commonly used inside Miri.
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}
374
375impl<'tcx> PrimitiveLayouts<'tcx> {
376    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
377        let tcx = layout_cx.tcx();
378        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
379        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
380        Ok(Self {
381            unit: layout_cx.layout_of(tcx.types.unit)?,
382            i8: layout_cx.layout_of(tcx.types.i8)?,
383            i16: layout_cx.layout_of(tcx.types.i16)?,
384            i32: layout_cx.layout_of(tcx.types.i32)?,
385            i64: layout_cx.layout_of(tcx.types.i64)?,
386            i128: layout_cx.layout_of(tcx.types.i128)?,
387            isize: layout_cx.layout_of(tcx.types.isize)?,
388            u8: layout_cx.layout_of(tcx.types.u8)?,
389            u16: layout_cx.layout_of(tcx.types.u16)?,
390            u32: layout_cx.layout_of(tcx.types.u32)?,
391            u64: layout_cx.layout_of(tcx.types.u64)?,
392            u128: layout_cx.layout_of(tcx.types.u128)?,
393            usize: layout_cx.layout_of(tcx.types.usize)?,
394            bool: layout_cx.layout_of(tcx.types.bool)?,
395            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
396            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
397        })
398    }
399
400    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
401        match size.bits() {
402            8 => Some(self.u8),
403            16 => Some(self.u16),
404            32 => Some(self.u32),
405            64 => Some(self.u64),
406            128 => Some(self.u128),
407            _ => None,
408        }
409    }
410
411    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
412        match size.bits() {
413            8 => Some(self.i8),
414            16 => Some(self.i16),
415            32 => Some(self.i32),
416            64 => Some(self.i64),
417            128 => Some(self.i128),
418            _ => None,
419        }
420    }
421}
422
/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Depending on settings, this will be `None`,
    /// global data for a data race detector,
    /// or the context required for running in GenMC mode.
    ///
    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
    pub data_race: GlobalDataRaceHandler,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all EpollEventInterest.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotone clock.
    pub(crate) monotonic_clock: MonotonicClock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect at all, it is just tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` exported under the given name is found.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// Equivalent setting as RUST_BACKTRACE on encountering an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered local for the purposes of error reporting.
    pub(crate) local_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by ptr_to_int, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocator used for the machine's `AllocBytes` in native-libs mode.
    pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>,

    /// The allocation IDs to report when they are being allocated
    /// (helps for debugging memory leaks and use after free bugs).
    tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of compare_exchange_weak, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handle of the optional shared object file for native functions.
    #[cfg(all(unix, feature = "native-lib"))]
    pub native_lib: Vec<(libloading::Library, std::path::PathBuf)>,
    #[cfg(not(all(unix, feature = "native-lib")))]
    pub native_lib: Vec<!>,

    /// Run a garbage collector for BorTags every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last BorTag GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values.
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// Remembers whether we already warned about an extern type with Stacked Borrows.
    pub(crate) sb_extern_type_warned: Cell<bool>,
    /// Remembers whether we already warned about sharing memory with a native call.
    #[allow(unused)]
    pub(crate) native_call_mem_warned: Cell<bool>,
    /// Remembers which shims have already shown the warning about erroring in isolation.
    pub(crate) reject_in_isolation_warned: RefCell<FxHashSet<String>>,
    /// Remembers which int2ptr casts we have already warned about.
    pub(crate) int2ptr_warned: RefCell<FxHashSet<Span>>,

    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,

    /// Always prefer the intrinsic fallback body over the native Miri implementation.
    pub force_intrinsic_fallback: bool,

    /// Whether floating-point operations can behave non-deterministically.
    pub float_nondet: bool,
    /// Whether floating-point operations can have a non-deterministic rounding error.
    pub float_rounding_error: bool,
}
603
604impl<'tcx> MiriMachine<'tcx> {
605    /// Create a new MiriMachine.
606    ///
607    /// Invariant: `genmc_ctx.is_some() == config.genmc_config.is_some()`
608    pub(crate) fn new(
609        config: &MiriConfig,
610        layout_cx: LayoutCx<'tcx>,
611        genmc_ctx: Option<Rc<GenmcCtx>>,
612    ) -> Self {
613        let tcx = layout_cx.tcx();
614        let local_crates = helpers::get_local_crates(tcx);
615        let layouts =
616            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
617        let profiler = config.measureme_out.as_ref().map(|out| {
618            let crate_name =
619                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
620            let pid = process::id();
621            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
622            // the PID is padded so that the nondeterministic value of the PID does not spread
623            // nondeterminism to the allocator. In Miri we are not aiming for such performance
624            // control, we just pad for consistency with rustc.
625            let filename = format!("{crate_name}-{pid:07}");
626            let path = Path::new(out).join(filename);
627            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
628        });
629        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
630        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
631        let data_race = if config.genmc_config.is_some() {
632            // `genmc_ctx` persists across executions, so we don't create a new one here.
633            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
634        } else if config.data_race_detector {
635            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
636        } else {
637            GlobalDataRaceHandler::None
638        };
639        // Determine page size, stack address, and stack size.
640        // These values are mostly meaningless, but the stack address is also where we start
641        // allocating physical integer addresses for all allocations.
642        let page_size = if let Some(page_size) = config.page_size {
643            page_size
644        } else {
645            let target = &tcx.sess.target;
646            match target.arch.as_ref() {
647                "wasm32" | "wasm64" => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
648                "aarch64" => {
649                    if target.options.vendor.as_ref() == "apple" {
650                        // No "definitive" source, but see:
651                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
652                        // https://github.com/ziglang/zig/issues/11308 etc.
653                        16 * 1024
654                    } else {
655                        4 * 1024
656                    }
657                }
658                _ => 4 * 1024,
659            }
660        };
661        // On 16bit targets, 32 pages is more than the entire address space!
662        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
663        let stack_size =
664            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
665        assert!(
666            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
667            "miri only supports up to {} CPUs, but {} were configured",
668            cpu_affinity::MAX_CPUS,
669            config.num_cpus
670        );
671        let threads = ThreadManager::new(config);
672        let mut thread_cpu_affinity = FxHashMap::default();
673        if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") {
674            thread_cpu_affinity
675                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
676        }
677        MiriMachine {
678            tcx,
679            borrow_tracker,
680            data_race,
681            alloc_addresses: RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr)),
682            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
683            env_vars: EnvVars::default(),
684            main_fn_ret_place: None,
685            argc: None,
686            argv: None,
687            cmd_line: None,
688            tls: TlsData::default(),
689            isolated_op: config.isolated_op,
690            validation: config.validation,
691            fds: shims::FdTable::init(config.mute_stdout_stderr),
692            epoll_interests: shims::EpollInterestTable::new(),
693            dirs: Default::default(),
694            layouts,
695            threads,
696            thread_cpu_affinity,
697            static_roots: Vec::new(),
698            profiler,
699            string_cache: Default::default(),
700            exported_symbols_cache: FxHashMap::default(),
701            backtrace_style: config.backtrace_style,
702            local_crates,
703            extern_statics: FxHashMap::default(),
704            rng: RefCell::new(rng),
705            allocator: if !config.native_lib.is_empty() {
706                Some(Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new())))
707            } else { None },
708            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
709            track_alloc_accesses: config.track_alloc_accesses,
710            check_alignment: config.check_alignment,
711            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
712            preemption_rate: config.preemption_rate,
713            report_progress: config.report_progress,
714            basic_block_count: 0,
715            monotonic_clock: MonotonicClock::new(config.isolated_op == IsolatedOp::Allow),
716            #[cfg(all(unix, feature = "native-lib"))]
717            native_lib: config.native_lib.iter().map(|lib_file_path| {
718                let host_triple = rustc_session::config::host_tuple();
719                let target_triple = tcx.sess.opts.target_triple.tuple();
720                // Check if host target == the session target.
721                if host_triple != target_triple {
722                    panic!(
723                        "calling native C functions in linked .so file requires host and target to be the same: \
724                        host={host_triple}, target={target_triple}",
725                    );
726                }
727                // Note: it is the user's responsibility to provide a correct SO file.
728                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
729                // undefined behaviour in Miri itself!
730                (
731                    unsafe {
732                        libloading::Library::new(lib_file_path)
733                            .expect("failed to read specified extern shared object file")
734                    },
735                    lib_file_path.clone(),
736                )
737            }).collect(),
738            #[cfg(not(all(unix, feature = "native-lib")))]
739            native_lib: config.native_lib.iter().map(|_| {
740                panic!("calling functions from native libraries via FFI is not supported in this build of Miri")
741            }).collect(),
742            gc_interval: config.gc_interval,
743            since_gc: 0,
744            num_cpus: config.num_cpus,
745            page_size,
746            stack_addr,
747            stack_size,
748            collect_leak_backtraces: config.collect_leak_backtraces,
749            allocation_spans: RefCell::new(FxHashMap::default()),
750            symbolic_alignment: RefCell::new(FxHashMap::default()),
751            union_data_ranges: FxHashMap::default(),
752            pthread_mutex_sanity: Cell::new(false),
753            pthread_rwlock_sanity: Cell::new(false),
754            pthread_condvar_sanity: Cell::new(false),
755            sb_extern_type_warned: Cell::new(false),
756            native_call_mem_warned: Cell::new(false),
757            reject_in_isolation_warned: Default::default(),
758            int2ptr_warned: Default::default(),
759            mangle_internal_symbol_cache: Default::default(),
760            force_intrinsic_fallback: config.force_intrinsic_fallback,
761            float_nondet: config.float_nondet,
762            float_rounding_error: config.float_rounding_error,
763        }
764    }
765
    /// Second-stage initialization that can only happen once a full `InterpCx` exists:
    /// sets up environment variables, extern statics, and the thread manager
    /// (with the callback to run when the main stack becomes empty).
    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }
776
    /// Register the freshly allocated shim for the extern static `name` so that
    /// `extern_static_pointer` can later resolve accesses to it.
    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This got just allocated, so there definitely is a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        // `try_insert` + `unwrap` asserts that each extern static is only registered once.
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }
782
783    pub(crate) fn communicate(&self) -> bool {
784        self.isolated_op == IsolatedOp::Allow
785    }
786
787    /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
788    pub(crate) fn is_local(&self, frame: &FrameInfo<'_>) -> bool {
789        let def_id = frame.instance.def_id();
790        def_id.is_local() || self.local_crates.contains(&def_id.krate)
791    }
792
793    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
794    pub(crate) fn handle_abnormal_termination(&mut self) {
795        // All strings in the profile data are stored in a single string table which is not
796        // written to disk until the profiler is dropped. If the interpreter exits without dropping
797        // the profiler, it is not possible to interpret the profile data and all measureme tools
798        // will panic when given the file.
799        drop(self.profiler.take());
800    }
801
    /// Returns the machine's emulated page size as an `Align`.
    /// The `unwrap` assumes `page_size` is a valid (power-of-two) alignment.
    pub(crate) fn page_align(&self) -> Align {
        Align::from_bytes(self.page_size).unwrap()
    }
805
806    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
807        self.allocation_spans
808            .borrow()
809            .get(&alloc_id)
810            .map(|(allocated, _deallocated)| allocated.data())
811    }
812
813    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
814        self.allocation_spans
815            .borrow()
816            .get(&alloc_id)
817            .and_then(|(_allocated, deallocated)| *deallocated)
818            .map(Span::data)
819    }
820
    /// Build the `AllocExtra` for a new allocation: borrow-tracker state, data-race state,
    /// an optional leak backtrace, and (for some machine kinds) a recorded allocation span.
    /// Also emits a diagnostic if this allocation's id is being tracked.
    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id, size, align, kind));
        }

        // Per-allocation borrow-tracker state, if a borrow tracker is enabled.
        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        // Per-allocation data-race state, matching the global data-race handler.
        let data_race = match &ecx.machine.data_race {
            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
            GlobalDataRaceHandler::Vclocks(data_race) =>
                AllocDataRaceHandler::Vclocks(
                    data_race::AllocState::new_allocation(
                        data_race,
                        &ecx.machine.threads,
                        size,
                        kind,
                        ecx.machine.current_span(),
                    ),
                    // Weak-memory emulation state is only kept when weak memory is enabled.
                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
                ),
            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
                // GenMC learns about new allocations directly from the alloc_addresses module,
                // since it has to be able to control the address at which they are placed.
                AllocDataRaceHandler::Genmc
            }
        };

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        // Record the allocation span for machine kinds that opted into it; the deallocation
        // span slot starts out as `None` and is filled in on deallocation.
        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_span(), None));
        }

        interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
    }
876}
877
impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        // Exhaustively destructure the machine so that adding a new field forces this
        // function to be updated: every field must be either visited below or explicitly
        // ignored with `_` here.
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            epoll_interests:_,
            tcx: _,
            isolated_op: _,
            validation: _,
            monotonic_clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            local_crates: _,
            rng: _,
            allocator: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            sb_extern_type_warned: _,
            native_call_mem_warned: _,
            reject_in_isolation_warned: _,
            int2ptr_warned: _,
            mangle_internal_symbol_cache: _,
            force_intrinsic_fallback: _,
            float_nondet: _,
            float_rounding_error: _,
        } = self;

        // Visit every field that may hold provenance (pointers) so the provenance GC
        // considers them live.
        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}
958
959/// A rustc InterpCx for Miri.
960pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;
961
962/// A little trait that's useful to be inherited by extension traits.
963pub trait MiriInterpCxExt<'tcx> {
964    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
965    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
966}
// The identity impl: the interpreter context is its own evaluation context.
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}
977
978/// Machine hook implementations.
979impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
980    type MemoryKind = MiriMemoryKind;
981    type ExtraFnVal = DynSym;
982
983    type FrameExtra = FrameExtra<'tcx>;
984    type AllocExtra = AllocExtra<'tcx>;
985
986    type Provenance = Provenance;
987    type ProvenanceExtra = ProvenanceExtra;
988    type Bytes = MiriAllocBytes;
989
990    type MemoryMap =
991        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;
992
993    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);
994
995    const PANIC_ON_ALLOC_FAIL: bool = false;
996
997    #[inline(always)]
998    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
999        ecx.machine.check_alignment != AlignmentCheck::None
1000    }
1001
    /// Custom (symbolic) alignment check: instead of looking at the actual base address,
    /// use the alignment that was *promised* for this allocation (via `miri_promise_symbolic_alignment`
    /// or the allocation's own alignment). Returns `None` to fall back to the built-in check.
    #[inline(always)]
    fn alignment_check(
        ecx: &MiriInterpCx<'tcx>,
        alloc_id: AllocId,
        alloc_align: Align,
        alloc_kind: AllocKind,
        offset: Size,
        align: Align,
    ) -> Option<Misalignment> {
        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
            // Just use the built-in check.
            return None;
        }
        if alloc_kind != AllocKind::LiveData {
            // Can't have any extra info here.
            return None;
        }
        // Let's see which alignment we have been promised for this allocation.
        // Default: the allocation's own alignment, promised at offset 0.
        let (promised_offset, promised_align) = ecx
            .machine
            .symbolic_alignment
            .borrow()
            .get(&alloc_id)
            .copied()
            .unwrap_or((Size::ZERO, alloc_align));
        if promised_align < align {
            // Definitely not enough.
            Some(Misalignment { has: promised_align, required: align })
        } else {
            // What's the offset between us and the promised alignment?
            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
            // That must also be aligned.
            if distance.is_multiple_of(align.bytes()) {
                // All looking good!
                None
            } else {
                // The biggest power of two through which `distance` is divisible.
                // (`distance` is nonzero here, since zero is a multiple of everything.)
                let distance_pow2 = 1 << distance.trailing_zeros();
                Some(Misalignment {
                    has: Align::from_bytes(distance_pow2).unwrap(),
                    required: align,
                })
            }
        }
    }
1047
1048    #[inline(always)]
1049    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
1050        ecx.machine.validation != ValidationMode::No
1051    }
1052    #[inline(always)]
1053    fn enforce_validity_recursively(
1054        ecx: &InterpCx<'tcx, Self>,
1055        _layout: TyAndLayout<'tcx>,
1056    ) -> bool {
1057        ecx.machine.validation == ValidationMode::Deep
1058    }
1059
    /// Optional overflow checks follow the session's `overflow_checks` setting:
    /// they are ignored exactly when the session has them disabled.
    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        !ecx.tcx.sess.overflow_checks()
    }
1064
    /// Reject calls to functions whose required target features are not available in
    /// this session; this is UB (or, on WASM, a module-validation abort).
    fn check_fn_target_features(
        ecx: &MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
    ) -> InterpResult<'tcx> {
        let attrs = ecx.tcx.codegen_instance_attrs(instance.def);
        if attrs
            .target_features
            .iter()
            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
        {
            // Build a comma-separated list of the missing features for the error message.
            // NOTE(review): this filter additionally skips `implied` features while the `any`
            // check above does not, so the list could in principle come out empty — confirm
            // that is intended.
            let unavailable = attrs
                .target_features
                .iter()
                .filter(|&feature| {
                    !feature.implied && !ecx.tcx.sess.target_features.contains(&feature.name)
                })
                .fold(String::new(), |mut s, feature| {
                    if !s.is_empty() {
                        s.push_str(", ");
                    }
                    s.push_str(feature.name.as_str());
                    s
                });
            let msg = format!(
                "calling a function that requires unavailable target features: {unavailable}"
            );
            // On WASM, this is not UB, but instead gets rejected during validation of the module
            // (see #84988).
            if ecx.tcx.sess.target.is_like_wasm {
                throw_machine_stop!(TerminationInfo::Abort(msg));
            } else {
                throw_ub_format!("{msg}");
            }
        }
        interp_ok(())
    }
1101
    /// Resolve a function call: for foreign items, try to emulate them (shims); otherwise
    /// hand back the MIR body for the engine to execute.
    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be Ok(None) if we're emulating the intrinsic entirely within Miri (no need
            // to run extra MIR), and Ok(Some(body)) if we found MIR to run for the
            // foreign function
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
            let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
        }

        // Otherwise, load the MIR.
        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }
1128
    /// Handle a call to an "extra" function value (a `DynSym`, i.e. a symbol resolved at
    /// runtime, e.g. via `dlsym`) by emulating it like a foreign item.
    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        fn_val: DynSym,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
    }
1142
    /// Delegate intrinsic calls to Miri's intrinsic shims (in the `intrinsics` module).
    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        ecx.call_intrinsic(instance, args, dest, ret, unwind)
    }
1154
    /// Delegate MIR `Assert` terminator failures to Miri's panic machinery.
    #[inline(always)]
    fn assert_panic(
        ecx: &mut MiriInterpCx<'tcx>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }
1163
    /// Start a non-unwinding panic (e.g. for `panic_nounwind` lang-item situations).
    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        ecx.start_panic_nounwind(msg)
    }
1167
    /// Handle a `terminate` during unwinding by calling the corresponding panic lang item
    /// (which is expected not to return).
    fn unwind_terminate(
        ecx: &mut InterpCx<'tcx, Self>,
        reason: mir::UnwindTerminateReason,
    ) -> InterpResult<'tcx> {
        // Call the lang item.
        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
        ecx.call_function(
            panic,
            ExternAbi::Rust,
            &[],
            None,
            // The lang item must not return, and unwinding out of it is unreachable.
            ReturnContinuation::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
        )?;
        interp_ok(())
    }
1184
    /// Delegate pointer binary operations (comparison, offset arithmetic, ...) to Miri's
    /// implementation in the `operator` module.
    #[inline(always)]
    fn binary_ptr_op(
        ecx: &MiriInterpCx<'tcx>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        ecx.binary_ptr_op(bin_op, left, right)
    }
1194
    /// Generate the NaN produced by a float operation on the given inputs
    /// (delegates to Miri's NaN-nondeterminism logic).
    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }
1202
    /// Apply nondeterministic float rounding error to a value, with a relative error of
    /// at most 4 ULP (hence the `2 == log2(4)` argument).
    #[inline(always)]
    fn apply_float_nondet(
        ecx: &mut InterpCx<'tcx, Self>,
        val: ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        crate::math::apply_random_float_error_to_imm(ecx, val, 2 /* log2(4) */)
    }
1210
    /// Decide the result of `min`/`max` when both operands compare equal
    /// (delegates to Miri's float logic, e.g. for signed zeroes).
    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }
1215
    /// Whether `ub_checks` (library UB assertions) are enabled, per the session settings.
    #[inline(always)]
    fn ub_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.ub_checks())
    }
1220
    /// Whether contract checks are enabled, per the session settings.
    #[inline(always)]
    fn contract_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.contract_checks())
    }
1225
    /// Get a pointer to the per-thread instance of the given thread-local static,
    /// creating the allocation on first access for the current thread.
    #[inline(always)]
    fn thread_local_static_pointer(
        ecx: &mut MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }
1233
    /// Resolve an `extern static` to the shim allocation Miri registered for it
    /// (via `add_extern_static`), erroring if no shim exists or if the declared
    /// size/alignment does not match the shim's.
    fn extern_static_pointer(
        ecx: &MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
            // Various parts of the engine rely on `get_alloc_info` for size and alignment
            // information. That uses the type information of this static.
            // Make sure it matches the Miri allocation for this.
            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
                panic!("extern_statics cannot contain wildcards")
            };
            let info = ecx.get_alloc_info(alloc_id);
            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
            let extern_decl_layout =
                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
                throw_unsup_format!(
                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
                    but Miri emulates it via an extern static shim \
                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
                    name = ecx.tcx.def_path_str(def_id),
                    krate = ecx.tcx.crate_name(def_id.krate),
                    decl_size = extern_decl_layout.size.bytes(),
                    decl_align = extern_decl_layout.align.abi.bytes(),
                    shim_size = info.size.bytes(),
                    shim_align = info.align.bytes(),
                )
            }
            interp_ok(ptr)
        } else {
            throw_unsup_format!("extern static `{link_name}` is not supported by Miri",)
        }
    }
1269
    /// Initialize the extra state of a machine-local (non-global) allocation by delegating
    /// to `MiriMachine::init_allocation`. Globals go through `adjust_global_allocation` instead.
    fn init_local_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Self::AllocExtra> {
        assert!(kind != MiriMemoryKind::Global.into());
        MiriMachine::init_allocation(ecx, id, kind, size, align)
    }
1280
    /// Turn a CTFE pointer into a Miri pointer: pick a borrow-tracker root tag (if a borrow
    /// tracker is enabled) and let the alloc_addresses module assign a concrete address.
    fn adjust_alloc_root_pointer(
        ecx: &MiriInterpCx<'tcx>,
        ptr: interpret::Pointer<CtfeProvenance>,
        kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
        let alloc_id = ptr.provenance.alloc_id();
        if cfg!(debug_assertions) {
            // The machine promises to never call us on thread-local or extern statics.
            match ecx.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
                    panic!("adjust_alloc_root_pointer called on thread-local static")
                }
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
                    panic!("adjust_alloc_root_pointer called on extern static")
                }
                _ => {}
            }
        }
        // FIXME: can we somehow preserve the immutability of `ptr`?
        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
        } else {
            // Value does not matter, SB is disabled
            BorTag::default()
        };
        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
    }
1309
    /// Called on `usize as ptr` casts; delegates to the alloc_addresses module,
    /// which decides what provenance (if any) the resulting pointer gets.
    #[inline(always)]
    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
        ecx.ptr_from_addr_cast(addr)
    }
1315
    /// Called on `ptr as usize` casts.
    /// (Actually computing the resulting `usize` doesn't need machine help,
    /// that's just `Scalar::try_to_int`.)
    /// This marks the pointer's provenance as exposed, so that later int-to-ptr casts
    /// may pick it up.
    #[inline(always)]
    fn expose_provenance(
        ecx: &InterpCx<'tcx, Self>,
        provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        ecx.expose_provenance(provenance)
    }
1326
1327    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
1328    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
1329    /// be used to disambiguate situations where a wildcard pointer sits right in between two
1330    /// allocations.
1331    ///
1332    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
1333    /// The resulting `AllocId` will just be used for that one step and the forgotten again
1334    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
1335    /// stored in machine state).
1336    ///
1337    /// When this fails, that means the pointer does not point to a live allocation.
1338    fn ptr_get_alloc(
1339        ecx: &MiriInterpCx<'tcx>,
1340        ptr: StrictPointer,
1341        size: i64,
1342    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
1343        let rel = ecx.ptr_get_alloc(ptr, size);
1344
1345        rel.map(|(alloc_id, size)| {
1346            let tag = match ptr.provenance {
1347                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
1348                Provenance::Wildcard => ProvenanceExtra::Wildcard,
1349            };
1350            (alloc_id, size, tag)
1351        })
1352    }
1353
    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
    ///
    /// If `alloc` contains pointers, then they are all pointing to globals.
    ///
    /// This should avoid copying if no work has to be done! If this returns an owned
    /// allocation (because a copy had to be done to adjust things), machine memory will
    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
    /// owned allocation to the map even when the map is shared.)
    fn adjust_global_allocation<'b>(
        ecx: &InterpCx<'tcx, Self>,
        id: AllocId,
        alloc: &'b Allocation,
    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
    {
        // Adjust the allocation's bytes and all embedded pointers to Miri's representation.
        let alloc = alloc.adjust_from_tcx(
            &ecx.tcx,
            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
            |ptr| ecx.global_root_pointer(ptr),
        )?;
        // Globals get the machine's `Global` memory kind and the usual per-allocation extra state.
        let kind = MiriMemoryKind::Global.into();
        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
        interp_ok(Cow::Owned(alloc.with_extra(extra)))
    }
1377
    /// Hook run before every memory read: emits tracking diagnostics, informs the
    /// data-race / weak-memory machinery, and then the borrow tracker.
    #[inline(always)]
    fn before_memory_read(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_extra: &AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read));
        }
        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_data_race) => {
                // With a global vector-clock handler, the per-allocation state must match.
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race
                else {
                    unreachable!();
                };
                data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }
1412
    /// Hook run before every memory write: emits tracking diagnostics, informs the
    /// data-race / weak-memory machinery, and then the borrow tracker.
    #[inline(always)]
    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write));
        }
        // Data-race checks run before borrow-tracker checks (see `before_memory_read`).
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?;
            }
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                // With a global vector-clock handler, the per-allocation state must match.
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
                    &mut alloc_extra.data_race
                else {
                    unreachable!()
                };
                data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }
1448
    /// Hook run before a deallocation: emits tracking diagnostics, treats the free as a
    /// write for data-race purposes, notifies the borrow tracker, records the deallocation
    /// span, and releases the allocation's address range.
    #[inline(always)]
    fn before_memory_deallocation(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra),
        size: Size,
        align: Align,
        kind: MemoryKind,
    ) -> InterpResult<'tcx> {
        if machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.handle_dealloc(machine, ptr.addr(), size, align, kind)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                // Deallocation counts as a write over the whole allocation.
                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
                data_race.write(
                    alloc_id,
                    alloc_range(Size::ZERO, size),
                    NaWriteType::Deallocate,
                    None,
                    machine,
                )?;
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_deallocation(alloc_id, prove_extra, size, machine)?;
        }
        // Record when (at what span) this allocation was freed, for later diagnostics.
        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
        {
            *deallocated_at = Some(machine.current_span());
        }
        machine.free_alloc_id(alloc_id, size, align, kind);
        interp_ok(())
    }
1488
1489    #[inline(always)]
1490    fn retag_ptr_value(
1491        ecx: &mut InterpCx<'tcx, Self>,
1492        kind: mir::RetagKind,
1493        val: &ImmTy<'tcx>,
1494    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
1495        if ecx.machine.borrow_tracker.is_some() {
1496            ecx.retag_ptr_value(kind, val)
1497        } else {
1498            interp_ok(val.clone())
1499        }
1500    }
1501
1502    #[inline(always)]
1503    fn retag_place_contents(
1504        ecx: &mut InterpCx<'tcx, Self>,
1505        kind: mir::RetagKind,
1506        place: &PlaceTy<'tcx>,
1507    ) -> InterpResult<'tcx> {
1508        if ecx.machine.borrow_tracker.is_some() {
1509            ecx.retag_place_contents(kind, place)?;
1510        }
1511        interp_ok(())
1512    }
1513
1514    fn protect_in_place_function_argument(
1515        ecx: &mut InterpCx<'tcx, Self>,
1516        place: &MPlaceTy<'tcx>,
1517    ) -> InterpResult<'tcx> {
1518        // If we have a borrow tracker, we also have it set up protection so that all reads *and
1519        // writes* during this call are insta-UB.
1520        let protected_place = if ecx.machine.borrow_tracker.is_some() {
1521            ecx.protect_place(place)?
1522        } else {
1523            // No borrow tracker.
1524            place.clone()
1525        };
1526        // We do need to write `uninit` so that even after the call ends, the former contents of
1527        // this place cannot be observed any more. We do the write after retagging so that for
1528        // Tree Borrows, this is considered to activate the new tag.
1529        // Conveniently this also ensures that the place actually points to suitable memory.
1530        ecx.write_uninit(&protected_place)?;
1531        // Now we throw away the protected place, ensuring its tag is never used again.
1532        interp_ok(())
1533    }
1534
1535    #[inline(always)]
1536    fn init_frame(
1537        ecx: &mut InterpCx<'tcx, Self>,
1538        frame: Frame<'tcx, Provenance>,
1539    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
1540        // Start recording our event before doing anything else
1541        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
1542            let fn_name = frame.instance().to_string();
1543            let entry = ecx.machine.string_cache.entry(fn_name.clone());
1544            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));
1545
1546            Some(profiler.start_recording_interval_event_detached(
1547                *name,
1548                measureme::EventId::from_label(*name),
1549                ecx.active_thread().to_u32(),
1550            ))
1551        } else {
1552            None
1553        };
1554
1555        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();
1556
1557        let extra = FrameExtra {
1558            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
1559            catch_unwind: None,
1560            timing,
1561            is_user_relevant: ecx.machine.is_user_relevant(&frame),
1562            data_race: ecx
1563                .machine
1564                .data_race
1565                .as_vclocks_ref()
1566                .map(|_| data_race::FrameState::default()),
1567        };
1568
1569        interp_ok(frame.with_extra(extra))
1570    }
1571
    /// Returns the call stack of the currently active thread.
    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        ecx.active_thread_stack()
    }
1577
    /// Returns a mutable reference to the call stack of the currently active thread.
    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        ecx.active_thread_stack_mut()
    }
1583
1584    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1585        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
1586        ecx.machine.since_gc += 1;
1587        // Possibly report our progress. This will point at the terminator we are about to execute.
1588        if let Some(report_progress) = ecx.machine.report_progress {
1589            if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
1590                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
1591                    block_count: ecx.machine.basic_block_count,
1592                });
1593            }
1594        }
1595
1596        // Search for BorTags to find all live pointers, then remove all other tags from borrow
1597        // stacks.
1598        // When debug assertions are enabled, run the GC as often as possible so that any cases
1599        // where it mistakenly removes an important tag become visible.
1600        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
1601            ecx.machine.since_gc = 0;
1602            ecx.run_provenance_gc();
1603        }
1604
1605        // These are our preemption points.
1606        // (This will only take effect after the terminator has been executed.)
1607        ecx.maybe_preempt_active_thread();
1608
1609        // Make sure some time passes.
1610        ecx.machine.monotonic_clock.tick();
1611
1612        interp_ok(())
1613    }
1614
1615    #[inline(always)]
1616    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1617        if ecx.frame().extra.is_user_relevant {
1618            // We just pushed a local frame, so we know that the topmost local frame is the topmost
1619            // frame. If we push a non-local frame, there's no need to do anything.
1620            let stack_len = ecx.active_thread_stack().len();
1621            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
1622        }
1623        interp_ok(())
1624    }
1625
    fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        let frame = ecx.frame();
        // We want this *before* the return value copy, because the return place itself is protected
        // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value.
        if ecx.machine.borrow_tracker.is_some() {
            ecx.on_stack_pop(frame)?;
        }
        if frame.extra.is_user_relevant {
            // All that we store is whether or not the frame we just removed is user-relevant, so
            // now we have no idea where the next topmost user-relevant frame is. So we recompute
            // it. (If this ever becomes a bottleneck, we could have `push` store the previous
            // user-relevant frame and restore that here.)
            // We have to skip the frame that is just being popped.
            ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
        }
        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
        // concurrency and what it prints is just plain wrong. So we print our own information
        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
        info!("Leaving {}", ecx.frame().instance());
        interp_ok(())
    }
1647
    #[inline(always)]
    fn after_stack_pop(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        unwinding: bool,
    ) -> InterpResult<'tcx, ReturnAction> {
        let res = {
            // Move `frame` into a sub-scope so we control when it will be dropped.
            let mut frame = frame;
            // Detach the profiler timing before `frame.extra` is moved out below.
            let timing = frame.extra.timing.take();
            // This consumes `frame.extra` (and thus partially moves `frame`).
            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
            // Close the profiling interval that was opened when this frame was pushed.
            if let Some(profiler) = ecx.machine.profiler.as_ref() {
                profiler.finish_recording_interval_event(timing.unwrap());
            }
            res
        };
        // Needs to be done after dropping frame to show up on the right nesting level.
        // (Cc https://github.com/rust-lang/miri/issues/2266)
        if !ecx.active_thread_stack().is_empty() {
            info!("Continuing in {}", ecx.frame().instance());
        }
        res
    }
1671
1672    fn after_local_read(
1673        ecx: &InterpCx<'tcx, Self>,
1674        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
1675        local: mir::Local,
1676    ) -> InterpResult<'tcx> {
1677        if let Some(data_race) = &frame.extra.data_race {
1678            data_race.local_read(local, &ecx.machine);
1679        }
1680        interp_ok(())
1681    }
1682
1683    fn after_local_write(
1684        ecx: &mut InterpCx<'tcx, Self>,
1685        local: mir::Local,
1686        storage_live: bool,
1687    ) -> InterpResult<'tcx> {
1688        if let Some(data_race) = &ecx.frame().extra.data_race {
1689            data_race.local_write(local, storage_live, &ecx.machine);
1690        }
1691        interp_ok(())
1692    }
1693
    fn after_local_moved_to_memory(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        mplace: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        // The just-created allocation must carry concrete provenance.
        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
            panic!("after_local_allocated should only be called on fresh allocations");
        };
        // Record the span where this was allocated: the declaration of the local.
        let local_decl = &ecx.frame().body().local_decls[local];
        let span = local_decl.source_info.span;
        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
        // The data race system has to fix the clocks used for this write.
        // (`get_alloc_extra_mut` splits the borrow so we can access both the allocation's
        // extra data and the machine at the same time.)
        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
        // Use the frame-level data race state of the active thread's topmost frame,
        // if the vector-clock data race detector is enabled.
        if let Some(data_race) =
            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
        {
            data_race.local_moved_to_memory(
                local,
                alloc_info.data_race.as_vclocks_mut().unwrap(),
                machine,
            );
        }
        interp_ok(())
    }
1719
1720    fn get_global_alloc_salt(
1721        ecx: &InterpCx<'tcx, Self>,
1722        instance: Option<ty::Instance<'tcx>>,
1723    ) -> usize {
1724        let unique = if let Some(instance) = instance {
1725            // Functions cannot be identified by pointers, as asm-equal functions can get
1726            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
1727            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
1728            // mention of a function. This means that `main as fn() == main as fn()` is false, while
1729            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
1730            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
1731            // whether codegen will actually emit duplicate functions. It does that when they have
1732            // non-lifetime generics, or when they can be inlined. All other functions are given a
1733            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
1734            // cannot be relied upon for anything. But if we don't do this, the
1735            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
1736            // backtraces look terrible.
1737            let is_generic = instance
1738                .args
1739                .into_iter()
1740                .any(|arg| !matches!(arg.kind(), ty::GenericArgKind::Lifetime(_)));
1741            let can_be_inlined = matches!(
1742                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
1743                InliningThreshold::Always
1744            ) || !matches!(
1745                ecx.tcx.codegen_instance_attrs(instance.def).inline,
1746                InlineAttr::Never
1747            );
1748            !is_generic && !can_be_inlined
1749        } else {
1750            // Non-functions are never unique.
1751            false
1752        };
1753        // Always use the same salt if the allocation is unique.
1754        if unique {
1755            CTFE_ALLOC_SALT
1756        } else {
1757            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
1758        }
1759    }
1760
    /// Returns the union data range for `ty`, computing it with `compute_range` on first
    /// use and serving it from the per-machine cache afterwards.
    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        // `entry().or_insert_with()` computes the range at most once per type.
        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
    }
1768
1769    fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams {
1770        use crate::alloc::MiriAllocParams;
1771
1772        match &self.allocator {
1773            Some(alloc) => MiriAllocParams::Isolated(alloc.clone()),
1774            None => MiriAllocParams::Global,
1775        }
1776    }
1777
    /// Enters a tracing span when the `tracing` feature is enabled. Without that feature
    /// this is a no-op: the closure is never called and `()` is returned as the guard.
    fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
        #[cfg(feature = "tracing")]
        {
            span().entered()
        }
        #[cfg(not(feature = "tracing"))]
        #[expect(clippy::unused_unit)]
        {
            let _ = span; // so we avoid the "unused variable" warning
            ()
        }
    }
1790}
1791
/// Trait for callbacks handling asynchronous machine operations.
pub trait MachineCallback<'tcx, T>: VisitProvenance {
    /// The function to be invoked when the callback is fired.
    ///
    /// Consumes the boxed callback; `arg` carries the operation-specific payload.
    fn call(
        self: Box<Self>,
        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
        arg: T,
    ) -> InterpResult<'tcx>;
}
1801
/// Type alias for boxed machine callbacks with generic argument type.
/// Usually constructed via the [`callback!`] macro.
pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;
1804
/// Creates a `DynMachineCallback`:
///
/// ```rust
/// callback!(
///     @capture<'tcx> {
///         var1: Ty1,
///         var2: Ty2<'tcx>,
///     }
///     |this, arg: ArgTy| {
///         // Implement the callback here.
///         todo!()
///     }
/// )
/// ```
///
/// All the argument types must implement `VisitProvenance`.
#[macro_export]
macro_rules! callback {
    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
        { $($name:ident: $type:ty),* $(,)? }
     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
        // Ad-hoc struct holding the captured variables.
        struct Callback<$tcx, $($lft),*> {
            $($name: $type,)*
            _phantom: std::marker::PhantomData<&$tcx ()>,
        }

        // Provenance GC support: visit every captured field.
        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
                $(
                    self.$name.visit_provenance(_visit);
                )*
            }
        }

        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
            fn call(
                self: Box<Self>,
                $this: &mut MiriInterpCx<$tcx>,
                $arg: $arg_ty
            ) -> InterpResult<$tcx> {
                // Unpack the captures into local bindings visible to `$body`.
                #[allow(unused_variables)]
                let Callback { $($name,)* _phantom } = *self;
                $body
            }
        }

        Box::new(Callback {
            $($name,)*
            _phantom: std::marker::PhantomData
        })
    }};
}