miri/machine.rs

//! Global machine state, as well as the implementation of the interpreter engine's
//! `Machine` trait.

use std::any::Any;
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::path::Path;
use std::rc::Rc;
use std::{fmt, process};

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rustc_abi::{Align, ExternAbi, Size};
use rustc_apfloat::{Float, FloatConvert};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
#[allow(unused)]
use rustc_data_structures::static_assert_size;
use rustc_hir::attrs::InlineAttr;
use rustc_middle::middle::codegen_fn_attrs::TargetFeatureKind;
use rustc_middle::mir;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::InliningThreshold;
use rustc_span::def_id::{CrateNum, DefId};
use rustc_span::{Span, SpanData, Symbol};
use rustc_target::callconv::FnAbi;

use crate::alloc_addresses::EvalContextExt;
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory};
use crate::*;

/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`).
pub const SIGRTMAX: i32 = 42;
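
// For example, a signal shim could classify a user-supplied signal number like
// this (illustrative sketch only; no such helper exists in this file):
//     let is_realtime = (SIGRTMIN..=SIGRTMAX).contains(&signum);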

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum AlignmentCheck {
    /// Do not check alignment.
    None,
    /// Check alignment "symbolically", i.e., using only the requested alignment for an allocation and not its real base address.
    Symbolic,
    /// Check alignment on the actual physical integer address.
    Int,
}
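
// To illustrate the difference: if an allocation requests alignment 4 but Miri happens
// to place it at a 16-aligned address, then reading a `u64` from its start passes under
// `Int` (the concrete address is 8-aligned) but fails under `Symbolic`, which trusts
// only the requested alignment. (Hypothetical scenario, not code from this module.)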

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum RejectOpWith {
    /// Isolated op is rejected with an abort of the machine.
    Abort,

    /// If not `Abort`, Miri returns an error for an isolated op.
    /// The following options determine whether the user should be warned about such an error.
    /// Do not print a warning about the rejected isolated op.
    NoWarning,

    /// Print a warning about the rejected isolated op, with a backtrace.
    Warning,

    /// Print a warning about the rejected isolated op, without a backtrace.
    WarningWithoutBacktrace,
}

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum IsolatedOp {
    /// Reject an op requiring communication with the host. By
    /// default, Miri rejects the op with an abort. If not, it returns
    /// an error code and prints a warning about it. Warning levels
    /// are controlled by the `RejectOpWith` enum.
    Reject(RejectOpWith),

    /// Execute an op requiring communication with the host, i.e., disable isolation.
    Allow,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BacktraceStyle {
    /// Prints a terser backtrace which ideally only contains relevant information.
    Short,
    /// Prints a backtrace with all possible information.
    Full,
    /// Prints only the frame that the error occurs in.
    Off,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ValidationMode {
    /// Do not perform any kind of validation.
    No,
    /// Validate the interior of the value, but not things behind references.
    Shallow,
    /// Fully recursively validate references.
    Deep,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum FloatRoundingErrorMode {
    /// Apply a random error (the default).
    Random,
    /// Don't apply any error.
    None,
    /// Always apply the maximum error (with a random sign).
    Max,
}

/// Extra data stored with each stack frame
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is `Some()`, then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped during unwinding of a panic,
    /// we stop unwinding and use the `CatchUnwindData` to handle catching.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
    /// `#[track_caller]`. We compute this once on creation and store the result, as an
    /// optimization.
    /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
    pub is_user_relevant: bool,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}

impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Omitting `timing`, it does not support `Debug`.
        let FrameExtra { borrow_tracker, catch_unwind, timing: _, is_user_relevant, data_race } =
            self;
        f.debug_struct("FrameData")
            .field("borrow_tracker", borrow_tracker)
            .field("catch_unwind", catch_unwind)
            .field("is_user_relevant", is_user_relevant)
            .field("data_race", data_race)
            .finish()
    }
}

impl VisitProvenance for FrameExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let FrameExtra {
            catch_unwind,
            borrow_tracker,
            timing: _,
            is_user_relevant: _,
            data_race: _,
        } = self;

        catch_unwind.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
    }
}

/// Extra memory kinds
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`)
    WinLocal,
    /// Memory for args, errno, env vars, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime, e.g. for readdir. Separate from `Machine` because we clean
    /// it up (or expect the user to invoke operations that clean it up) and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program
    Mmap,
}

impl From<MiriMemoryKind> for MemoryKind {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind {
        MemoryKind::Machine(kind)
    }
}

impl MayLeak for MiriMemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
            Machine | Global | ExternStatic | Tls | Mmap => true,
        }
    }
}
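
// Consequence of the table above (illustrative): a `malloc` without a matching
// `free` (kind `C`, `may_leak() == false`) is flagged by the leak checker at exit,
// while machine-managed memory (kind `Machine`) may leak silently by design.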

impl MiriMemoryKind {
    /// Whether we have a useful allocation span for an allocation of this kind.
    fn should_save_allocation_span(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            // Heap allocations are fine since the `Allocation` is created immediately.
            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
            // Everything else is unclear, let's not show potentially confusing spans.
            Machine | Global | ExternStatic | Tls | Runtime => false,
        }
    }
}

impl fmt::Display for MiriMemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::MiriMemoryKind::*;
        match self {
            Rust => write!(f, "Rust heap"),
            Miri => write!(f, "Miri bare-metal heap"),
            C => write!(f, "C heap"),
            WinHeap => write!(f, "Windows heap"),
            WinLocal => write!(f, "Windows local memory"),
            Machine => write!(f, "machine-managed memory"),
            Runtime => write!(f, "language runtime memory"),
            Global => write!(f, "global (static or const)"),
            ExternStatic => write!(f, "extern static"),
            Tls => write!(f, "thread-local static"),
            Mmap => write!(f, "mmap"),
        }
    }
}

pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;

/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait needs that because validity checking
// *might* be recursive and then it has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance, we know exactly which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot be
    /// actually implemented in Miri. So instead, we approximate this, erroring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}
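
// How wildcard provenance arises, from the interpreted program's point of view
// (hedged sketch of user code, not part of this module):
//     let x: u8 = 0;
//     let addr = &x as *const u8 as usize; // exposes the provenance of `x`
//     let p = addr as *const u8;           // `p` carries `Provenance::Wildcard`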

/// The "extra" information a pointer has over a regular AllocId.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
    Concrete(BorTag),
    Wildcard,
}

#[cfg(target_pointer_width = "64")]
static_assert_size!(StrictPointer, 24);
// FIXME: this would fit in 24 bytes but layout optimizations are not smart enough.
// #[cfg(target_pointer_width = "64")]
// static_assert_size!(Pointer, 24);
#[cfg(target_pointer_width = "64")]
static_assert_size!(Scalar, 32);

impl fmt::Debug for Provenance {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Provenance::Concrete { alloc_id, tag } => {
                // Forward `alternate` flag to `alloc_id` printing.
                if f.alternate() {
                    write!(f, "[{alloc_id:#?}]")?;
                } else {
                    write!(f, "[{alloc_id:?}]")?;
                }
                // Print Borrow Tracker tag.
                write!(f, "{tag:?}")?;
            }
            Provenance::Wildcard => {
                write!(f, "[wildcard]")?;
            }
        }
        Ok(())
    }
}

impl interpret::Provenance for Provenance {
    /// We use absolute addresses in the `offset` of a `StrictPointer`.
    const OFFSET_IS_ADDR: bool = true;

    /// Miri implements wildcard provenance.
    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);

    fn get_alloc_id(self) -> Option<AllocId> {
        match self {
            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
            Provenance::Wildcard => None,
        }
    }

    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (prov, addr) = ptr.into_raw_parts(); // offset is absolute address
        write!(f, "{:#x}", addr.bytes())?;
        if f.alternate() {
            write!(f, "{prov:#?}")?;
        } else {
            write!(f, "{prov:?}")?;
        }
        Ok(())
    }

    fn join(left: Self, right: Self) -> Option<Self> {
        match (left, right) {
            // If both are the *same* concrete tag, that is the result.
            (
                Provenance::Concrete { alloc_id: left_alloc, tag: left_tag },
                Provenance::Concrete { alloc_id: right_alloc, tag: right_tag },
            ) if left_alloc == right_alloc && left_tag == right_tag => Some(left),
            // If one side is a wildcard, the best possible outcome is that it is equal to the other
            // one, and we use that.
            (Provenance::Wildcard, o) | (o, Provenance::Wildcard) => Some(o),
            // Otherwise, fall back to `None`.
            _ => None,
        }
    }
}

impl fmt::Debug for ProvenanceExtra {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
        }
    }
}

impl ProvenanceExtra {
    pub fn and_then<T>(self, f: impl FnOnCE(BorTag) -> Option<T>) -> Option<T> {
        match self {
            ProvenanceExtra::Concrete(pid) => f(pid),
            ProvenanceExtra::Wildcard => None,
        }
    }
}
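
// `and_then` lets callers run tag-specific logic while treating wildcards as "no
// information". A hedged usage sketch (`tag_is_protected` is hypothetical):
//     let protected = prov_extra.and_then(|tag| tag_is_protected(tag)).unwrap_or(false);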

/// Extra per-allocation data
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// Per-allocation state of the borrow tracker, if enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Extra state for data race detection.
    ///
    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`.
    pub data_race: AllocDataRaceHandler,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists if a leak of this
    /// allocation would actually be reported (i.e., the allocation is not allowed
    /// to leak and leak backtraces are being collected). The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization primitives like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync: FxHashMap<Size, Box<dyn Any>>,
}
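
// For instance, a mutex shim could stash its state at the offset of the mutex
// object and retrieve it on later calls (hedged sketch; `MyMutexState` and the
// surrounding plumbing are hypothetical):
//     alloc_extra.sync.insert(offset, Box::new(MyMutexState::default()));
//     let state = alloc_extra.sync.get(&offset).and_then(|b| b.downcast_ref::<MyMutexState>());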

// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        panic!("our allocations should never be cloned");
    }
}

impl VisitProvenance for AllocExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self;

        borrow_tracker.visit_provenance(visit);
        data_race.visit_provenance(visit);
    }
}

/// Precomputed layouts of primitive types
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}

impl<'tcx> PrimitiveLayouts<'tcx> {
    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
        let tcx = layout_cx.tcx();
        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
        Ok(Self {
            unit: layout_cx.layout_of(tcx.types.unit)?,
            i8: layout_cx.layout_of(tcx.types.i8)?,
            i16: layout_cx.layout_of(tcx.types.i16)?,
            i32: layout_cx.layout_of(tcx.types.i32)?,
            i64: layout_cx.layout_of(tcx.types.i64)?,
            i128: layout_cx.layout_of(tcx.types.i128)?,
            isize: layout_cx.layout_of(tcx.types.isize)?,
            u8: layout_cx.layout_of(tcx.types.u8)?,
            u16: layout_cx.layout_of(tcx.types.u16)?,
            u32: layout_cx.layout_of(tcx.types.u32)?,
            u64: layout_cx.layout_of(tcx.types.u64)?,
            u128: layout_cx.layout_of(tcx.types.u128)?,
            usize: layout_cx.layout_of(tcx.types.usize)?,
            bool: layout_cx.layout_of(tcx.types.bool)?,
            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
        })
    }

    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.u8),
            16 => Some(self.u16),
            32 => Some(self.u32),
            64 => Some(self.u64),
            128 => Some(self.u128),
            _ => None,
        }
    }

    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.i8),
            16 => Some(self.i16),
            32 => Some(self.i32),
            64 => Some(self.i64),
            128 => Some(self.i128),
            _ => None,
        }
    }
}
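
// Example use of these caches (hedged sketch): a shim that needs the layout of a
// 4-byte unsigned integer can avoid a layout query via
//     let u32_layout = ecx.machine.layouts.uint(Size::from_bits(32)).unwrap();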

/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&MiriMachine` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Depending on settings, this will be `None`,
    /// global data for a data race detector,
    /// or the context required for running in GenMC mode.
    ///
    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
    pub data_race: GlobalDataRaceHandler,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS hands them out via pointers
    /// (`_NSGetArgc`/`_NSGetArgv`).
    /// We also need the full command line as one string because Windows provides it that way.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What Miri should do when an op requires communicating with the host,
    /// such as accessing host env vars, generating random numbers, or
    /// accessing the file system.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all EpollEventInterest.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotonic clock.
    pub(crate) monotonic_clock: MonotonicClock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect at all; it is just tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// Precomputed `TyAndLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` exported under the given name is found.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// The equivalent of the `RUST_BACKTRACE` setting, applied when reporting an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered local for the purposes of error reporting.
    pub(crate) local_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by `ptr_to_int`, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocator used for the machine's `AllocBytes` in native-lib mode.
    pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>,

    /// The allocation IDs to report when they are being allocated
    /// (helps with debugging memory leaks and use-after-free bugs).
    tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of `compare_exchange_weak`, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handles of the optional shared object files for native functions.
    #[cfg(all(unix, feature = "native-lib"))]
    pub native_lib: Vec<(libloading::Library, std::path::PathBuf)>,
    #[cfg(not(all(unix, feature = "native-lib")))]
    pub native_lib: Vec<!>,

    /// Run a garbage collector for BorTags every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last BorTag GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by Miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values.
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// Remembers whether we already warned about an extern type with Stacked Borrows.
    pub(crate) sb_extern_type_warned: Cell<bool>,
    /// Remembers whether we already warned about sharing memory with a native call.
    #[allow(unused)]
    pub(crate) native_call_mem_warned: Cell<bool>,
    /// Remembers which shims have already shown the warning about erroring in isolation.
    pub(crate) reject_in_isolation_warned: RefCell<FxHashSet<String>>,
    /// Remembers which int2ptr casts we have already warned about.
    pub(crate) int2ptr_warned: RefCell<FxHashSet<Span>>,

    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,

    /// Always prefer the intrinsic fallback body over the native Miri implementation.
    pub force_intrinsic_fallback: bool,

    /// Whether floating-point operations can behave non-deterministically.
    pub float_nondet: bool,
    /// Whether floating-point operations can have a non-deterministic rounding error.
    pub float_rounding_error: FloatRoundingErrorMode,

    /// Whether Miri artificially introduces short reads/writes on file descriptors.
    pub short_fd_operations: bool,
}

impl<'tcx> MiriMachine<'tcx> {
    /// Create a new MiriMachine.
    ///
    /// Invariant: `genmc_ctx.is_some() == config.genmc_config.is_some()`
    pub(crate) fn new(
        config: &MiriConfig,
        layout_cx: LayoutCx<'tcx>,
        genmc_ctx: Option<Rc<GenmcCtx>>,
    ) -> Self {
        let tcx = layout_cx.tcx();
        let local_crates = helpers::get_local_crates(tcx);
        let layouts =
            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
        let profiler = config.measureme_out.as_ref().map(|out| {
            let crate_name =
                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
            let pid = process::id();
            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
            // the PID is padded so that the nondeterministic value of the PID does not spread
            // nondeterminism to the allocator. In Miri we are not aiming for such performance
            // control, we just pad for consistency with rustc.
            let filename = format!("{crate_name}-{pid:07}");
            let path = Path::new(out).join(filename);
            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
        });
        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
        let data_race = if config.genmc_config.is_some() {
            // `genmc_ctx` persists across executions, so we don't create a new one here.
            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
        } else if config.data_race_detector {
            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
        } else {
            GlobalDataRaceHandler::None
        };
        // Determine page size, stack address, and stack size.
        // These values are mostly meaningless, but the stack address is also where we start
        // allocating physical integer addresses for all allocations.
        let page_size = if let Some(page_size) = config.page_size {
            page_size
        } else {
            let target = &tcx.sess.target;
            match target.arch.as_ref() {
                "wasm32" | "wasm64" => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
                "aarch64" => {
                    if target.options.vendor.as_ref() == "apple" {
                        // No "definitive" source, but see:
                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
                        // https://github.com/ziglang/zig/issues/11308 etc.
                        16 * 1024
                    } else {
                        4 * 1024
                    }
                }
                _ => 4 * 1024,
            }
        };
        // On 16-bit targets, 32 pages is more than the entire address space!
        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
        let stack_size =
            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
        assert!(
            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
            "miri only supports up to {} CPUs, but {} were configured",
            cpu_affinity::MAX_CPUS,
            config.num_cpus
        );
        let threads = ThreadManager::new(config);
        let mut thread_cpu_affinity = FxHashMap::default();
        if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") {
            thread_cpu_affinity
                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
        }
        let alloc_addresses =
            RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr, tcx));
        MiriMachine {
            tcx,
            borrow_tracker,
            data_race,
            alloc_addresses,
            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
            env_vars: EnvVars::default(),
            main_fn_ret_place: None,
            argc: None,
            argv: None,
            cmd_line: None,
            tls: TlsData::default(),
            isolated_op: config.isolated_op,
            validation: config.validation,
            fds: shims::FdTable::init(config.mute_stdout_stderr),
            epoll_interests: shims::EpollInterestTable::new(),
            dirs: Default::default(),
            layouts,
            threads,
            thread_cpu_affinity,
            static_roots: Vec::new(),
            profiler,
            string_cache: Default::default(),
            exported_symbols_cache: FxHashMap::default(),
            backtrace_style: config.backtrace_style,
            local_crates,
            extern_statics: FxHashMap::default(),
            rng: RefCell::new(rng),
            allocator: if !config.native_lib.is_empty() {
                Some(Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new())))
            } else { None },
            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
            track_alloc_accesses: config.track_alloc_accesses,
            check_alignment: config.check_alignment,
            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
            preemption_rate: config.preemption_rate,
            report_progress: config.report_progress,
            basic_block_count: 0,
            monotonic_clock: MonotonicClock::new(config.isolated_op == IsolatedOp::Allow),
            #[cfg(all(unix, feature = "native-lib"))]
            native_lib: config.native_lib.iter().map(|lib_file_path| {
                let host_triple = rustc_session::config::host_tuple();
                let target_triple = tcx.sess.opts.target_triple.tuple();
                // Check that the host triple matches the session's target triple.
                if host_triple != target_triple {
                    panic!(
                        "calling native C functions in linked .so file requires host and target to be the same: \
                        host={host_triple}, target={target_triple}",
                    );
                }
                // Note: it is the user's responsibility to provide a correct SO file.
                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
                // undefined behaviour in Miri itself!
                (
                    unsafe {
                        libloading::Library::new(lib_file_path)
                            .expect("failed to read specified extern shared object file")
                    },
                    lib_file_path.clone(),
                )
            }).collect(),
            #[cfg(not(all(unix, feature = "native-lib")))]
            native_lib: config.native_lib.iter().map(|_| {
                panic!("calling functions from native libraries via FFI is not supported in this build of Miri")
            }).collect(),
            gc_interval: config.gc_interval,
            since_gc: 0,
            num_cpus: config.num_cpus,
            page_size,
            stack_addr,
            stack_size,
            collect_leak_backtraces: config.collect_leak_backtraces,
            allocation_spans: RefCell::new(FxHashMap::default()),
            symbolic_alignment: RefCell::new(FxHashMap::default()),
            union_data_ranges: FxHashMap::default(),
            pthread_mutex_sanity: Cell::new(false),
            pthread_rwlock_sanity: Cell::new(false),
            pthread_condvar_sanity: Cell::new(false),
            sb_extern_type_warned: Cell::new(false),
            native_call_mem_warned: Cell::new(false),
            reject_in_isolation_warned: Default::default(),
            int2ptr_warned: Default::default(),
            mangle_internal_symbol_cache: Default::default(),
            force_intrinsic_fallback: config.force_intrinsic_fallback,
            float_nondet: config.float_nondet,
            float_rounding_error: config.float_rounding_error,
            short_fd_operations: config.short_fd_operations,
        }
    }

    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }

    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This was just allocated, so there definitely is a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }

    pub(crate) fn communicate(&self) -> bool {
        self.isolated_op == IsolatedOp::Allow
    }

    /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
    pub(crate) fn is_local(&self, frame: &FrameInfo<'_>) -> bool {
        let def_id = frame.instance.def_id();
        def_id.is_local() || self.local_crates.contains(&def_id.krate)
    }

    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
    pub(crate) fn handle_abnormal_termination(&mut self) {
        // All strings in the profile data are stored in a single string table which is not
        // written to disk until the profiler is dropped. If the interpreter exits without dropping
        // the profiler, it is not possible to interpret the profile data and all measureme tools
        // will panic when given the file.
        drop(self.profiler.take());
    }

    pub(crate) fn page_align(&self) -> Align {
        Align::from_bytes(self.page_size).unwrap()
    }

    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .map(|(allocated, _deallocated)| allocated.data())
    }

    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .and_then(|(_allocated, deallocated)| *deallocated)
            .map(Span::data)
    }

    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id, size, align, kind));
        }

        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        let data_race = match &ecx.machine.data_race {
            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
            GlobalDataRaceHandler::Vclocks(data_race) =>
                AllocDataRaceHandler::Vclocks(
                    data_race::AllocState::new_allocation(
                        data_race,
                        &ecx.machine.threads,
                        size,
                        kind,
                        ecx.machine.current_span(),
                    ),
                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
                ),
            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
                // GenMC learns about new allocations directly from the alloc_addresses module,
                // since it has to be able to control the address at which they are placed.
                AllocDataRaceHandler::Genmc
            }
        };

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_span(), None));
        }

        interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
    }
}

impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            epoll_interests: _,
            tcx: _,
            isolated_op: _,
            validation: _,
            monotonic_clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            local_crates: _,
            rng: _,
            allocator: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            sb_extern_type_warned: _,
            native_call_mem_warned: _,
            reject_in_isolation_warned: _,
            int2ptr_warned: _,
            mangle_internal_symbol_cache: _,
            force_intrinsic_fallback: _,
            float_nondet: _,
            float_rounding_error: _,
            short_fd_operations: _,
        } = self;

        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}

/// A rustc InterpCx for Miri.
pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;

/// A little trait that extension traits can inherit from.
pub trait MiriInterpCxExt<'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
}
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}
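
// The typical pattern built on top of this (hedged sketch; `frobnicate` is a
// made-up shim): an extension trait bounds on `MiriInterpCxExt` and starts each
// method with `eval_context_mut` to get at the full interpreter context.
//     pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
//         fn frobnicate(&mut self) -> InterpResult<'tcx> {
//             let this = self.eval_context_mut();
//             // ... use `this: &mut MiriInterpCx<'tcx>` ...
//             interp_ok(())
//         }
//     }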

/// Machine hook implementations.
impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
    type MemoryKind = MiriMemoryKind;
    type ExtraFnVal = DynSym;

    type FrameExtra = FrameExtra<'tcx>;
    type AllocExtra = AllocExtra<'tcx>;

    type Provenance = Provenance;
    type ProvenanceExtra = ProvenanceExtra;
    type Bytes = MiriAllocBytes;

    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;

    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    const PANIC_ON_ALLOC_FAIL: bool = false;

    #[inline(always)]
    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
        ecx.machine.check_alignment != AlignmentCheck::None
    }

    #[inline(always)]
    fn alignment_check(
        ecx: &MiriInterpCx<'tcx>,
        alloc_id: AllocId,
        alloc_align: Align,
        alloc_kind: AllocKind,
        offset: Size,
        align: Align,
    ) -> Option<Misalignment> {
        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
            // Just use the built-in check.
            return None;
        }
        if alloc_kind != AllocKind::LiveData {
            // Can't have any extra info here.
            return None;
        }
        // Let's see which alignment we have been promised for this allocation.
        let (promised_offset, promised_align) = ecx
            .machine
            .symbolic_alignment
            .borrow()
            .get(&alloc_id)
            .copied()
            .unwrap_or((Size::ZERO, alloc_align));
        if promised_align < align {
            // Definitely not enough.
            Some(Misalignment { has: promised_align, required: align })
        } else {
            // What's the offset between us and the promised alignment?
            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
            // That must also be aligned.
            if distance.is_multiple_of(align.bytes()) {
                // All looking good!
                None
            } else {
                // The biggest power of two through which `distance` is divisible.
                let distance_pow2 = 1 << distance.trailing_zeros();
                Some(Misalignment {
                    has: Align::from_bytes(distance_pow2).unwrap(),
                    required: align,
                })
            }
        }
    }
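
    // Worked example for the symbolic check above (hypothetical numbers): if offset 16
    // of an allocation was promised to be 8-aligned, then an access at offset 20 with
    // required alignment 4 is fine (distance 4 is a multiple of 4), while an access at
    // offset 20 requiring alignment 8 reports `has: 4` (4 = 1 << trailing_zeros(4)).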

    #[inline(always)]
    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
        ecx.machine.validation != ValidationMode::No
    }
    #[inline(always)]
    fn enforce_validity_recursively(
        ecx: &InterpCx<'tcx, Self>,
        _layout: TyAndLayout<'tcx>,
    ) -> bool {
        ecx.machine.validation == ValidationMode::Deep
    }

    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        !ecx.tcx.sess.overflow_checks()
    }

    fn check_fn_target_features(
        ecx: &MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
    ) -> InterpResult<'tcx> {
        let attrs = ecx.tcx.codegen_instance_attrs(instance.def);
        if attrs
            .target_features
            .iter()
            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
        {
            let unavailable = attrs
                .target_features
                .iter()
                .filter(|&feature| {
                    feature.kind != TargetFeatureKind::Implied
                        && !ecx.tcx.sess.target_features.contains(&feature.name)
                })
                .fold(String::new(), |mut s, feature| {
                    if !s.is_empty() {
                        s.push_str(", ");
                    }
                    s.push_str(feature.name.as_str());
                    s
                });
            let msg = format!(
                "calling a function that requires unavailable target features: {unavailable}"
            );
            // On WASM, this is not UB, but instead gets rejected during validation of the module
            // (see #84988).
            if ecx.tcx.sess.target.is_like_wasm {
                throw_machine_stop!(TerminationInfo::Abort(msg));
            } else {
                throw_ub_format!("{msg}");
            }
        }
        interp_ok(())
    }

    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            let _trace = enter_trace_span!("emulate_foreign_item");
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be `Ok(None)` if we're emulating the function entirely within Miri (no need
            // to run extra MIR), and `Ok(Some(body))` if we found MIR to run for the foreign
            // function.
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
            let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
        }

        // Otherwise, load the MIR.
        let _trace = enter_trace_span!("load_mir");
        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }

    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        fn_val: DynSym,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
    }

    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        ecx.call_intrinsic(instance, args, dest, ret, unwind)
    }

    #[inline(always)]
    fn assert_panic(
        ecx: &mut MiriInterpCx<'tcx>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        ecx.start_panic_nounwind(msg)
    }

    fn unwind_terminate(
        ecx: &mut InterpCx<'tcx, Self>,
        reason: mir::UnwindTerminateReason,
    ) -> InterpResult<'tcx> {
        // Call the lang item.
        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
        ecx.call_function(
            panic,
            ExternAbi::Rust,
            &[],
            None,
            ReturnContinuation::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
        )?;
        interp_ok(())
    }

    #[inline(always)]
    fn binary_ptr_op(
        ecx: &MiriInterpCx<'tcx>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        ecx.binary_ptr_op(bin_op, left, right)
    }

    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }

    #[inline(always)]
    fn apply_float_nondet(
        ecx: &mut InterpCx<'tcx, Self>,
        val: ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        crate::math::apply_random_float_error_to_imm(ecx, val, 4)
    }

    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }

    #[inline(always)]
    fn float_fuse_mul_add(ecx: &mut InterpCx<'tcx, Self>) -> bool {
        ecx.machine.float_nondet && ecx.machine.rng.get_mut().random()
    }
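
    // In other words (illustrative summary, not additional logic): when float
    // non-determinism is enabled, each fusable `a * b + c` flips a coin to choose
    // between the fused, singly-rounded result and the doubly-rounded unfused result.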

    #[inline(always)]
    fn ub_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.ub_checks())
    }

    #[inline(always)]
    fn contract_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.contract_checks())
    }

    #[inline(always)]
    fn thread_local_static_pointer(
        ecx: &mut MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }

    fn extern_static_pointer(
        ecx: &MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
            // Various parts of the engine rely on `get_alloc_info` for size and alignment
            // information. That uses the type information of this static.
            // Make sure it matches the Miri allocation for this.
            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
                panic!("extern_statics cannot contain wildcards")
            };
            let info = ecx.get_alloc_info(alloc_id);
            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
            let extern_decl_layout =
                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
                throw_unsup_format!(
                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
                    but Miri emulates it via an extern static shim \
                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
                    name = ecx.tcx.def_path_str(def_id),
                    krate = ecx.tcx.crate_name(def_id.krate),
                    decl_size = extern_decl_layout.size.bytes(),
                    decl_align = extern_decl_layout.align.bytes(),
                    shim_size = info.size.bytes(),
                    shim_align = info.align.bytes(),
                )
            }
            interp_ok(ptr)
        } else {
            throw_unsup_format!("extern static `{link_name}` is not supported by Miri")
        }
    }
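
    // A sketch of guest code that ends up in the hook above: declaring an extern static
    // that Miri provides a shim for. (`environ` is used for illustration; the exact set
    // of supported extern statics depends on the target.)
    //
    //     unsafe extern "C" {
    //         static mut environ: *const *const std::ffi::c_char;
    //     }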

    fn init_local_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Self::AllocExtra> {
        assert!(kind != MiriMemoryKind::Global.into());
        MiriMachine::init_allocation(ecx, id, kind, size, align)
    }

    fn adjust_alloc_root_pointer(
        ecx: &MiriInterpCx<'tcx>,
        ptr: interpret::Pointer<CtfeProvenance>,
        kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
        let alloc_id = ptr.provenance.alloc_id();
        if cfg!(debug_assertions) {
            // The machine promises to never call us on thread-local or extern statics.
            match ecx.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
                    panic!("adjust_alloc_root_pointer called on thread-local static")
                }
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
                    panic!("adjust_alloc_root_pointer called on extern static")
                }
                _ => {}
            }
        }
        // FIXME: can we somehow preserve the immutability of `ptr`?
        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
        } else {
            // The value does not matter; the borrow tracker is disabled.
            BorTag::default()
        };
        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
    }

    /// Called on `usize as ptr` casts.
    #[inline(always)]
    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
        ecx.ptr_from_addr_cast(addr)
    }
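
    // An illustrative guest-side cast that reaches this hook (a sketch):
    //
    //     let addr: usize = 0x1000;
    //     let p = addr as *const u8; // Miri decides what provenance `p` gets here.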

    /// Called on `ptr as usize` casts.
    /// (Actually computing the resulting `usize` doesn't need machine help;
    /// that's just `Scalar::try_to_int`.)
    #[inline(always)]
    fn expose_provenance(
        ecx: &InterpCx<'tcx, Self>,
        provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        ecx.expose_provenance(provenance)
    }
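
    // The matching guest-side cast (a sketch):
    //
    //     let x = 0u8;
    //     let p: *const u8 = &x;
    //     let addr = p as usize; // exposes `p`'s provenance via the hook above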

    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
    /// be used to disambiguate situations where a wildcard pointer sits right in between two
    /// allocations.
    ///
    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
    /// The resulting `AllocId` will just be used for that one step and then forgotten again
    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
    /// stored in machine state).
    ///
    /// When this fails, that means the pointer does not point to a live allocation.
    fn ptr_get_alloc(
        ecx: &MiriInterpCx<'tcx>,
        ptr: StrictPointer,
        size: i64,
    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
        let rel = ecx.ptr_get_alloc(ptr, size);

        rel.map(|(alloc_id, size)| {
            let tag = match ptr.provenance {
                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
                Provenance::Wildcard => ProvenanceExtra::Wildcard,
            };
            (alloc_id, size, tag)
        })
    }

    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
    ///
    /// If `alloc` contains pointers, then they are all pointing to globals.
    ///
    /// This should avoid copying if no work has to be done! If this returns an owned
    /// allocation (because a copy had to be done to adjust things), machine memory will
    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
    /// owned allocation to the map even when the map is shared.)
    fn adjust_global_allocation<'b>(
        ecx: &InterpCx<'tcx, Self>,
        id: AllocId,
        alloc: &'b Allocation,
    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
    {
        let alloc = alloc.adjust_from_tcx(
            &ecx.tcx,
            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
            |ptr| ecx.global_root_pointer(ptr),
        )?;
        let kind = MiriMemoryKind::Global.into();
        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
        interp_ok(Cow::Owned(alloc.with_extra(extra)))
    }

    #[inline(always)]
    fn before_memory_read(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_extra: &AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read));
        }
        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_data_race) => {
                let _trace = enter_trace_span!(data_race::before_memory_read);
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race
                else {
                    unreachable!();
                };
                data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }
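
    // The allocation-tracking diagnostics in this hook (and in the write hook below) are
    // driven by command-line flags: `-Zmiri-track-alloc-id=<id>` populates
    // `tracked_alloc_ids`, and `-Zmiri-track-alloc-accesses` additionally reports every
    // read and write (flag names per Miri's documentation).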

    #[inline(always)]
    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_write);
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
                    &mut alloc_extra.data_race
                else {
                    unreachable!()
                };
                data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }

    #[inline(always)]
    fn before_memory_deallocation(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        size: Size,
        align: Align,
        kind: MemoryKind,
    ) -> InterpResult<'tcx> {
        if machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.handle_dealloc(machine, alloc_id, ptr.addr(), kind)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_deallocation);
                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
                data_race.write(
                    alloc_id,
                    alloc_range(Size::ZERO, size),
                    NaWriteType::Deallocate,
                    None,
                    machine,
                )?;
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_deallocation(alloc_id, prov_extra, size, machine)?;
        }
        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
        {
            *deallocated_at = Some(machine.current_span());
        }
        machine.free_alloc_id(alloc_id, size, align, kind);
        interp_ok(())
    }

    #[inline(always)]
    fn retag_ptr_value(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_ptr_value(kind, val)
        } else {
            interp_ok(val.clone())
        }
    }

    #[inline(always)]
    fn retag_place_contents(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_place_contents(kind, place)?;
        }
        interp_ok(())
    }

    fn protect_in_place_function_argument(
        ecx: &mut InterpCx<'tcx, Self>,
        place: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        // If we have a borrow tracker, we also have it set up protection so that all reads *and
        // writes* during this call are insta-UB.
        let protected_place = if ecx.machine.borrow_tracker.is_some() {
            ecx.protect_place(place)?
        } else {
            // No borrow tracker.
            place.clone()
        };
        // We do need to write `uninit` so that even after the call ends, the former contents of
        // this place cannot be observed any more. We do the write after retagging so that for
        // Tree Borrows, this is considered to activate the new tag.
        // Conveniently this also ensures that the place actually points to suitable memory.
        ecx.write_uninit(&protected_place)?;
        // Now we throw away the protected place, ensuring its tag is never used again.
        interp_ok(())
    }
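
    // A sketch of guest code whose argument may be protected by the hook above (which
    // arguments are actually passed in-place is an engine-internal detail):
    //
    //     struct Big([u8; 1024]);
    //     fn consume(b: Big) { let _ = b; }
    //     consume(Big([0; 1024])); // the by-move operand can be passed in-place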

    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        // Start recording our event before doing anything else.
        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
            let fn_name = frame.instance().to_string();
            let entry = ecx.machine.string_cache.entry(fn_name.clone());
            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));

            Some(profiler.start_recording_interval_event_detached(
                *name,
                measureme::EventId::from_label(*name),
                ecx.active_thread().to_u32(),
            ))
        } else {
            None
        };

        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();

        let extra = FrameExtra {
            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
            catch_unwind: None,
            timing,
            is_user_relevant: ecx.machine.is_user_relevant(&frame),
            data_race: ecx
                .machine
                .data_race
                .as_vclocks_ref()
                .map(|_| data_race::FrameState::default()),
        };

        interp_ok(frame.with_extra(extra))
    }
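
    // The `profiler` consulted above is the measureme profiler; it is only active when
    // the user requests a profile via `-Zmiri-measureme=<name>` (flag name per Miri's
    // documentation), e.g.:
    //
    //     MIRIFLAGS="-Zmiri-measureme=trace" cargo miri run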

    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        ecx.active_thread_stack()
    }

    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        ecx.active_thread_stack_mut()
    }

    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
        ecx.machine.since_gc += 1;
        // Possibly report our progress. This will point at the terminator we are about to execute.
        if let Some(report_progress) = ecx.machine.report_progress {
            if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
                    block_count: ecx.machine.basic_block_count,
                });
            }
        }

        // Search for BorTags to find all live pointers, then remove all other tags from borrow
        // stacks.
        // When debug assertions are enabled, run the GC as often as possible so that any cases
        // where it mistakenly removes an important tag become visible.
        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
            ecx.machine.since_gc = 0;
            ecx.run_provenance_gc();
        }

        // These are our preemption points.
        // (This will only take effect after the terminator has been executed.)
        ecx.maybe_preempt_active_thread();

        // Make sure some time passes.
        ecx.machine.monotonic_clock.tick();

        interp_ok(())
    }
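
    // Both knobs used above are user-configurable (flag names per Miri's documentation):
    // `-Zmiri-report-progress=<blocks>` sets `report_progress`, and
    // `-Zmiri-provenance-gc=<blocks>` sets `gc_interval`, e.g.:
    //
    //     MIRIFLAGS="-Zmiri-report-progress=100000 -Zmiri-provenance-gc=10000" cargo miri test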

    #[inline(always)]
    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        if ecx.frame().extra.is_user_relevant {
            // We just pushed a user-relevant frame, so we know that the topmost user-relevant
            // frame is the topmost frame. If we push a frame that is not user-relevant, there's
            // no need to do anything.
            let stack_len = ecx.active_thread_stack().len();
            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
        }
        interp_ok(())
    }

    fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        let frame = ecx.frame();
        // We want this *before* the return value copy, because the return place itself is protected
        // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value.
        if ecx.machine.borrow_tracker.is_some() {
            ecx.on_stack_pop(frame)?;
        }
        if frame.extra.is_user_relevant {
            // All that we store is the index of the topmost user-relevant frame, so once we pop
            // it we have no idea where the next topmost user-relevant frame is. So we recompute it.
            // (If this ever becomes a bottleneck, we could have `push` store the previous
            // user-relevant frame and restore that here.)
            // We have to skip the frame that is just being popped.
            ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
        }
        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
        // concurrency and what it prints is just plain wrong. So we print our own information
        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
        info!("Leaving {}", ecx.frame().instance());
        interp_ok(())
    }

    #[inline(always)]
    fn after_stack_pop(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        unwinding: bool,
    ) -> InterpResult<'tcx, ReturnAction> {
        let res = {
            // Move `frame` into a sub-scope so we control when it will be dropped.
            let mut frame = frame;
            let timing = frame.extra.timing.take();
            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
            if let Some(profiler) = ecx.machine.profiler.as_ref() {
                profiler.finish_recording_interval_event(timing.unwrap());
            }
            res
        };
        // Needs to be done after dropping the frame to show up on the right nesting level.
        // (Cc https://github.com/rust-lang/miri/issues/2266)
        if !ecx.active_thread_stack().is_empty() {
            info!("Continuing in {}", ecx.frame().instance());
        }
        res
    }

    fn after_local_read(
        ecx: &InterpCx<'tcx, Self>,
        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        local: mir::Local,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &frame.extra.data_race {
            let _trace = enter_trace_span!(data_race::after_local_read);
            data_race.local_read(local, &ecx.machine);
        }
        interp_ok(())
    }

    fn after_local_write(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        storage_live: bool,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &ecx.frame().extra.data_race {
            let _trace = enter_trace_span!(data_race::after_local_write);
            data_race.local_write(local, storage_live, &ecx.machine);
        }
        interp_ok(())
    }

    fn after_local_moved_to_memory(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        mplace: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
            panic!("after_local_moved_to_memory should only be called on fresh allocations");
        };
        // Record the span where this was allocated: the declaration of the local.
        let local_decl = &ecx.frame().body().local_decls[local];
        let span = local_decl.source_info.span;
        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
        // The data race system has to fix the clocks used for this write.
        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
        if let Some(data_race) =
            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
        {
            let _trace = enter_trace_span!(data_race::after_local_moved_to_memory);
            data_race.local_moved_to_memory(
                local,
                alloc_info.data_race.as_vclocks_mut().unwrap(),
                machine,
            );
        }
        interp_ok(())
    }

    fn get_global_alloc_salt(
        ecx: &InterpCx<'tcx, Self>,
        instance: Option<ty::Instance<'tcx>>,
    ) -> usize {
        let unique = if let Some(instance) = instance {
            // Functions cannot be identified by pointers, as asm-equal functions can get
            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
            // mention of a function. This means that `main as fn() == main as fn()` is false, while
            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
            // whether codegen will actually emit duplicate functions. It does that when they have
            // non-lifetime generics, or when they can be inlined. All other functions are given a
            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
            // cannot be relied upon for anything. But if we don't do this, the
            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
            // backtraces look terrible.
            let is_generic = instance
                .args
                .into_iter()
                .any(|arg| !matches!(arg.kind(), ty::GenericArgKind::Lifetime(_)));
            let can_be_inlined = matches!(
                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
                InliningThreshold::Always
            ) || !matches!(
                ecx.tcx.codegen_instance_attrs(instance.def).inline,
                InlineAttr::Never
            );
            !is_generic && !can_be_inlined
        } else {
            // Non-functions are never unique.
            false
        };
        // Always use the same salt if the allocation is unique.
        if unique {
            CTFE_ALLOC_SALT
        } else {
            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
        }
    }
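
    // A consequence of the salting above, sketched from the guest's perspective: two
    // mentions of the same anonymous global may or may not compare equal by address.
    //
    //     let a: &i32 = &42; // promoted to an anonymous global
    //     let b: &i32 = &42;
    //     let _ = std::ptr::eq(a, b); // may be true or false, depending on the salt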

    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
    }

    fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams {
        use crate::alloc::MiriAllocParams;

        match &self.allocator {
            Some(alloc) => MiriAllocParams::Isolated(alloc.clone()),
            None => MiriAllocParams::Global,
        }
    }

    fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
        #[cfg(feature = "tracing")]
        {
            span().entered()
        }
        #[cfg(not(feature = "tracing"))]
        #[expect(clippy::unused_unit)]
        {
            let _ = span; // so we avoid the "unused variable" warning
            ()
        }
    }
}

/// Trait for callbacks handling asynchronous machine operations.
pub trait MachineCallback<'tcx, T>: VisitProvenance {
    /// The function to be invoked when the callback is fired.
    fn call(
        self: Box<Self>,
        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
        arg: T,
    ) -> InterpResult<'tcx>;
}

/// Type alias for boxed machine callbacks with a generic argument type.
pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;

/// Creates a `DynMachineCallback`:
///
/// ```rust
/// callback!(
///     @capture<'tcx> {
///         var1: Ty1,
///         var2: Ty2<'tcx>,
///     }
///     |this, arg: ArgTy| {
///         // Implement the callback here.
///         todo!()
///     }
/// )
/// ```
///
/// The types of all captured variables must implement `VisitProvenance`.
#[macro_export]
macro_rules! callback {
    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
        { $($name:ident: $type:ty),* $(,)? }
     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
        struct Callback<$tcx, $($lft),*> {
            $($name: $type,)*
            _phantom: std::marker::PhantomData<&$tcx ()>,
        }

        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
                $(
                    self.$name.visit_provenance(_visit);
                )*
            }
        }

        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
            fn call(
                self: Box<Self>,
                $this: &mut MiriInterpCx<$tcx>,
                $arg: $arg_ty
            ) -> InterpResult<$tcx> {
                #[allow(unused_variables)]
                let Callback { $($name,)* _phantom } = *self;
                $body
            }
        }

        Box::new(Callback {
            $($name,)*
            _phantom: std::marker::PhantomData
        })
    }};
}
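
// A hedged usage sketch of `callback!` (the names `value` and `_unblock` are
// illustrative, not taken from a real caller):
//
//     let cb: DynMachineCallback<'tcx, ()> = callback!(
//         @capture<'tcx> { value: u64 }
//         |this, _unblock: ()| {
//             let _ = (this, value);
//             interp_ok(())
//         }
//     );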