//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.

use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::collections::BTreeMap;
use std::path::Path;
use std::rc::Rc;
use std::{fmt, process};

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rustc_abi::{Align, ExternAbi, Size};
use rustc_apfloat::{Float, FloatConvert};
use rustc_ast::expand::allocator::{self, SpecialAllocatorMethod};
use rustc_data_structures::either::Either;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
#[allow(unused)]
use rustc_data_structures::static_assert_size;
use rustc_hir::attrs::InlineAttr;
use rustc_log::tracing;
use rustc_middle::middle::codegen_fn_attrs::TargetFeatureKind;
use rustc_middle::mir;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::InliningThreshold;
use rustc_span::def_id::{CrateNum, DefId};
use rustc_span::{Span, SpanData, Symbol};
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::callconv::FnAbi;
use rustc_target::spec::{Arch, Os};

use crate::alloc_addresses::EvalContextExt;
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::sync::SyncObj;
use crate::concurrency::{
    AllocDataRaceHandler, GenmcCtx, GenmcEvalContextExt as _, GlobalDataRaceHandler, weak_memory,
};
use crate::*;

/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`).
pub const SIGRTMAX: i32 = 42;
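
// A minimal std-only sketch (editor's illustration, not part of upstream Miri):
// checking that the constants above satisfy the `signal(7)` constraints quoted
// in their doc comments.
#[cfg(test)]
mod rt_signal_range_sketch {
    use super::{SIGRTMAX, SIGRTMIN};

    #[test]
    fn range_matches_posix_minimums() {
        // Both ends must lie in [32, 64]...
        assert!((32..=64).contains(&SIGRTMIN) && (32..=64).contains(&SIGRTMAX));
        // ...and the range must span at least `_POSIX_RTSIG_MAX` (8) signals.
        assert!(SIGRTMAX - SIGRTMIN >= 8);
    }
}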

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;
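
// Editor's sketch (std-only) of the bounded-pool idea behind
// `ADDRS_PER_ANON_GLOBAL`: repeated evaluations cycle through a fixed number of
// slots instead of minting fresh addresses forever. The addresses here are
// hypothetical; the real ones come from the `alloc_addresses` module.
#[cfg(test)]
mod anon_global_addr_pool_sketch {
    use super::ADDRS_PER_ANON_GLOBAL;

    #[test]
    fn slots_cycle_instead_of_growing() {
        let mut pool: Vec<u64> = Vec::new();
        for use_count in 0..(4 * ADDRS_PER_ANON_GLOBAL) {
            let slot = use_count % ADDRS_PER_ANON_GLOBAL;
            if slot == pool.len() {
                // Hypothetical base address for this slot.
                pool.push(0x1000 + 0x100 * slot as u64);
            }
        }
        // No matter how often the global is evaluated, the pool stays bounded.
        assert_eq!(pool.len(), ADDRS_PER_ANON_GLOBAL);
    }
}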

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum AlignmentCheck {
    /// Do not check alignment.
    None,
    /// Check alignment "symbolically", i.e., using only the requested alignment for an allocation and not its real base address.
    Symbolic,
    /// Check alignment on the actual physical integer address.
    Int,
}
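
// Editor's sketch (std-only, not Miri's real check) of the arithmetic behind
// `AlignmentCheck::Symbolic`, as implemented in `alignment_check` further down:
// the effective alignment of an access is the largest power of two dividing the
// distance from an offset that was promised to be aligned.
#[cfg(test)]
mod symbolic_alignment_sketch {
    fn effective_align(distance: u64) -> u64 {
        assert!(distance != 0, "a distance of zero is always aligned");
        1 << distance.trailing_zeros()
    }

    #[test]
    fn distance_determines_alignment() {
        // 24 bytes past a promised-aligned offset: aligned to 8, but not to 16.
        assert_eq!(effective_align(24), 8);
    }
}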

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum RejectOpWith {
    /// Isolated op is rejected with an abort of the machine.
    Abort,

    /// If not `Abort`, Miri returns an error for the isolated op.
    /// The following options determine whether the user should be warned about such errors.
    /// Do not print a warning about the rejected isolated op.
    NoWarning,

    /// Print a warning about the rejected isolated op, with a backtrace.
    Warning,

    /// Print a warning about the rejected isolated op, without a backtrace.
    WarningWithoutBacktrace,
}

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum IsolatedOp {
    /// Reject an op requiring communication with the host. By
    /// default, Miri rejects the op with an abort. If not, it returns
    /// an error code and prints a warning about it. Warning levels
    /// are controlled by the `RejectOpWith` enum.
    Reject(RejectOpWith),

    /// Execute the op requiring communication with the host, i.e., disable isolation.
    Allow,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BacktraceStyle {
    /// Prints a terser backtrace which ideally only contains relevant information.
    Short,
    /// Prints a backtrace with all possible information.
    Full,
    /// Prints only the frame that the error occurs in.
    Off,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ValidationMode {
    /// Do not perform any kind of validation.
    No,
    /// Validate the interior of the value, but not things behind references.
    Shallow,
    /// Fully recursively validate references.
    Deep,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum FloatRoundingErrorMode {
    /// Apply a random error (the default).
    Random,
    /// Don't apply any error.
    None,
    /// Always apply the maximum error (with a random sign).
    Max,
}
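
// Editor's sketch (std-only) of what an error of a few ULPs means for
// `FloatRoundingErrorMode`; Miri's actual error machinery lives in `crate::math`.
// For positive finite floats, stepping the bit pattern by one moves to the next
// representable value.
#[cfg(test)]
mod float_ulp_error_sketch {
    /// Perturb a positive finite `f64` upwards by `ulps` representable steps.
    fn apply_ulp_error(val: f64, ulps: u64) -> f64 {
        assert!(val.is_finite() && val > 0.0);
        f64::from_bits(val.to_bits() + ulps)
    }

    #[test]
    fn a_few_ulps_is_a_tiny_relative_error() {
        let exact = 0.1f64 + 0.2f64;
        let perturbed = apply_ulp_error(exact, 4);
        assert!(perturbed > exact);
        // Four ULPs around 0.3 is a relative error on the order of 1e-15.
        assert!((perturbed - exact) / exact < 1e-14);
    }
}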

/// Extra data stored with each stack frame.
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is `Some`, then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped during unwinding a panic,
    /// we stop unwinding and use the `CatchUnwindData` to handle catching.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates how user-relevant this frame is. `#[track_caller]` frames are never relevant.
    /// Frames from user-relevant crates are maximally relevant; frames from other crates are less
    /// relevant.
    pub user_relevance: u8,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}

impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Omitting `timing`, it does not support `Debug`.
        let FrameExtra { borrow_tracker, catch_unwind, timing: _, user_relevance, data_race } =
            self;
        f.debug_struct("FrameData")
            .field("borrow_tracker", borrow_tracker)
            .field("catch_unwind", catch_unwind)
            .field("user_relevance", user_relevance)
            .field("data_race", data_race)
            .finish()
    }
}

impl VisitProvenance for FrameExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let FrameExtra { catch_unwind, borrow_tracker, timing: _, user_relevance: _, data_race: _ } =
            self;

        catch_unwind.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
    }
}

/// Extra memory kinds.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`).
    WinLocal,
    /// Memory for args, errno, env vars, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime, e.g. for readdir. Separate from `Machine` because we clean
    /// it up (or expect the user to invoke operations that clean it up) and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program.
    Mmap,
}

impl From<MiriMemoryKind> for MemoryKind {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind {
        MemoryKind::Machine(kind)
    }
}

impl MayLeak for MiriMemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
            Machine | Global | ExternStatic | Tls | Mmap => true,
        }
    }
}

impl MiriMemoryKind {
    /// Whether we have a useful allocation span for an allocation of this kind.
    fn should_save_allocation_span(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            // Heap allocations are fine since the `Allocation` is created immediately.
            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
            // Everything else is unclear, let's not show potentially confusing spans.
            Machine | Global | ExternStatic | Tls | Runtime => false,
        }
    }
}

impl fmt::Display for MiriMemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::MiriMemoryKind::*;
        match self {
            Rust => write!(f, "Rust heap"),
            Miri => write!(f, "Miri bare-metal heap"),
            C => write!(f, "C heap"),
            WinHeap => write!(f, "Windows heap"),
            WinLocal => write!(f, "Windows local memory"),
            Machine => write!(f, "machine-managed memory"),
            Runtime => write!(f, "language runtime memory"),
            Global => write!(f, "global (static or const)"),
            ExternStatic => write!(f, "extern static"),
            Tls => write!(f, "thread-local static"),
            Mmap => write!(f, "mmap"),
        }
    }
}

pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;

/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait requires it: validity checking
// *might* be recursive and then has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance, we know exactly which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot be
    /// actually implemented in Miri. So instead, we approximate this, erroring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}

/// The "extra" information a pointer has over a regular `AllocId`.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
    Concrete(BorTag),
    Wildcard,
}

#[cfg(target_pointer_width = "64")]
static_assert_size!(StrictPointer, 24);
// Pointer does not fit as the layout algorithm isn't smart enough (but also, we tried using
// pattern types to get a larger niche that makes this fit and it didn't improve performance).
// #[cfg(target_pointer_width = "64")]
// static_assert_size!(Pointer, 24);
#[cfg(target_pointer_width = "64")]
static_assert_size!(Scalar, 32);
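
// Editor's sketch (std-only) of the pattern behind `static_assert_size!`: a
// compile-time size check, here demonstrating the kind of niche optimization the
// comment above alludes to.
#[cfg(test)]
mod size_assert_sketch {
    use std::num::NonZeroU64;

    // Fails at compile time, rather than at runtime, if the size regresses.
    const _: () = assert!(std::mem::size_of::<Option<NonZeroU64>>() == 8);

    #[test]
    fn niche_makes_the_option_free() {
        assert_eq!(std::mem::size_of::<Option<NonZeroU64>>(), 8);
    }
}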

impl fmt::Debug for Provenance {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Provenance::Concrete { alloc_id, tag } => {
                // Forward `alternate` flag to `alloc_id` printing.
                if f.alternate() {
                    write!(f, "[{alloc_id:#?}]")?;
                } else {
                    write!(f, "[{alloc_id:?}]")?;
                }
                // Print Borrow Tracker tag.
                write!(f, "{tag:?}")?;
            }
            Provenance::Wildcard => {
                write!(f, "[wildcard]")?;
            }
        }
        Ok(())
    }
}

impl interpret::Provenance for Provenance {
    /// We use absolute addresses in the `offset` of a `StrictPointer`.
    const OFFSET_IS_ADDR: bool = true;

    /// Miri implements wildcard provenance.
    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);

    fn get_alloc_id(self) -> Option<AllocId> {
        match self {
            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
            Provenance::Wildcard => None,
        }
    }

    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (prov, addr) = ptr.into_raw_parts(); // offset is absolute address
        write!(f, "{:#x}", addr.bytes())?;
        if f.alternate() {
            write!(f, "{prov:#?}")?;
        } else {
            write!(f, "{prov:?}")?;
        }
        Ok(())
    }
}

impl fmt::Debug for ProvenanceExtra {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
        }
    }
}

impl ProvenanceExtra {
    pub fn and_then<T>(self, f: impl FnOnce(BorTag) -> Option<T>) -> Option<T> {
        match self {
            ProvenanceExtra::Concrete(pid) => f(pid),
            ProvenanceExtra::Wildcard => None,
        }
    }
}
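
// Editor's sketch (std-only, with a stand-in `Tag` type instead of `BorTag`) of
// the contract of `ProvenanceExtra::and_then` above: it mirrors
// `Option::and_then`, so a wildcard short-circuits to `None` while a concrete
// tag is handed to the closure.
#[cfg(test)]
mod provenance_extra_sketch {
    struct Tag(u64);

    enum Extra {
        Concrete(Tag),
        Wildcard,
    }

    impl Extra {
        fn and_then<T>(self, f: impl FnOnce(Tag) -> Option<T>) -> Option<T> {
            match self {
                Extra::Concrete(tag) => f(tag),
                Extra::Wildcard => None,
            }
        }
    }

    #[test]
    fn wildcard_short_circuits() {
        assert_eq!(Extra::Concrete(Tag(7)).and_then(|tag| Some(tag.0)), Some(7));
        assert_eq!(Extra::Wildcard.and_then(|tag| Some(tag.0)), None);
    }
}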

/// Extra per-allocation data.
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// Global state of the borrow tracker, if enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Extra state for data race detection.
    ///
    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`.
    pub data_race: AllocDataRaceHandler,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists
    /// if this allocation is leakable. The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization objects like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync_objs: BTreeMap<Size, Box<dyn SyncObj>>,
}
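
// Editor's sketch (std-only, with a dummy trait instead of `SyncObj`) of why
// `sync_objs` lives inside the allocation: keying synchronization data by offset
// in a per-allocation `BTreeMap` means dropping the allocation drops all
// attached objects, with no global table left to scrub.
#[cfg(test)]
mod sync_obj_storage_sketch {
    use std::collections::BTreeMap;

    trait SyncObjLike {}
    struct DummyMutex;
    impl SyncObjLike for DummyMutex {}

    #[test]
    fn freeing_the_allocation_drops_its_objects() {
        let mut sync_objs: BTreeMap<u64, Box<dyn SyncObjLike>> = BTreeMap::new();
        // A hypothetical mutex attached at offset 16 of the allocation.
        sync_objs.insert(16, Box::new(DummyMutex));
        assert!(sync_objs.contains_key(&16));
        // "Freeing the allocation" frees everything attached to it.
        drop(sync_objs);
    }
}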

// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        panic!("our allocations should never be cloned");
    }
}

impl VisitProvenance for AllocExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync_objs: _ } = self;

        borrow_tracker.visit_provenance(visit);
        data_race.visit_provenance(visit);
    }
}

/// Precomputed layouts of primitive types.
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}

impl<'tcx> PrimitiveLayouts<'tcx> {
    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
        let tcx = layout_cx.tcx();
        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
        Ok(Self {
            unit: layout_cx.layout_of(tcx.types.unit)?,
            i8: layout_cx.layout_of(tcx.types.i8)?,
            i16: layout_cx.layout_of(tcx.types.i16)?,
            i32: layout_cx.layout_of(tcx.types.i32)?,
            i64: layout_cx.layout_of(tcx.types.i64)?,
            i128: layout_cx.layout_of(tcx.types.i128)?,
            isize: layout_cx.layout_of(tcx.types.isize)?,
            u8: layout_cx.layout_of(tcx.types.u8)?,
            u16: layout_cx.layout_of(tcx.types.u16)?,
            u32: layout_cx.layout_of(tcx.types.u32)?,
            u64: layout_cx.layout_of(tcx.types.u64)?,
            u128: layout_cx.layout_of(tcx.types.u128)?,
            usize: layout_cx.layout_of(tcx.types.usize)?,
            bool: layout_cx.layout_of(tcx.types.bool)?,
            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
        })
    }

    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.u8),
            16 => Some(self.u16),
            32 => Some(self.u32),
            64 => Some(self.u64),
            128 => Some(self.u128),
            _ => None,
        }
    }

    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.i8),
            16 => Some(self.i16),
            32 => Some(self.i32),
            64 => Some(self.i64),
            128 => Some(self.i128),
            _ => None,
        }
    }
}
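
// Editor's sketch (std-only, with type names standing in for `TyAndLayout`) of
// the lookup contract of `PrimitiveLayouts::uint`/`int` above: only power-of-two
// sizes from 8 to 128 bits have a primitive layout; everything else is `None`.
#[cfg(test)]
mod primitive_layout_lookup_sketch {
    fn uint_name(bits: u64) -> Option<&'static str> {
        match bits {
            8 => Some("u8"),
            16 => Some("u16"),
            32 => Some("u32"),
            64 => Some("u64"),
            128 => Some("u128"),
            _ => None,
        }
    }

    #[test]
    fn odd_sizes_have_no_primitive_layout() {
        assert_eq!(uint_name(64), Some("u64"));
        assert_eq!(uint_name(24), None);
    }
}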

/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Depending on settings, this will be `None`,
    /// global data for a data race detector,
    /// or the context required for running in GenMC mode.
    ///
    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
    pub data_race: GlobalDataRaceHandler,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all `EpollEventInterest`.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotonic clock.
    pub(crate) monotonic_clock: MonotonicClock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Handles blocking I/O and polling for completion.
    pub(crate) blocking_io: BlockingIoManager,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect at all; it is just tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// Precomputed `TyAndLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of the `Instance` exported under a given `Symbol` name.
    /// `None` means no `Instance` is exported under the given name.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// Equivalent of the `RUST_BACKTRACE` setting, consulted on encountering an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered user-relevant for the purposes of error reporting.
    pub(crate) user_relevant_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    pub(crate) extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by `ptr_to_int`, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocator used for the machine's `AllocBytes` in native-libs mode.
    pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>,

    /// The allocation IDs to report when they are being allocated
    /// (helps for debugging memory leaks and use-after-free bugs).
    pub(crate) tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of `compare_exchange_weak`, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handle of the optional shared object file for native functions.
    #[cfg(all(feature = "native-lib", unix))]
    pub native_lib: Vec<(libloading::Library, std::path::PathBuf)>,
    #[cfg(not(all(feature = "native-lib", unix)))]
    pub native_lib: Vec<!>,
    /// A memory location for exchanging the current `ecx` pointer with native code.
    #[cfg(all(feature = "native-lib", unix))]
    pub native_lib_ecx_interchange: &'static Cell<usize>,

    /// Run a garbage collector for `BorTag`s every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last `BorTag` GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by Miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values.
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// (Foreign) symbols that are synthesized as part of the allocator shim: the key indicates the
    /// name of the symbol being synthesized; the value indicates whether this should invoke some
    /// other symbol or whether this has special allocator semantics.
    pub(crate) allocator_shim_symbols: FxHashMap<Symbol, Either<Symbol, SpecialAllocatorMethod>>,
    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,

    /// Always prefer the intrinsic fallback body over the native Miri implementation.
    pub force_intrinsic_fallback: bool,

    /// Whether floating-point operations can behave non-deterministically.
    pub float_nondet: bool,
    /// Whether floating-point operations can have a non-deterministic rounding error.
    pub float_rounding_error: FloatRoundingErrorMode,

    /// Whether Miri artificially introduces short reads/writes on file descriptors.
    pub short_fd_operations: bool,
}
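
// Editor's sketch (std-only) of the interior-mutability pattern used by fields
// like `rng`, `allocation_spans`, and `symbolic_alignment` above: wrapping state
// in a `RefCell` lets methods that only hold `&self` still update bookkeeping.
#[cfg(test)]
mod interior_mutability_sketch {
    use std::cell::RefCell;

    struct Machine {
        counter: RefCell<u64>,
    }

    impl Machine {
        fn bump(&self) -> u64 {
            // `&self` suffices because mutation goes through the `RefCell`.
            let mut counter = self.counter.borrow_mut();
            *counter += 1;
            *counter
        }
    }

    #[test]
    fn shared_reference_can_mutate() {
        let machine = Machine { counter: RefCell::new(0) };
        assert_eq!(machine.bump(), 1);
        assert_eq!(machine.bump(), 2);
    }
}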

impl<'tcx> MiriMachine<'tcx> {
    /// Create a new MiriMachine.
    ///
    /// Invariant: `genmc_ctx.is_some() == config.genmc_config.is_some()`
    pub(crate) fn new(
        config: &MiriConfig,
        layout_cx: LayoutCx<'tcx>,
        genmc_ctx: Option<Rc<GenmcCtx>>,
    ) -> Self {
        let tcx = layout_cx.tcx();
        let user_relevant_crates = Self::get_user_relevant_crates(tcx, config);
        let layouts =
            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
        let profiler = config.measureme_out.as_ref().map(|out| {
            let crate_name =
                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
            let pid = process::id();
            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
            // the PID is padded so that the nondeterministic value of the PID does not spread
            // nondeterminism to the allocator. In Miri we are not aiming for such performance
            // control, we just pad for consistency with rustc.
            let filename = format!("{crate_name}-{pid:07}");
            let path = Path::new(out).join(filename);
            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
        });
        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
        let data_race = if config.genmc_config.is_some() {
            // `genmc_ctx` persists across executions, so we don't create a new one here.
            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
        } else if config.data_race_detector {
            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
        } else {
            GlobalDataRaceHandler::None
        };
        // Determine page size, stack address, and stack size.
        // These values are mostly meaningless, but the stack address is also where we start
        // allocating physical integer addresses for all allocations.
        let page_size = if let Some(page_size) = config.page_size {
            page_size
        } else {
            let target = &tcx.sess.target;
            match target.arch {
                Arch::Wasm32 | Arch::Wasm64 => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
                Arch::AArch64 => {
                    if target.is_like_darwin {
                        // No "definitive" source, but see:
                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
                        // https://github.com/ziglang/zig/issues/11308 etc.
                        16 * 1024
                    } else {
                        4 * 1024
                    }
                }
                _ => 4 * 1024,
            }
        };
        // On 16-bit targets, 32 pages is more than the entire address space!
        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
        let stack_size =
            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
        assert!(
            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
            "miri only supports up to {} CPUs, but {} were configured",
            cpu_affinity::MAX_CPUS,
            config.num_cpus
        );
        let threads = ThreadManager::new(config);
        let mut thread_cpu_affinity = FxHashMap::default();
        if matches!(&tcx.sess.target.os, Os::Linux | Os::FreeBsd | Os::Android) {
            thread_cpu_affinity
                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
        }
        let blocking_io = BlockingIoManager::new(config.isolated_op == IsolatedOp::Allow)
            .expect("Couldn't create poll instance");
        let alloc_addresses =
            RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr, tcx));
        MiriMachine {
            tcx,
            borrow_tracker,
            data_race,
            alloc_addresses,
            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
            env_vars: EnvVars::default(),
            main_fn_ret_place: None,
            argc: None,
            argv: None,
            cmd_line: None,
            tls: TlsData::default(),
            isolated_op: config.isolated_op,
            validation: config.validation,
            fds: shims::FdTable::init(config.mute_stdout_stderr),
            epoll_interests: shims::EpollInterestTable::new(),
            dirs: Default::default(),
            layouts,
            threads,
            thread_cpu_affinity,
            blocking_io,
            static_roots: Vec::new(),
            profiler,
            string_cache: Default::default(),
            exported_symbols_cache: FxHashMap::default(),
            backtrace_style: config.backtrace_style,
            user_relevant_crates,
            extern_statics: FxHashMap::default(),
            rng: RefCell::new(rng),
            allocator: (!config.native_lib.is_empty())
                .then(|| Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new()))),
            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
            track_alloc_accesses: config.track_alloc_accesses,
            check_alignment: config.check_alignment,
            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
            preemption_rate: config.preemption_rate,
            report_progress: config.report_progress,
            basic_block_count: 0,
            monotonic_clock: MonotonicClock::new(config.isolated_op == IsolatedOp::Allow),
            #[cfg(all(feature = "native-lib", unix))]
            native_lib: config.native_lib.iter().map(|lib_file_path| {
                let host_triple = rustc_session::config::host_tuple();
                let target_triple = tcx.sess.opts.target_triple.tuple();
                // Check if host target == the session target.
                if host_triple != target_triple {
                    panic!(
                        "calling native C functions in linked .so file requires host and target to be the same: \
                        host={host_triple}, target={target_triple}",
                    );
                }
                // Note: it is the user's responsibility to provide a correct SO file.
                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
                // undefined behaviour in Miri itself!
                (
                    unsafe {
                        libloading::Library::new(lib_file_path)
                            .expect("failed to read specified extern shared object file")
                    },
                    lib_file_path.clone(),
                )
            }).collect(),
            #[cfg(all(feature = "native-lib", unix))]
            native_lib_ecx_interchange: Box::leak(Box::new(Cell::new(0))),
            #[cfg(not(all(feature = "native-lib", unix)))]
            native_lib: config.native_lib.iter().map(|_| {
                panic!("calling functions from native libraries via FFI is not supported in this build of Miri")
            }).collect(),
            gc_interval: config.gc_interval,
            since_gc: 0,
            num_cpus: config.num_cpus,
            page_size,
            stack_addr,
            stack_size,
            collect_leak_backtraces: config.collect_leak_backtraces,
            allocation_spans: RefCell::new(FxHashMap::default()),
            symbolic_alignment: RefCell::new(FxHashMap::default()),
            union_data_ranges: FxHashMap::default(),
            pthread_mutex_sanity: Cell::new(false),
            pthread_rwlock_sanity: Cell::new(false),
            pthread_condvar_sanity: Cell::new(false),
            allocator_shim_symbols: Self::allocator_shim_symbols(tcx),
            mangle_internal_symbol_cache: Default::default(),
            force_intrinsic_fallback: config.force_intrinsic_fallback,
            float_nondet: config.float_nondet,
            float_rounding_error: config.float_rounding_error,
            short_fd_operations: config.short_fd_operations,
        }
    }

    fn allocator_shim_symbols(
        tcx: TyCtxt<'tcx>,
    ) -> FxHashMap<Symbol, Either<Symbol, SpecialAllocatorMethod>> {
        use rustc_codegen_ssa::base::allocator_shim_contents;

        // codegen uses `allocator_kind_for_codegen` here, but that's only needed to deal with
        // dylibs which we do not support.
        let Some(kind) = tcx.allocator_kind(()) else {
            return Default::default();
        };
        let methods = allocator_shim_contents(tcx, kind);
        let mut symbols = FxHashMap::default();
        for method in methods {
            let from_name = Symbol::intern(&mangle_internal_symbol(
                tcx,
                &allocator::global_fn_name(method.name),
            ));
            let to = match method.special {
                Some(special) => Either::Right(special),
                None =>
                    Either::Left(Symbol::intern(&mangle_internal_symbol(
                        tcx,
                        &allocator::default_fn_name(method.name),
                    ))),
            };
            symbols.try_insert(from_name, to).unwrap();
        }
        symbols
    }

    /// Retrieve the list of user-relevant crates based on `MIRI_LOCAL_CRATES` as set by cargo-miri,
    /// and extra crates set in the config.
    fn get_user_relevant_crates(tcx: TyCtxt<'_>, config: &MiriConfig) -> Vec<CrateNum> {
        // Convert the local crate names from the passed-in config into `CrateNum`s so that they can
        // be looked up quickly during execution.
        let local_crate_names = std::env::var("MIRI_LOCAL_CRATES")
            .map(|crates| crates.split(',').map(|krate| krate.to_string()).collect::<Vec<_>>())
            .unwrap_or_default();
        let mut local_crates = Vec::new();
        for &crate_num in tcx.crates(()) {
            let name = tcx.crate_name(crate_num);
            let name = name.as_str();
            if local_crate_names
                .iter()
                .chain(&config.user_relevant_crates)
                .any(|local_name| local_name == name)
            {
                local_crates.push(crate_num);
            }
        }
        local_crates
    }

    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }

    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This was just allocated, so there definitely is a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }

    pub(crate) fn communicate(&self) -> bool {
        self.isolated_op == IsolatedOp::Allow
    }

    /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
    pub(crate) fn is_local(&self, instance: ty::Instance<'tcx>) -> bool {
        let def_id = instance.def_id();
        def_id.is_local() || self.user_relevant_crates.contains(&def_id.krate)
    }

    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
    pub(crate) fn handle_abnormal_termination(&mut self) {
        // All strings in the profile data are stored in a single string table which is not
        // written to disk until the profiler is dropped. If the interpreter exits without dropping
        // the profiler, it is not possible to interpret the profile data and all measureme tools
        // will panic when given the file.
        drop(self.profiler.take());
    }

    pub(crate) fn page_align(&self) -> Align {
        Align::from_bytes(self.page_size).unwrap()
    }

    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .map(|(allocated, _deallocated)| allocated.data())
    }

    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .and_then(|(_allocated, deallocated)| *deallocated)
            .map(Span::data)
    }

    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::TrackingAlloc(id, size, align));
        }

        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        let data_race = match &ecx.machine.data_race {
            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
            GlobalDataRaceHandler::Vclocks(data_race) =>
                AllocDataRaceHandler::Vclocks(
                    data_race::AllocState::new_allocation(
                        data_race,
                        &ecx.machine.threads,
                        size,
                        kind,
                        ecx.machine.current_user_relevant_span(),
                    ),
                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
                ),
            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
                // GenMC learns about new allocations directly from the alloc_addresses module,
                // since it has to be able to control the address at which they are placed.
                AllocDataRaceHandler::Genmc
            }
        };

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_user_relevant_span(), None));
        }

        interp_ok(AllocExtra {
            borrow_tracker,
            data_race,
            backtrace,
            sync_objs: BTreeMap::default(),
        })
    }
}
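
// Editor's sketch (std-only) of the `MIRI_LOCAL_CRATES` format consumed by
// `get_user_relevant_crates` above: a comma-separated list of crate names. The
// crate names used here are hypothetical.
#[cfg(test)]
mod local_crates_env_var_sketch {
    #[test]
    fn comma_separated_crate_names() {
        let raw = "my_crate,my_dep";
        let names: Vec<String> = raw.split(',').map(str::to_string).collect();
        assert_eq!(names, ["my_crate", "my_dep"]);
    }
}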

impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            blocking_io: _,
            epoll_interests: _,
            tcx: _,
            isolated_op: _,
            validation: _,
            monotonic_clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            user_relevant_crates: _,
            rng: _,
            allocator: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            #[cfg(all(feature = "native-lib", unix))]
            native_lib_ecx_interchange: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            allocator_shim_symbols: _,
            mangle_internal_symbol_cache: _,
            force_intrinsic_fallback: _,
            float_nondet: _,
            float_rounding_error: _,
            short_fd_operations: _,
        } = self;

        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}

/// A rustc InterpCx for Miri.
pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;

/// A little trait that's useful to be inherited by extension traits.
pub trait MiriInterpCxExt<'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
}
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}
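
// Editor's sketch (std-only) of the extension-trait pattern that
// `MiriInterpCxExt` enables: downstream traits only need the two accessors and
// can then provide default methods on top of the concrete context type.
#[cfg(test)]
mod extension_trait_sketch {
    struct Cx(u32);

    trait CxExt {
        fn cx(&self) -> &Cx;

        // Default methods get concrete access via the accessor.
        fn value(&self) -> u32 {
            self.cx().0
        }
    }

    impl CxExt for Cx {
        fn cx(&self) -> &Cx {
            self
        }
    }

    #[test]
    fn default_method_uses_accessor() {
        assert_eq!(Cx(5).value(), 5);
    }
}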

/// Machine hook implementations.
impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
    type MemoryKind = MiriMemoryKind;
    type ExtraFnVal = DynSym;

    type FrameExtra = FrameExtra<'tcx>;
    type AllocExtra = AllocExtra<'tcx>;

    type Provenance = Provenance;
    type ProvenanceExtra = ProvenanceExtra;
    type Bytes = MiriAllocBytes;

    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;

    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    const PANIC_ON_ALLOC_FAIL: bool = false;

    #[inline(always)]
    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
        ecx.machine.check_alignment != AlignmentCheck::None
    }

    #[inline(always)]
    fn alignment_check(
        ecx: &MiriInterpCx<'tcx>,
        alloc_id: AllocId,
        alloc_align: Align,
        alloc_kind: AllocKind,
        offset: Size,
        align: Align,
    ) -> Option<Misalignment> {
        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
            // Just use the built-in check.
            return None;
        }
        if alloc_kind != AllocKind::LiveData {
            // Can't have any extra info here.
            return None;
        }
        // Let's see which alignment we have been promised for this allocation.
        let (promised_offset, promised_align) = ecx
            .machine
            .symbolic_alignment
            .borrow()
            .get(&alloc_id)
            .copied()
            .unwrap_or((Size::ZERO, alloc_align));
        if promised_align < align {
            // Definitely not enough.
            Some(Misalignment { has: promised_align, required: align })
        } else {
            // What's the offset between us and the promised alignment?
            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
            // That must also be aligned.
            if distance.is_multiple_of(align.bytes()) {
                // All looking good!
                None
            } else {
                // The biggest power of two through which `distance` is divisible.
                let distance_pow2 = 1 << distance.trailing_zeros();
                Some(Misalignment {
                    has: Align::from_bytes(distance_pow2).unwrap(),
                    required: align,
                })
            }
        }
    }

    #[inline(always)]
    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
        ecx.machine.validation != ValidationMode::No
    }
    #[inline(always)]
    fn enforce_validity_recursively(
        ecx: &InterpCx<'tcx, Self>,
        _layout: TyAndLayout<'tcx>,
    ) -> bool {
        ecx.machine.validation == ValidationMode::Deep
    }

    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        !ecx.tcx.sess.overflow_checks()
    }

    fn check_fn_target_features(
        ecx: &MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
    ) -> InterpResult<'tcx> {
        let attrs = ecx.tcx.codegen_instance_attrs(instance.def);
        if attrs
            .target_features
            .iter()
            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
        {
            let unavailable = attrs
                .target_features
                .iter()
                .filter(|&feature| {
                    feature.kind != TargetFeatureKind::Implied
                        && !ecx.tcx.sess.target_features.contains(&feature.name)
                })
                .fold(String::new(), |mut s, feature| {
                    if !s.is_empty() {
                        s.push_str(", ");
                    }
                    s.push_str(feature.name.as_str());
                    s
                });
            let msg = format!(
                "calling a function that requires unavailable target features: {unavailable}"
            );
            // On WASM, this is not UB, but instead gets rejected during validation of the module
            // (see #84988).
            if ecx.tcx.sess.target.is_like_wasm {
                throw_machine_stop!(TerminationInfo::Abort(msg));
            } else {
                throw_ub_format!("{msg}");
            }
        }
        interp_ok(())
    }

    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            let _trace = enter_trace_span!("emulate_foreign_item");
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be `Ok(None)` if we're emulating the call entirely within Miri (no need
            // to run extra MIR), and `Ok(Some(body))` if we found MIR to run for the
            // foreign function.
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
1246            let args = MiriInterpCx::copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
1247            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
1248            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
1249        }
1250
1251        if ecx.machine.data_race.as_genmc_ref().is_some()
1252            && ecx.genmc_intercept_function(instance, args, dest)?
1253        {
1254            ecx.return_to_block(ret)?;
1255            return interp_ok(None);
1256        }
1257
1258        // Otherwise, load the MIR.
1259        let _trace = enter_trace_span!("load_mir");
1260        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
1261    }
1262
1263    #[inline(always)]
1264    fn call_extra_fn(
1265        ecx: &mut MiriInterpCx<'tcx>,
1266        fn_val: DynSym,
1267        abi: &FnAbi<'tcx, Ty<'tcx>>,
1268        args: &[FnArg<'tcx>],
1269        dest: &PlaceTy<'tcx>,
1270        ret: Option<mir::BasicBlock>,
1271        unwind: mir::UnwindAction,
1272    ) -> InterpResult<'tcx> {
1273        let args = MiriInterpCx::copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
1274        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
1275    }
1276
1277    #[inline(always)]
1278    fn call_intrinsic(
1279        ecx: &mut MiriInterpCx<'tcx>,
1280        instance: ty::Instance<'tcx>,
1281        args: &[OpTy<'tcx>],
1282        dest: &PlaceTy<'tcx>,
1283        ret: Option<mir::BasicBlock>,
1284        unwind: mir::UnwindAction,
1285    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
1286        ecx.call_intrinsic(instance, args, dest, ret, unwind)
1287    }
1288
1289    #[inline(always)]
1290    fn assert_panic(
1291        ecx: &mut MiriInterpCx<'tcx>,
1292        msg: &mir::AssertMessage<'tcx>,
1293        unwind: mir::UnwindAction,
1294    ) -> InterpResult<'tcx> {
1295        ecx.assert_panic(msg, unwind)
1296    }
1297
1298    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
1299        ecx.start_panic_nounwind(msg)
1300    }
1301
1302    fn unwind_terminate(
1303        ecx: &mut InterpCx<'tcx, Self>,
1304        reason: mir::UnwindTerminateReason,
1305    ) -> InterpResult<'tcx> {
1306        // Call the lang item.
1307        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
1308        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
1309        ecx.call_function(
1310            panic,
1311            ExternAbi::Rust,
1312            &[],
1313            None,
1314            ReturnContinuation::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
1315        )?;
1316        interp_ok(())
1317    }
1318
1319    #[inline(always)]
1320    fn binary_ptr_op(
1321        ecx: &MiriInterpCx<'tcx>,
1322        bin_op: mir::BinOp,
1323        left: &ImmTy<'tcx>,
1324        right: &ImmTy<'tcx>,
1325    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
1326        ecx.binary_ptr_op(bin_op, left, right)
1327    }
1328
    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }

    #[inline(always)]
    fn apply_float_nondet(
        ecx: &mut InterpCx<'tcx, Self>,
        val: ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        crate::math::apply_random_float_error_to_imm(ecx, val, 4)
    }

    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }

    #[inline(always)]
    fn float_fuse_mul_add(ecx: &InterpCx<'tcx, Self>) -> bool {
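        // Nondeterministically decide whether this `fmuladd` call fuses. A sketch of
        // the observable effect: `fmuladdf32(a, b, c)` then evaluates either as
        // `(a * b) + c` (two roundings) or as the fused `a.mul_add(b, c)` (one).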
        ecx.machine.float_nondet && ecx.machine.rng.borrow_mut().random()
    }

    #[inline(always)]
    fn runtime_checks(
        ecx: &InterpCx<'tcx, Self>,
        r: mir::RuntimeChecks,
    ) -> InterpResult<'tcx, bool> {
        interp_ok(r.value(ecx.tcx.sess))
    }

    #[inline(always)]
    fn thread_local_static_pointer(
        ecx: &mut MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }

    fn extern_static_pointer(
        ecx: &MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
            // Various parts of the engine rely on `get_alloc_info` for size and alignment
            // information. That uses the type information of this static.
            // Make sure it matches the Miri allocation for this.
            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
                panic!("extern_statics cannot contain wildcards")
            };
            let info = ecx.get_alloc_info(alloc_id);
            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
            let extern_decl_layout =
                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
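            // E.g., a program declaring `extern "C" { static environ: u8; }` (size 1)
            // would disagree with Miri's pointer-sized `environ` shim and trigger the
            // error below.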
            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
                throw_unsup_format!(
                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
                    but Miri emulates it via an extern static shim \
                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
                    name = ecx.tcx.def_path_str(def_id),
                    krate = ecx.tcx.crate_name(def_id.krate),
                    decl_size = extern_decl_layout.size.bytes(),
                    decl_align = extern_decl_layout.align.bytes(),
                    shim_size = info.size.bytes(),
                    shim_align = info.align.bytes(),
                )
            }
            interp_ok(ptr)
        } else {
            throw_unsup_format!("extern static `{link_name}` is not supported by Miri")
        }
    }

    fn init_local_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Self::AllocExtra> {
        assert!(kind != MiriMemoryKind::Global.into());
        MiriMachine::init_allocation(ecx, id, kind, size, align)
    }

    fn adjust_alloc_root_pointer(
        ecx: &MiriInterpCx<'tcx>,
        ptr: interpret::Pointer<CtfeProvenance>,
        kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
        let alloc_id = ptr.provenance.alloc_id();
        if cfg!(debug_assertions) {
            // The machine promises to never call us on thread-local or extern statics.
            match ecx.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
                    panic!("adjust_alloc_root_pointer called on thread-local static")
                }
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
                    panic!("adjust_alloc_root_pointer called on extern static")
                }
                _ => {}
            }
        }
        // FIXME: can we somehow preserve the immutability of `ptr`?
        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
        } else {
            // The value does not matter since the borrow tracker is disabled.
            BorTag::default()
        };
        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
    }

    /// Called on `usize as ptr` casts.
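    /// E.g., `let p = 0x2a0usize as *const u8;` lands here; the machine decides
    /// what provenance, if any, the resulting pointer carries.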
    #[inline(always)]
    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
        ecx.ptr_from_addr_cast(addr)
    }

    /// Called on `ptr as usize` casts.
    /// (Actually computing the resulting `usize` doesn't need machine help,
    /// that's just `Scalar::try_to_int`.)
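    /// E.g., `&x as *const i32 as usize` exposes the provenance of `x`'s
    /// allocation, so later int-to-ptr casts may resolve to it.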
    #[inline(always)]
    fn expose_provenance(
        ecx: &InterpCx<'tcx, Self>,
        provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        ecx.expose_provenance(provenance)
    }

    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
    /// be used to disambiguate situations where a wildcard pointer sits right in between two
    /// allocations.
    ///
    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
    /// The resulting `AllocId` will just be used for that one step and then forgotten again
    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
    /// stored in machine state).
    ///
    /// When this fails, that means the pointer does not point to a live allocation.
    fn ptr_get_alloc(
        ecx: &MiriInterpCx<'tcx>,
        ptr: StrictPointer,
        size: i64,
    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
        let rel = ecx.ptr_get_alloc(ptr, size);

        rel.map(|(alloc_id, size)| {
            let tag = match ptr.provenance {
                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
                Provenance::Wildcard => ProvenanceExtra::Wildcard,
            };
            (alloc_id, size, tag)
        })
    }

    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
    ///
    /// If `alloc` contains pointers, then they are all pointing to globals.
    ///
    /// This should avoid copying if no work has to be done! If this returns an owned
    /// allocation (because a copy had to be done to adjust things), machine memory will
    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
    /// owned allocation to the map even when the map is shared.)
    fn adjust_global_allocation<'b>(
        ecx: &InterpCx<'tcx, Self>,
        id: AllocId,
        alloc: &'b Allocation,
    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
    {
        let alloc = alloc.adjust_from_tcx(
            &ecx.tcx,
            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
            |ptr| ecx.global_root_pointer(ptr),
        )?;
        let kind = MiriMemoryKind::Global.into();
        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
        interp_ok(Cow::Owned(alloc.with_extra(extra)))
    }

    #[inline(always)]
    fn before_memory_read(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_extra: &AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(
                alloc_id,
                range,
                borrow_tracker::AccessKind::Read,
            ));
        }
        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_data_race) => {
                let _trace = enter_trace_span!(data_race::before_memory_read);
                let AllocDataRaceHandler::Vclocks(data_race, _weak_memory) = &alloc_extra.data_race
                else {
                    unreachable!();
                };
                data_race.read_non_atomic(alloc_id, range, NaReadType::Read, None, machine)?;
            }
        }
        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
        }
        // Check if there are any sync objects that would like to prevent reading this memory.
        for (_offset, obj) in alloc_extra.sync_objs.range(range.start..range.end()) {
            obj.on_access(concurrency::sync::AccessKind::Read)?;
        }

        interp_ok(())
    }

    #[inline(always)]
    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(
                alloc_id,
                range,
                borrow_tracker::AccessKind::Write,
            ));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_write);
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
                    &mut alloc_extra.data_race
                else {
                    unreachable!()
                };
                data_race.write_non_atomic(alloc_id, range, NaWriteType::Write, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory
                        .non_atomic_write(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
        }
        // Delete sync objects that don't like writes.
        // Most of the time, we can just skip this.
        if !alloc_extra.sync_objs.is_empty() {
            let mut to_delete = vec![];
            for (offset, obj) in alloc_extra.sync_objs.range(range.start..range.end()) {
                obj.on_access(concurrency::sync::AccessKind::Write)?;
                if obj.delete_on_write() {
                    to_delete.push(*offset);
                }
            }
            for offset in to_delete {
                alloc_extra.sync_objs.remove(&offset);
            }
        }
        interp_ok(())
    }

    #[inline(always)]
    fn before_memory_deallocation(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        size: Size,
        align: Align,
        kind: MemoryKind,
    ) -> InterpResult<'tcx> {
        if machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.handle_dealloc(machine, alloc_id, ptr.addr(), kind)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_deallocation);
                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
                data_race.write_non_atomic(
                    alloc_id,
                    alloc_range(Size::ZERO, size),
                    NaWriteType::Deallocate,
                    None,
                    machine,
                )?;
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_deallocation(alloc_id, prov_extra, size, machine)?;
        }
        // Check if there are any sync objects that would like to prevent freeing this memory.
        for obj in alloc_extra.sync_objs.values() {
            obj.on_access(concurrency::sync::AccessKind::Dealloc)?;
        }

        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
        {
            *deallocated_at = Some(machine.current_user_relevant_span());
        }
        machine.free_alloc_id(alloc_id, size, align, kind);
        interp_ok(())
    }

    #[inline(always)]
    fn retag_ptr_value(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_ptr_value(kind, val)
        } else {
            interp_ok(val.clone())
        }
    }

    #[inline(always)]
    fn retag_place_contents(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_place_contents(kind, place)?;
        }
        interp_ok(())
    }

    fn protect_in_place_function_argument(
        ecx: &mut InterpCx<'tcx, Self>,
        place: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        // If we have a borrow tracker, we also have it set up protection so that all reads *and
        // writes* during this call are insta-UB.
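        // A sketch of what this catches (hypothetical user code): if `x` is passed
        // in-place to `f(x)` and the caller kept a raw pointer into `x`, any access
        // through that pointer while `f` runs is immediate UB.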
        let protected_place = if ecx.machine.borrow_tracker.is_some() {
            ecx.protect_place(place)?
        } else {
            // No borrow tracker.
            place.clone()
        };
        // We do need to write `uninit` so that even after the call ends, the former contents of
        // this place cannot be observed any more. We do the write after retagging so that for
        // Tree Borrows, this is considered to activate the new tag.
        // Conveniently this also ensures that the place actually points to suitable memory.
        ecx.write_uninit(&protected_place)?;
        // Now we throw away the protected place, ensuring its tag is never used again.
        interp_ok(())
    }

    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        // Start recording our event before doing anything else
        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
            let fn_name = frame.instance().to_string();
            let entry = ecx.machine.string_cache.entry(fn_name.clone());
            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));

            Some(profiler.start_recording_interval_event_detached(
                *name,
                measureme::EventId::from_label(*name),
                ecx.active_thread().to_u32(),
            ))
        } else {
            None
        };

        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();

        let extra = FrameExtra {
            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
            catch_unwind: None,
            timing,
            user_relevance: ecx.machine.user_relevance(&frame),
            data_race: ecx
                .machine
                .data_race
                .as_vclocks_ref()
                .map(|_| data_race::FrameState::default()),
        };

        interp_ok(frame.with_extra(extra))
    }

    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        ecx.active_thread_stack()
    }

    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        ecx.active_thread_stack_mut()
    }

    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
        ecx.machine.since_gc += 1;
        // Possibly report our progress. This will point at the terminator we are about to execute.
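        // (E.g., with `-Zmiri-report-progress=10000`, a report is emitted every
        // 10000 basic blocks.)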
        if let Some(report_progress) = ecx.machine.report_progress {
            if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
                    block_count: ecx.machine.basic_block_count,
                });
            }
        }

        // Search for BorTags to find all live pointers, then remove all other tags from borrow
        // stacks.
        // When debug assertions are enabled, run the GC as often as possible so that any cases
        // where it mistakenly removes an important tag become visible.
        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
            ecx.machine.since_gc = 0;
            ecx.run_provenance_gc();
        }

        // These are our preemption points.
        // (This will only take effect after the terminator has been executed.)
        ecx.maybe_preempt_active_thread();

        // Make sure some time passes.
        ecx.machine.monotonic_clock.tick();

        interp_ok(())
    }

    #[inline(always)]
    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        if ecx.frame().extra.user_relevance >= ecx.active_thread_ref().current_user_relevance() {
            // We just pushed a frame that's at least as relevant as the so-far most relevant frame.
            // That means we are now the most relevant frame.
            let stack_len = ecx.active_thread_stack().len();
            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
        }
        interp_ok(())
    }

    fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        let frame = ecx.frame();
        // We want this *before* the return value copy, because the return place itself is protected
        // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value.
        if ecx.machine.borrow_tracker.is_some() {
            ecx.on_stack_pop(frame)?;
        }
        if ecx
            .active_thread_ref()
            .top_user_relevant_frame()
            .expect("there should always be a most relevant frame for a non-empty stack")
            == ecx.frame_idx()
        {
            // We are popping the most relevant frame. We have no clue what the next relevant frame
            // below that is, so we recompute that.
            // (If this ever becomes a bottleneck, we could have `push` store the previous
            // user-relevant frame and restore that here.)
            // We have to skip the frame that is just being popped.
            ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
        }
        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
        // concurrency and what it prints is just plain wrong. So we print our own information
        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
        info!("Leaving {}", ecx.frame().instance());
        interp_ok(())
    }

    #[inline(always)]
    fn after_stack_pop(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        unwinding: bool,
    ) -> InterpResult<'tcx, ReturnAction> {
        let res = {
            // Move `frame` into a sub-scope so we control when it will be dropped.
            let mut frame = frame;
            let timing = frame.extra.timing.take();
            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
            if let Some(profiler) = ecx.machine.profiler.as_ref() {
                profiler.finish_recording_interval_event(timing.unwrap());
            }
            res
        };
        // Needs to be done after dropping frame to show up on the right nesting level.
        // (Cc https://github.com/rust-lang/miri/issues/2266)
        if !ecx.active_thread_stack().is_empty() {
            info!("Continuing in {}", ecx.frame().instance());
        }
        res
    }

    fn after_local_read(
        ecx: &InterpCx<'tcx, Self>,
        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        local: mir::Local,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &frame.extra.data_race {
            let _trace = enter_trace_span!(data_race::after_local_read);
            data_race.local_read(local, &ecx.machine);
        }
        interp_ok(())
    }

    fn after_local_write(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        storage_live: bool,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &ecx.frame().extra.data_race {
            let _trace = enter_trace_span!(data_race::after_local_write);
            data_race.local_write(local, storage_live, &ecx.machine);
        }
        interp_ok(())
    }

    fn after_local_moved_to_memory(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        mplace: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
            panic!("after_local_moved_to_memory should only be called on fresh allocations");
        };
        // Record the span where this was allocated: the declaration of the local.
        let local_decl = &ecx.frame().body().local_decls[local];
        let span = local_decl.source_info.span;
        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
        // The data race system has to fix the clocks used for this write.
        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
        if let Some(data_race) =
            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
        {
            let _trace = enter_trace_span!(data_race::after_local_moved_to_memory);
            data_race.local_moved_to_memory(
                local,
                alloc_info.data_race.as_vclocks_mut().unwrap(),
                machine,
            );
        }
        interp_ok(())
    }

    fn get_global_alloc_salt(
        ecx: &InterpCx<'tcx, Self>,
        instance: Option<ty::Instance<'tcx>>,
    ) -> usize {
        let unique = if let Some(instance) = instance {
            // Functions cannot be identified by pointers, as asm-equal functions can get
            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
            // mention of a function. This means that `main as fn() == main as fn()` is false, while
            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
            // whether codegen will actually emit duplicate functions. It does that when they have
            // non-lifetime generics, or when they can be inlined. All other functions are given a
            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
            // cannot be relied upon for anything. But if we don't do this, the
            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
            // backtraces look terrible.
            let is_generic = instance
                .args
                .into_iter()
                .any(|arg| !matches!(arg.kind(), ty::GenericArgKind::Lifetime(_)));
            let can_be_inlined = matches!(
                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
                InliningThreshold::Always
            ) || !matches!(
                ecx.tcx.codegen_instance_attrs(instance.def).inline,
                InlineAttr::Never
            );
            !is_generic && !can_be_inlined
        } else {
            // Non-functions are never unique.
            false
        };
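        // Illustration: a unique function always gets the same salt and hence a
        // stable address, while a non-unique global may be spread over up to
        // `ADDRS_PER_ANON_GLOBAL` (32) distinct base addresses.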
        // Always use the same salt if the allocation is unique.
        if unique {
            CTFE_ALLOC_SALT
        } else {
            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
        }
    }

    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
    }

    fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams {
        use crate::alloc::MiriAllocParams;

        match &self.allocator {
            Some(alloc) => MiriAllocParams::Isolated(alloc.clone()),
            None => MiriAllocParams::Global,
        }
    }

    fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
        #[cfg(feature = "tracing")]
        {
            span().entered()
        }
        #[cfg(not(feature = "tracing"))]
        #[expect(clippy::unused_unit)]
        {
            let _ = span; // so we avoid the "unused variable" warning
            ()
        }
    }
}

/// Trait for callbacks handling asynchronous machine operations.
pub trait MachineCallback<'tcx, T>: VisitProvenance {
    /// The function to be invoked when the callback is fired.
    fn call(
        self: Box<Self>,
        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
        arg: T,
    ) -> InterpResult<'tcx>;
}

/// Type alias for boxed machine callbacks with generic argument type.
pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;

/// Creates a `DynMachineCallback`:
///
/// ```rust
/// callback!(
///     @capture<'tcx> {
///         var1: Ty1,
///         var2: Ty2<'tcx>,
///     }
///     |this, arg: ArgTy| {
///         // Implement the callback here.
///         todo!()
///     }
/// )
/// ```
///
/// The types of all captured variables must implement `VisitProvenance`.
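///
/// A hypothetical concrete instantiation (the scenario is illustrative): capture
/// the destination place and write to it once the callback fires.
///
/// ```rust
/// let cb = callback!(
///     @capture<'tcx> {
///         dest: MPlaceTy<'tcx>,
///     }
///     |this, unblock: UnblockKind| {
///         assert_eq!(unblock, UnblockKind::Ready);
///         this.write_scalar(Scalar::from_u32(0), &dest)
///     }
/// );
/// ```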
#[macro_export]
macro_rules! callback {
    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
        { $($name:ident: $type:ty),* $(,)? }
     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
        struct Callback<$tcx, $($lft),*> {
            $($name: $type,)*
            _phantom: std::marker::PhantomData<&$tcx ()>,
        }

        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
                $(
                    self.$name.visit_provenance(_visit);
                )*
            }
        }

        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
            fn call(
                self: Box<Self>,
                $this: &mut MiriInterpCx<$tcx>,
                $arg: $arg_ty
            ) -> InterpResult<$tcx> {
                #[allow(unused_variables)]
                let Callback { $($name,)* _phantom } = *self;
                $body
            }
        }

        Box::new(Callback {
            $($name,)*
            _phantom: std::marker::PhantomData
        })
    }};
}