//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.

use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::collections::BTreeMap;
use std::path::Path;
use std::rc::Rc;
use std::{fmt, process};

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rustc_abi::{Align, ExternAbi, Size};
use rustc_apfloat::{Float, FloatConvert};
use rustc_ast::expand::allocator::{self, SpecialAllocatorMethod};
use rustc_data_structures::either::Either;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
#[allow(unused)]
use rustc_data_structures::static_assert_size;
use rustc_hir::attrs::InlineAttr;
use rustc_log::tracing;
use rustc_middle::middle::codegen_fn_attrs::TargetFeatureKind;
use rustc_middle::mir;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::InliningThreshold;
use rustc_span::def_id::{CrateNum, DefId};
use rustc_span::{Span, SpanData, Symbol};
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::callconv::FnAbi;
use rustc_target::spec::{Arch, Os};

use crate::alloc_addresses::EvalContextExt;
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::sync::SyncObj;
use crate::concurrency::{
    AllocDataRaceHandler, GenmcCtx, GenmcEvalContextExt as _, GlobalDataRaceHandler, weak_memory,
};
use crate::*;

/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`)
pub const SIGRTMAX: i32 = 42;
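
// A compile-time sanity check (illustrative, not in the original source):
// `signal(7)` requires `SIGRTMAX - SIGRTMIN >= 8` (the value of `_POSIX_RTSIG_MAX`),
// which the constants above satisfy since 42 - 34 == 8.
const _: () = assert!(SIGRTMAX - SIGRTMIN >= 8);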

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum AlignmentCheck {
    /// Do not check alignment.
    None,
    /// Check alignment "symbolically", i.e., using only the requested alignment for an allocation and not its real base address.
    Symbolic,
    /// Check alignment on the actual physical integer address.
    Int,
}

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum RejectOpWith {
    /// Isolated op is rejected with an abort of the machine.
    Abort,

    /// If not `Abort`, Miri returns an error for an isolated op. The following
    /// options determine whether the user is warned about such an error.
    /// Do not print a warning about the rejected isolated op.
    NoWarning,

    /// Print a warning about the rejected isolated op, with a backtrace.
    Warning,

    /// Print a warning about the rejected isolated op, without a backtrace.
    WarningWithoutBacktrace,
}

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum IsolatedOp {
    /// Reject an op requiring communication with the host. By
    /// default, Miri rejects the op with an abort. Otherwise, it returns
    /// an error code and prints a warning about it. Warning levels
    /// are controlled by the `RejectOpWith` enum.
    Reject(RejectOpWith),

    /// Execute op requiring communication with the host, i.e. disable isolation.
    Allow,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BacktraceStyle {
    /// Prints a terser backtrace which ideally only contains relevant information.
    Short,
    /// Prints a backtrace with all possible information.
    Full,
    /// Prints only the frame that the error occurs in.
    Off,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ValidationMode {
    /// Do not perform any kind of validation.
    No,
    /// Validate the interior of the value, but not things behind references.
    Shallow,
    /// Fully recursively validate references.
    Deep,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum FloatRoundingErrorMode {
    /// Apply a random error (the default).
    Random,
    /// Don't apply any error.
    None,
    /// Always apply the maximum error (with a random sign).
    Max,
}

/// Extra data stored with each stack frame
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is `Some`, then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped while unwinding from a panic,
    /// we stop unwinding and use the `CatchUnwindData` to handle catching.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates how user-relevant this frame is. `#[track_caller]` frames are never relevant.
    /// Frames from user-relevant crates are maximally relevant; frames from other crates are less
    /// relevant.
    pub user_relevance: u8,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}

impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Omitting `timing`, it does not support `Debug`.
        let FrameExtra { borrow_tracker, catch_unwind, timing: _, user_relevance, data_race } =
            self;
        f.debug_struct("FrameData")
            .field("borrow_tracker", borrow_tracker)
            .field("catch_unwind", catch_unwind)
            .field("user_relevance", user_relevance)
            .field("data_race", data_race)
            .finish()
    }
}

impl VisitProvenance for FrameExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let FrameExtra { catch_unwind, borrow_tracker, timing: _, user_relevance: _, data_race: _ } =
            self;

        catch_unwind.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
    }
}

/// Extra memory kinds
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`)
    WinLocal,
    /// Memory for args, errno, env vars, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime, e.g. for readdir. Separate from `Machine` because we clean
    /// it up (or expect the user to invoke operations that clean it up) and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program
    Mmap,
}

impl From<MiriMemoryKind> for MemoryKind {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind {
        MemoryKind::Machine(kind)
    }
}

impl MayLeak for MiriMemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
            Machine | Global | ExternStatic | Tls | Mmap => true,
        }
    }
}

impl MiriMemoryKind {
    /// Whether we have a useful allocation span for an allocation of this kind.
    fn should_save_allocation_span(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            // Heap allocations are fine since the `Allocation` is created immediately.
            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
            // Everything else is unclear, let's not show potentially confusing spans.
            Machine | Global | ExternStatic | Tls | Runtime => false,
        }
    }
}

impl fmt::Display for MiriMemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::MiriMemoryKind::*;
        match self {
            Rust => write!(f, "Rust heap"),
            Miri => write!(f, "Miri bare-metal heap"),
            C => write!(f, "C heap"),
            WinHeap => write!(f, "Windows heap"),
            WinLocal => write!(f, "Windows local memory"),
            Machine => write!(f, "machine-managed memory"),
            Runtime => write!(f, "language runtime memory"),
            Global => write!(f, "global (static or const)"),
            ExternStatic => write!(f, "extern static"),
            Tls => write!(f, "thread-local static"),
            Mmap => write!(f, "mmap"),
        }
    }
}

pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;

/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait requires that: validity checking
// *might* be recursive and then it has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance, we know exactly which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot be
    /// actually implemented in Miri. So instead, we approximate this, erroring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}
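
// Illustrative example (not part of this module): interpreted code like the
// following produces a wildcard-provenance pointer under Miri.
//
//     let x = 42u8;
//     let addr = &x as *const u8 as usize; // the int cast exposes the provenance of `&x`
//     let p = addr as *const u8;           // `p` carries `Provenance::Wildcard`
//
// Accesses through `p` are then checked against all exposed pointers, as
// described above.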

/// The "extra" information a pointer has over a regular AllocId.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
    Concrete(BorTag),
    Wildcard,
}

#[cfg(target_pointer_width = "64")]
static_assert_size!(StrictPointer, 24);
// FIXME: this would fit in 24 bytes, but layout optimizations are not smart enough.
// #[cfg(target_pointer_width = "64")]
// static_assert_size!(Pointer, 24);
#[cfg(target_pointer_width = "64")]
static_assert_size!(Scalar, 32);

impl fmt::Debug for Provenance {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Provenance::Concrete { alloc_id, tag } => {
                // Forward `alternate` flag to `alloc_id` printing.
                if f.alternate() {
                    write!(f, "[{alloc_id:#?}]")?;
                } else {
                    write!(f, "[{alloc_id:?}]")?;
                }
                // Print Borrow Tracker tag.
                write!(f, "{tag:?}")?;
            }
            Provenance::Wildcard => {
                write!(f, "[wildcard]")?;
            }
        }
        Ok(())
    }
}

impl interpret::Provenance for Provenance {
    /// We use absolute addresses in the `offset` of a `StrictPointer`.
    const OFFSET_IS_ADDR: bool = true;

    /// Miri implements wildcard provenance.
    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);

    fn get_alloc_id(self) -> Option<AllocId> {
        match self {
            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
            Provenance::Wildcard => None,
        }
    }

    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (prov, addr) = ptr.into_raw_parts(); // offset is absolute address
        write!(f, "{:#x}", addr.bytes())?;
        if f.alternate() {
            write!(f, "{prov:#?}")?;
        } else {
            write!(f, "{prov:?}")?;
        }
        Ok(())
    }
}

impl fmt::Debug for ProvenanceExtra {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
        }
    }
}

impl ProvenanceExtra {
    pub fn and_then<T>(self, f: impl FnOnce(BorTag) -> Option<T>) -> Option<T> {
        match self {
            ProvenanceExtra::Concrete(pid) => f(pid),
            ProvenanceExtra::Wildcard => None,
        }
    }
}
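
// Illustrative usage (a sketch, not from the original source): tag-keyed lookups
// compose naturally with `and_then`, e.g. `prov.and_then(|tag| stacks.get(&tag))`,
// where `stacks` is a hypothetical map. Under `Wildcard` provenance the closure is
// never called and the result is `None`, so tag-specific logic is skipped.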

/// Extra per-allocation data
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// Global state of the borrow tracker, if enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Extra state for data race detection.
    ///
    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`
    pub data_race: AllocDataRaceHandler,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists
    /// if this allocation is leakable. The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization objects like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync_objs: BTreeMap<Size, Box<dyn SyncObj>>,
}

// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        panic!("our allocations should never be cloned");
    }
}

impl VisitProvenance for AllocExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync_objs: _ } = self;

        borrow_tracker.visit_provenance(visit);
        data_race.visit_provenance(visit);
    }
}

/// Precomputed layouts of primitive types
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}

impl<'tcx> PrimitiveLayouts<'tcx> {
    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
        let tcx = layout_cx.tcx();
        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
        Ok(Self {
            unit: layout_cx.layout_of(tcx.types.unit)?,
            i8: layout_cx.layout_of(tcx.types.i8)?,
            i16: layout_cx.layout_of(tcx.types.i16)?,
            i32: layout_cx.layout_of(tcx.types.i32)?,
            i64: layout_cx.layout_of(tcx.types.i64)?,
            i128: layout_cx.layout_of(tcx.types.i128)?,
            isize: layout_cx.layout_of(tcx.types.isize)?,
            u8: layout_cx.layout_of(tcx.types.u8)?,
            u16: layout_cx.layout_of(tcx.types.u16)?,
            u32: layout_cx.layout_of(tcx.types.u32)?,
            u64: layout_cx.layout_of(tcx.types.u64)?,
            u128: layout_cx.layout_of(tcx.types.u128)?,
            usize: layout_cx.layout_of(tcx.types.usize)?,
            bool: layout_cx.layout_of(tcx.types.bool)?,
            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
        })
    }

    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.u8),
            16 => Some(self.u16),
            32 => Some(self.u32),
            64 => Some(self.u64),
            128 => Some(self.u128),
            _ => None,
        }
    }

    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.i8),
            16 => Some(self.i16),
            32 => Some(self.i32),
            64 => Some(self.i64),
            128 => Some(self.i128),
            _ => None,
        }
    }
}
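
// Usage sketch (illustrative, not from the original source):
// `machine.layouts.uint(Size::from_bits(32))` yields `Some` with the layout of
// `u32`, while a size that is not 8/16/32/64/128 bits yields `None`.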

/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Depending on settings, this will be `None`,
    /// global data for a data race detector,
    /// or the context required for running in GenMC mode.
    ///
    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
    pub data_race: GlobalDataRaceHandler,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS's `_NSGetArgc`/`_NSGetArgv` return pointers.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all EpollEventInterest.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotone clock.
    pub(crate) monotonic_clock: MonotonicClock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect on execution; it is only tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` exported under the given name is found.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// The equivalent of the `RUST_BACKTRACE` setting: how to print a backtrace on encountering an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered user-relevant for the purposes of error reporting.
    pub(crate) user_relevant_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by `ptr_to_int`, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocator used for the machine's `AllocBytes` in native-libs mode.
    pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>,

    /// The allocation IDs to report when they are being allocated
    /// (helps for debugging memory leaks and use after free bugs).
    pub(crate) tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of `compare_exchange_weak`, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of basic blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handle of the optional shared object file for native functions.
    #[cfg(all(unix, feature = "native-lib"))]
    pub native_lib: Vec<(libloading::Library, std::path::PathBuf)>,
    #[cfg(not(all(unix, feature = "native-lib")))]
    pub native_lib: Vec<!>,
    /// A memory location for exchanging the current `ecx` pointer with native code.
    #[cfg(all(unix, feature = "native-lib"))]
    pub native_lib_ecx_interchange: &'static Cell<usize>,

    /// Run a garbage collector for BorTags every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last BorTag GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// (Foreign) symbols that are synthesized as part of the allocator shim: the key indicates the
    /// name of the symbol being synthesized; the value indicates whether this should invoke some
    /// other symbol or whether this has special allocator semantics.
    pub(crate) allocator_shim_symbols: FxHashMap<Symbol, Either<Symbol, SpecialAllocatorMethod>>,
    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,

    /// Always prefer the intrinsic fallback body over the native Miri implementation.
    pub force_intrinsic_fallback: bool,

    /// Whether floating-point operations can behave non-deterministically.
    pub float_nondet: bool,
    /// Whether floating-point operations can have a non-deterministic rounding error.
    pub float_rounding_error: FloatRoundingErrorMode,

    /// Whether Miri artificially introduces short reads/writes on file descriptors.
    pub short_fd_operations: bool,
}

impl<'tcx> MiriMachine<'tcx> {
    /// Create a new MiriMachine.
    ///
    /// Invariant: `genmc_ctx.is_some() == config.genmc_config.is_some()`
    pub(crate) fn new(
        config: &MiriConfig,
        layout_cx: LayoutCx<'tcx>,
        genmc_ctx: Option<Rc<GenmcCtx>>,
    ) -> Self {
        let tcx = layout_cx.tcx();
        let user_relevant_crates = Self::get_user_relevant_crates(tcx, config);
        let layouts =
            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
        let profiler = config.measureme_out.as_ref().map(|out| {
            let crate_name =
                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
            let pid = process::id();
            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
            // the PID is padded so that the nondeterministic value of the PID does not spread
            // nondeterminism to the allocator. In Miri we are not aiming for such performance
            // control, we just pad for consistency with rustc.
            let filename = format!("{crate_name}-{pid:07}");
            let path = Path::new(out).join(filename);
            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
        });
        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
        let data_race = if config.genmc_config.is_some() {
            // `genmc_ctx` persists across executions, so we don't create a new one here.
            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
        } else if config.data_race_detector {
            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
        } else {
            GlobalDataRaceHandler::None
        };
        // Determine page size, stack address, and stack size.
        // These values are mostly meaningless, but the stack address is also where we start
        // allocating physical integer addresses for all allocations.
        let page_size = if let Some(page_size) = config.page_size {
            page_size
        } else {
            let target = &tcx.sess.target;
            match target.arch {
                Arch::Wasm32 | Arch::Wasm64 => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
                Arch::AArch64 => {
                    if target.is_like_darwin {
                        // No "definitive" source, but see:
                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
                        // https://github.com/ziglang/zig/issues/11308 etc.
                        16 * 1024
                    } else {
                        4 * 1024
                    }
                }
                _ => 4 * 1024,
            }
        };
        // On 16-bit targets, 32 pages is more than the entire address space!
        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
        let stack_size =
            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
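        // Worked example (illustrative, not from the original source): on x86_64 the
        // default page size is 4 KiB, so `stack_addr` is 128 KiB and `stack_size` is
        // 64 KiB; on a 16-bit target, `stack_addr` is 4 KiB and `stack_size` is 16 KiB.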
        assert!(
            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
            "miri only supports up to {} CPUs, but {} were configured",
            cpu_affinity::MAX_CPUS,
            config.num_cpus
        );
        let threads = ThreadManager::new(config);
        let mut thread_cpu_affinity = FxHashMap::default();
        if matches!(&tcx.sess.target.os, Os::Linux | Os::FreeBsd | Os::Android) {
            thread_cpu_affinity
                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
        }
        let alloc_addresses =
            RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr, tcx));
        MiriMachine {
            tcx,
            borrow_tracker,
            data_race,
            alloc_addresses,
            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
            env_vars: EnvVars::default(),
            main_fn_ret_place: None,
            argc: None,
            argv: None,
            cmd_line: None,
            tls: TlsData::default(),
            isolated_op: config.isolated_op,
            validation: config.validation,
            fds: shims::FdTable::init(config.mute_stdout_stderr),
            epoll_interests: shims::EpollInterestTable::new(),
            dirs: Default::default(),
            layouts,
            threads,
            thread_cpu_affinity,
            static_roots: Vec::new(),
            profiler,
            string_cache: Default::default(),
            exported_symbols_cache: FxHashMap::default(),
            backtrace_style: config.backtrace_style,
            user_relevant_crates,
            extern_statics: FxHashMap::default(),
            rng: RefCell::new(rng),
            allocator: (!config.native_lib.is_empty())
                .then(|| Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new()))),
            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
            track_alloc_accesses: config.track_alloc_accesses,
            check_alignment: config.check_alignment,
            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
            preemption_rate: config.preemption_rate,
            report_progress: config.report_progress,
            basic_block_count: 0,
            monotonic_clock: MonotonicClock::new(config.isolated_op == IsolatedOp::Allow),
            #[cfg(all(unix, feature = "native-lib"))]
            native_lib: config.native_lib.iter().map(|lib_file_path| {
                let host_triple = rustc_session::config::host_tuple();
                let target_triple = tcx.sess.opts.target_triple.tuple();
                // Check if host target == the session target.
                if host_triple != target_triple {
                    panic!(
                        "calling native C functions in linked .so file requires host and target to be the same: \
                        host={host_triple}, target={target_triple}",
                    );
                }
                // Note: it is the user's responsibility to provide a correct SO file.
                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
                // undefined behaviour in Miri itself!
                (
                    unsafe {
                        libloading::Library::new(lib_file_path)
                            .expect("failed to read specified extern shared object file")
                    },
                    lib_file_path.clone(),
                )
            }).collect(),
            #[cfg(all(unix, feature = "native-lib"))]
            native_lib_ecx_interchange: Box::leak(Box::new(Cell::new(0))),
            #[cfg(not(all(unix, feature = "native-lib")))]
            native_lib: config.native_lib.iter().map(|_| {
                panic!("calling functions from native libraries via FFI is not supported in this build of Miri")
            }).collect(),
            gc_interval: config.gc_interval,
            since_gc: 0,
            num_cpus: config.num_cpus,
            page_size,
            stack_addr,
            stack_size,
            collect_leak_backtraces: config.collect_leak_backtraces,
            allocation_spans: RefCell::new(FxHashMap::default()),
            symbolic_alignment: RefCell::new(FxHashMap::default()),
            union_data_ranges: FxHashMap::default(),
            pthread_mutex_sanity: Cell::new(false),
            pthread_rwlock_sanity: Cell::new(false),
            pthread_condvar_sanity: Cell::new(false),
            allocator_shim_symbols: Self::allocator_shim_symbols(tcx),
            mangle_internal_symbol_cache: Default::default(),
            force_intrinsic_fallback: config.force_intrinsic_fallback,
            float_nondet: config.float_nondet,
            float_rounding_error: config.float_rounding_error,
            short_fd_operations: config.short_fd_operations,
        }
    }

    fn allocator_shim_symbols(
        tcx: TyCtxt<'tcx>,
    ) -> FxHashMap<Symbol, Either<Symbol, SpecialAllocatorMethod>> {
        use rustc_codegen_ssa::base::allocator_shim_contents;

        // codegen uses `allocator_kind_for_codegen` here, but that's only needed to deal with
        // dylibs, which we do not support.
        let Some(kind) = tcx.allocator_kind(()) else {
            return Default::default();
        };
        let methods = allocator_shim_contents(tcx, kind);
        let mut symbols = FxHashMap::default();
        for method in methods {
            let from_name = Symbol::intern(&mangle_internal_symbol(
                tcx,
                &allocator::global_fn_name(method.name),
            ));
            let to = match method.special {
                Some(special) => Either::Right(special),
                None =>
                    Either::Left(Symbol::intern(&mangle_internal_symbol(
                        tcx,
                        &allocator::default_fn_name(method.name),
                    ))),
            };
            symbols.try_insert(from_name, to).unwrap();
        }
        symbols
    }

    /// Retrieve the list of user-relevant crates based on MIRI_LOCAL_CRATES as set by cargo-miri,
    /// and extra crates set in the config.
    fn get_user_relevant_crates(tcx: TyCtxt<'_>, config: &MiriConfig) -> Vec<CrateNum> {
        // Convert the local crate names from the passed-in config into CrateNums so that they can
        // be looked up quickly during execution.
        let local_crate_names = std::env::var("MIRI_LOCAL_CRATES")
            .map(|crates| crates.split(',').map(|krate| krate.to_string()).collect::<Vec<_>>())
            .unwrap_or_default();
        let mut local_crates = Vec::new();
        for &crate_num in tcx.crates(()) {
            let name = tcx.crate_name(crate_num);
            let name = name.as_str();
            if local_crate_names
                .iter()
                .chain(&config.user_relevant_crates)
                .any(|local_name| local_name == name)
            {
                local_crates.push(crate_num);
            }
        }
        local_crates
    }
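
    // For example (illustrative, not from the original source): with
    // `MIRI_LOCAL_CRATES=mycrate,helper`, the crate nums of `mycrate` and `helper`
    // end up in the returned list, and frames from those crates are treated as
    // user-relevant in diagnostics.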

    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }

    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This was just allocated, so there definitely is a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }

    pub(crate) fn communicate(&self) -> bool {
        self.isolated_op == IsolatedOp::Allow
    }

    /// Check whether the given instance (e.g. the one a stack frame's `FrameInfo` refers to) is part of a local crate.
    pub(crate) fn is_local(&self, instance: ty::Instance<'tcx>) -> bool {
        let def_id = instance.def_id();
        def_id.is_local() || self.user_relevant_crates.contains(&def_id.krate)
    }

    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
    pub(crate) fn handle_abnormal_termination(&mut self) {
        // All strings in the profile data are stored in a single string table which is not
        // written to disk until the profiler is dropped. If the interpreter exits without dropping
        // the profiler, it is not possible to interpret the profile data and all measureme tools
        // will panic when given the file.
        drop(self.profiler.take());
    }

    pub(crate) fn page_align(&self) -> Align {
        Align::from_bytes(self.page_size).unwrap()
    }

    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .map(|(allocated, _deallocated)| allocated.data())
    }

    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .and_then(|(_allocated, deallocated)| *deallocated)
            .map(Span::data)
    }

    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::TrackingAlloc(id, size, align));
        }

        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        let data_race = match &ecx.machine.data_race {
            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
            GlobalDataRaceHandler::Vclocks(data_race) =>
                AllocDataRaceHandler::Vclocks(
                    data_race::AllocState::new_allocation(
                        data_race,
                        &ecx.machine.threads,
                        size,
                        kind,
                        ecx.machine.current_user_relevant_span(),
                    ),
                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
                ),
            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
                // GenMC learns about new allocations directly from the alloc_addresses module,
                // since it has to be able to control the address at which they are placed.
                AllocDataRaceHandler::Genmc
            }
        };

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_user_relevant_span(), None));
        }

        interp_ok(AllocExtra {
            borrow_tracker,
            data_race,
            backtrace,
            sync_objs: BTreeMap::default(),
        })
    }
}

impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            epoll_interests: _,
            tcx: _,
            isolated_op: _,
            validation: _,
            monotonic_clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            user_relevant_crates: _,
            rng: _,
            allocator: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            #[cfg(all(unix, feature = "native-lib"))]
            native_lib_ecx_interchange: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            allocator_shim_symbols: _,
            mangle_internal_symbol_cache: _,
            force_intrinsic_fallback: _,
            float_nondet: _,
            float_rounding_error: _,
            short_fd_operations: _,
        } = self;

        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}

/// A rustc InterpCx for Miri.
pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;

/// A little trait that's useful to be inherited by extension traits.
pub trait MiriInterpCxExt<'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
}
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}

/// Machine hook implementations.
impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
    type MemoryKind = MiriMemoryKind;
    type ExtraFnVal = DynSym;

    type FrameExtra = FrameExtra<'tcx>;
    type AllocExtra = AllocExtra<'tcx>;

    type Provenance = Provenance;
    type ProvenanceExtra = ProvenanceExtra;
    type Bytes = MiriAllocBytes;

    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;

    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    const PANIC_ON_ALLOC_FAIL: bool = false;

    #[inline(always)]
    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
        ecx.machine.check_alignment != AlignmentCheck::None
    }

    #[inline(always)]
    fn alignment_check(
        ecx: &MiriInterpCx<'tcx>,
        alloc_id: AllocId,
        alloc_align: Align,
        alloc_kind: AllocKind,
        offset: Size,
        align: Align,
    ) -> Option<Misalignment> {
        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
            // Just use the built-in check.
            return None;
        }
        if alloc_kind != AllocKind::LiveData {
            // Can't have any extra info here.
            return None;
        }
        // Let's see which alignment we have been promised for this allocation.
        let (promised_offset, promised_align) = ecx
            .machine
            .symbolic_alignment
            .borrow()
            .get(&alloc_id)
            .copied()
            .unwrap_or((Size::ZERO, alloc_align));
        if promised_align < align {
            // Definitely not enough.
            Some(Misalignment { has: promised_align, required: align })
        } else {
            // What's the offset between us and the promised alignment?
            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
            // That must also be aligned.
            if distance.is_multiple_of(align.bytes()) {
                // All looking good!
                None
            } else {
                // The biggest power of two through which `distance` is divisible.
                let distance_pow2 = 1 << distance.trailing_zeros();
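                // Worked example (illustrative, not from the original source): with a
                // promised alignment of 8 at offset 0, an access at offset 4 gives
                // `distance` = 4, hence `distance_pow2` = 4, and we report
                // `has: 4, required: 8` (in bytes).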
                Some(Misalignment {
                    has: Align::from_bytes(distance_pow2).unwrap(),
                    required: align,
                })
            }
        }
    }

    #[inline(always)]
    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
        ecx.machine.validation != ValidationMode::No
    }
    #[inline(always)]
    fn enforce_validity_recursively(
        ecx: &InterpCx<'tcx, Self>,
        _layout: TyAndLayout<'tcx>,
    ) -> bool {
        ecx.machine.validation == ValidationMode::Deep
    }

    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        !ecx.tcx.sess.overflow_checks()
    }

    fn check_fn_target_features(
        ecx: &MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
    ) -> InterpResult<'tcx> {
        let attrs = ecx.tcx.codegen_instance_attrs(instance.def);
        if attrs
            .target_features
            .iter()
            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
        {
            let unavailable = attrs
                .target_features
                .iter()
                .filter(|&feature| {
                    feature.kind != TargetFeatureKind::Implied
                        && !ecx.tcx.sess.target_features.contains(&feature.name)
                })
                .fold(String::new(), |mut s, feature| {
                    if !s.is_empty() {
                        s.push_str(", ");
                    }
                    s.push_str(feature.name.as_str());
                    s
                });
            let msg = format!(
                "calling a function that requires unavailable target features: {unavailable}"
            );
            // On WASM, this is not UB, but instead gets rejected during validation of the module
            // (see #84988).
            if ecx.tcx.sess.target.is_like_wasm {
                throw_machine_stop!(TerminationInfo::Abort(msg));
            } else {
                throw_ub_format!("{msg}");
            }
        }
        interp_ok(())
    }

    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            let _trace = enter_trace_span!("emulate_foreign_item");
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be `Ok(None)` if we're emulating the function entirely within Miri (no need
            // to run extra MIR), and `Ok(Some(body))` if we found MIR to run for the
            // foreign function.
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
            let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
        }

        if ecx.machine.data_race.as_genmc_ref().is_some()
            && ecx.genmc_intercept_function(instance, args, dest)?
        {
            ecx.return_to_block(ret)?;
            return interp_ok(None);
        }

        // Otherwise, load the MIR.
        let _trace = enter_trace_span!("load_mir");
        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }

    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        fn_val: DynSym,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
    }

    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        ecx.call_intrinsic(instance, args, dest, ret, unwind)
    }

    #[inline(always)]
    fn assert_panic(
        ecx: &mut MiriInterpCx<'tcx>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        ecx.start_panic_nounwind(msg)
    }

    fn unwind_terminate(
        ecx: &mut InterpCx<'tcx, Self>,
        reason: mir::UnwindTerminateReason,
    ) -> InterpResult<'tcx> {
        // Call the lang item.
        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
        ecx.call_function(
            panic,
            ExternAbi::Rust,
            &[],
            None,
            ReturnContinuation::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
        )?;
        interp_ok(())
    }

    #[inline(always)]
    fn binary_ptr_op(
        ecx: &MiriInterpCx<'tcx>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        ecx.binary_ptr_op(bin_op, left, right)
    }

    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }

    #[inline(always)]
    fn apply_float_nondet(
        ecx: &mut InterpCx<'tcx, Self>,
        val: ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        crate::math::apply_random_float_error_to_imm(ecx, val, 4)
    }

    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }

    #[inline(always)]
    fn float_fuse_mul_add(ecx: &InterpCx<'tcx, Self>) -> bool {
        ecx.machine.float_nondet && ecx.machine.rng.borrow_mut().random()
    }
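
    // Illustrative note (not from the original source): when `float_nondet` is
    // enabled, each candidate `a * b + c` independently gets a random decision on
    // whether to behave like a fused `mul_add`, so interpreted programs cannot rely
    // on either exact rounding behavior.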
1346
1347    #[inline(always)]
1348    fn runtime_checks(
1349        ecx: &InterpCx<'tcx, Self>,
1350        r: mir::RuntimeChecks,
1351    ) -> InterpResult<'tcx, bool> {
1352        interp_ok(r.value(ecx.tcx.sess))
1353    }
1354
1355    #[inline(always)]
1356    fn thread_local_static_pointer(
1357        ecx: &mut MiriInterpCx<'tcx>,
1358        def_id: DefId,
1359    ) -> InterpResult<'tcx, StrictPointer> {
1360        ecx.get_or_create_thread_local_alloc(def_id)
1361    }
1362
1363    fn extern_static_pointer(
1364        ecx: &MiriInterpCx<'tcx>,
1365        def_id: DefId,
1366    ) -> InterpResult<'tcx, StrictPointer> {
1367        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
1368        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
1369            // Various parts of the engine rely on `get_alloc_info` for size and alignment
1370            // information. That uses the type information of this static.
1371            // Make sure it matches the Miri allocation backing this static.
1372            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
1373                panic!("extern_statics cannot contain wildcards")
1374            };
1375            let info = ecx.get_alloc_info(alloc_id);
1376            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
1377            let extern_decl_layout =
1378                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
1379            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
1380                throw_unsup_format!(
1381                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
1382                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
1383                    but Miri emulates it via an extern static shim \
1384                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
1385                    name = ecx.tcx.def_path_str(def_id),
1386                    krate = ecx.tcx.crate_name(def_id.krate),
1387                    decl_size = extern_decl_layout.size.bytes(),
1388                    decl_align = extern_decl_layout.align.bytes(),
1389                    shim_size = info.size.bytes(),
1390                    shim_align = info.align.bytes(),
1391                )
1392            }
1393            interp_ok(ptr)
1394        } else {
1395            throw_unsup_format!("extern static `{link_name}` is not supported by Miri")
1396        }
1397    }
1398
1399    fn init_local_allocation(
1400        ecx: &MiriInterpCx<'tcx>,
1401        id: AllocId,
1402        kind: MemoryKind,
1403        size: Size,
1404        align: Align,
1405    ) -> InterpResult<'tcx, Self::AllocExtra> {
1406        assert!(kind != MiriMemoryKind::Global.into());
1407        MiriMachine::init_allocation(ecx, id, kind, size, align)
1408    }
1409
1410    fn adjust_alloc_root_pointer(
1411        ecx: &MiriInterpCx<'tcx>,
1412        ptr: interpret::Pointer<CtfeProvenance>,
1413        kind: Option<MemoryKind>,
1414    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
1415        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
1416        let alloc_id = ptr.provenance.alloc_id();
1417        if cfg!(debug_assertions) {
1418            // The machine promises to never call us on thread-local or extern statics.
1419            match ecx.tcx.try_get_global_alloc(alloc_id) {
1420                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
1421                    panic!("adjust_alloc_root_pointer called on thread-local static")
1422                }
1423                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
1424                    panic!("adjust_alloc_root_pointer called on extern static")
1425                }
1426                _ => {}
1427            }
1428        }
1429        // FIXME: can we somehow preserve the immutability of `ptr`?
1430        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
1431            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
1432        } else {
1433            // The value does not matter; the borrow tracker is disabled.
1434            BorTag::default()
1435        };
1436        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
1437    }
1438
1439    /// Called on `usize as ptr` casts.
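        /// (For example, evaluating `let p = 0x2A as *const u8;` in the interpreted program
        /// reaches this hook with `addr == 0x2A`.)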
1440    #[inline(always)]
1441    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
1442        ecx.ptr_from_addr_cast(addr)
1443    }
1444
1445    /// Called on `ptr as usize` casts.
1446    /// (Actually computing the resulting `usize` doesn't need machine help,
1447    /// that's just `Scalar::try_to_int`.)
1448    #[inline(always)]
1449    fn expose_provenance(
1450        ecx: &InterpCx<'tcx, Self>,
1451        provenance: Self::Provenance,
1452    ) -> InterpResult<'tcx> {
1453        ecx.expose_provenance(provenance)
1454    }
1455
1456    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
1457    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
1458    /// be used to disambiguate situations where a wildcard pointer sits right in between two
1459    /// allocations.
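        /// For example, if one allocation ends exactly where the next begins, a wildcard
        /// pointer at that boundary address resolves to the second allocation for a forward
        /// access (positive `size`) and to the first for a backward access (negative `size`).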
1460    ///
1461    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
1462    /// The resulting `AllocId` will just be used for that one step and then forgotten again
1463    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
1464    /// stored in machine state).
1465    ///
1466    /// When this fails, that means the pointer does not point to a live allocation.
1467    fn ptr_get_alloc(
1468        ecx: &MiriInterpCx<'tcx>,
1469        ptr: StrictPointer,
1470        size: i64,
1471    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
1472        let rel = ecx.ptr_get_alloc(ptr, size);
1473
1474        rel.map(|(alloc_id, size)| {
1475            let tag = match ptr.provenance {
1476                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
1477                Provenance::Wildcard => ProvenanceExtra::Wildcard,
1478            };
1479            (alloc_id, size, tag)
1480        })
1481    }
1482
1483    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
1484    ///
1485    /// If `alloc` contains pointers, then they are all pointing to globals.
1486    ///
1487    /// This should avoid copying if no work has to be done! If this returns an owned
1488    /// allocation (because a copy had to be done to adjust things), machine memory will
1489    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
1490    /// owned allocation to the map even when the map is shared.)
1491    fn adjust_global_allocation<'b>(
1492        ecx: &InterpCx<'tcx, Self>,
1493        id: AllocId,
1494        alloc: &'b Allocation,
1495    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
1496    {
1497        let alloc = alloc.adjust_from_tcx(
1498            &ecx.tcx,
1499            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
1500            |ptr| ecx.global_root_pointer(ptr),
1501        )?;
1502        let kind = MiriMemoryKind::Global.into();
1503        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
1504        interp_ok(Cow::Owned(alloc.with_extra(extra)))
1505    }
1506
1507    #[inline(always)]
1508    fn before_memory_read(
1509        _tcx: TyCtxtAt<'tcx>,
1510        machine: &Self,
1511        alloc_extra: &AllocExtra<'tcx>,
1512        ptr: Pointer,
1513        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
1514        range: AllocRange,
1515    ) -> InterpResult<'tcx> {
1516        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
1517            machine.emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(
1518                alloc_id,
1519                range,
1520                borrow_tracker::AccessKind::Read,
1521            ));
1522        }
1523        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
1524        match &machine.data_race {
1525            GlobalDataRaceHandler::None => {}
1526            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
1527                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
1528            GlobalDataRaceHandler::Vclocks(_data_race) => {
1529                let _trace = enter_trace_span!(data_race::before_memory_read);
1530                let AllocDataRaceHandler::Vclocks(data_race, _weak_memory) = &alloc_extra.data_race
1531                else {
1532                    unreachable!();
1533                };
1534                data_race.read_non_atomic(alloc_id, range, NaReadType::Read, None, machine)?;
1535            }
1536        }
1537        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
1538            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
1539        }
1540        // Check if there are any sync objects that would like to prevent reading this memory.
1541        for (_offset, obj) in alloc_extra.sync_objs.range(range.start..range.end()) {
1542            obj.on_access(concurrency::sync::AccessKind::Read)?;
1543        }
1544
1545        interp_ok(())
1546    }
1547
1548    #[inline(always)]
1549    fn before_memory_write(
1550        _tcx: TyCtxtAt<'tcx>,
1551        machine: &mut Self,
1552        alloc_extra: &mut AllocExtra<'tcx>,
1553        ptr: Pointer,
1554        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
1555        range: AllocRange,
1556    ) -> InterpResult<'tcx> {
1557        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
1558            machine.emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(
1559                alloc_id,
1560                range,
1561                borrow_tracker::AccessKind::Write,
1562            ));
1563        }
1564        match &machine.data_race {
1565            GlobalDataRaceHandler::None => {}
1566            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
1567                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?,
1568            GlobalDataRaceHandler::Vclocks(_global_state) => {
1569                let _trace = enter_trace_span!(data_race::before_memory_write);
1570                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
1571                    &mut alloc_extra.data_race
1572                else {
1573                    unreachable!()
1574                };
1575                data_race.write_non_atomic(alloc_id, range, NaWriteType::Write, None, machine)?;
1576                if let Some(weak_memory) = weak_memory {
1577                    weak_memory
1578                        .non_atomic_write(range, machine.data_race.as_vclocks_ref().unwrap());
1579                }
1580            }
1581        }
1582        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
1583            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
1584        }
1585        // Delete sync objects that don't like writes.
1586        // Most of the time, we can just skip this.
1587        if !alloc_extra.sync_objs.is_empty() {
1588            let mut to_delete = vec![];
1589            for (offset, obj) in alloc_extra.sync_objs.range(range.start..range.end()) {
1590                obj.on_access(concurrency::sync::AccessKind::Write)?;
1591                if obj.delete_on_write() {
1592                    to_delete.push(*offset);
1593                }
1594            }
1595            for offset in to_delete {
1596                alloc_extra.sync_objs.remove(&offset);
1597            }
1598        }
1599        interp_ok(())
1600    }
1601
1602    #[inline(always)]
1603    fn before_memory_deallocation(
1604        _tcx: TyCtxtAt<'tcx>,
1605        machine: &mut Self,
1606        alloc_extra: &mut AllocExtra<'tcx>,
1607        ptr: Pointer,
1608        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
1609        size: Size,
1610        align: Align,
1611        kind: MemoryKind,
1612    ) -> InterpResult<'tcx> {
1613        if machine.tracked_alloc_ids.contains(&alloc_id) {
1614            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
1615        }
1616        match &machine.data_race {
1617            GlobalDataRaceHandler::None => {}
1618            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
1619                genmc_ctx.handle_dealloc(machine, alloc_id, ptr.addr(), kind)?,
1620            GlobalDataRaceHandler::Vclocks(_global_state) => {
1621                let _trace = enter_trace_span!(data_race::before_memory_deallocation);
1622                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
1623                data_race.write_non_atomic(
1624                    alloc_id,
1625                    alloc_range(Size::ZERO, size),
1626                    NaWriteType::Deallocate,
1627                    None,
1628                    machine,
1629                )?;
1630            }
1631        }
1632        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
1633            borrow_tracker.before_memory_deallocation(alloc_id, prov_extra, size, machine)?;
1634        }
1635        // Check if there are any sync objects that would like to prevent freeing this memory.
1636        for obj in alloc_extra.sync_objs.values() {
1637            obj.on_access(concurrency::sync::AccessKind::Dealloc)?;
1638        }
1639
1640        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
1641        {
1642            *deallocated_at = Some(machine.current_user_relevant_span());
1643        }
1644        machine.free_alloc_id(alloc_id, size, align, kind);
1645        interp_ok(())
1646    }
1647
1648    #[inline(always)]
1649    fn retag_ptr_value(
1650        ecx: &mut InterpCx<'tcx, Self>,
1651        kind: mir::RetagKind,
1652        val: &ImmTy<'tcx>,
1653    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
1654        if ecx.machine.borrow_tracker.is_some() {
1655            ecx.retag_ptr_value(kind, val)
1656        } else {
1657            interp_ok(val.clone())
1658        }
1659    }
1660
1661    #[inline(always)]
1662    fn retag_place_contents(
1663        ecx: &mut InterpCx<'tcx, Self>,
1664        kind: mir::RetagKind,
1665        place: &PlaceTy<'tcx>,
1666    ) -> InterpResult<'tcx> {
1667        if ecx.machine.borrow_tracker.is_some() {
1668            ecx.retag_place_contents(kind, place)?;
1669        }
1670        interp_ok(())
1671    }
1672
1673    fn protect_in_place_function_argument(
1674        ecx: &mut InterpCx<'tcx, Self>,
1675        place: &MPlaceTy<'tcx>,
1676    ) -> InterpResult<'tcx> {
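            // (Illustration: for a call like `consume(x)` whose argument is moved and passed
            // in place, `place` is the caller-side `x`: the callee may use that memory
            // directly, so it must not remain accessible to the caller during the call.)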
1677        // If we have a borrow tracker, we also have it set up protection so that all reads *and
1678        // writes* during this call are insta-UB.
1679        let protected_place = if ecx.machine.borrow_tracker.is_some() {
1680            ecx.protect_place(place)?
1681        } else {
1682            // No borrow tracker.
1683            place.clone()
1684        };
1685        // We do need to write `uninit` so that even after the call ends, the former contents of
1686        // this place cannot be observed any more. We do the write after retagging so that for
1687        // Tree Borrows, this is considered to activate the new tag.
1688        // Conveniently this also ensures that the place actually points to suitable memory.
1689        ecx.write_uninit(&protected_place)?;
1690        // Now we throw away the protected place, ensuring its tag is never used again.
1691        interp_ok(())
1692    }
1693
1694    #[inline(always)]
1695    fn init_frame(
1696        ecx: &mut InterpCx<'tcx, Self>,
1697        frame: Frame<'tcx, Provenance>,
1698    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
1699        // Start recording our event before doing anything else
1700        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
1701            let fn_name = frame.instance().to_string();
1702            let entry = ecx.machine.string_cache.entry(fn_name.clone());
1703            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));
1704
1705            Some(profiler.start_recording_interval_event_detached(
1706                *name,
1707                measureme::EventId::from_label(*name),
1708                ecx.active_thread().to_u32(),
1709            ))
1710        } else {
1711            None
1712        };
1713
1714        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();
1715
1716        let extra = FrameExtra {
1717            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
1718            catch_unwind: None,
1719            timing,
1720            user_relevance: ecx.machine.user_relevance(&frame),
1721            data_race: ecx
1722                .machine
1723                .data_race
1724                .as_vclocks_ref()
1725                .map(|_| data_race::FrameState::default()),
1726        };
1727
1728        interp_ok(frame.with_extra(extra))
1729    }
1730
1731    fn stack<'a>(
1732        ecx: &'a InterpCx<'tcx, Self>,
1733    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
1734        ecx.active_thread_stack()
1735    }
1736
1737    fn stack_mut<'a>(
1738        ecx: &'a mut InterpCx<'tcx, Self>,
1739    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
1740        ecx.active_thread_stack_mut()
1741    }
1742
1743    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1744        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
1745        ecx.machine.since_gc += 1;
1746        // Possibly report our progress. This will point at the terminator we are about to execute.
1747        if let Some(report_progress) = ecx.machine.report_progress {
1748            if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
1749                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
1750                    block_count: ecx.machine.basic_block_count,
1751                });
1752            }
1753        }
1754
1755        // Search for BorTags to find all live pointers, then remove all other tags from borrow
1756        // stacks.
1757        // When debug assertions are enabled, run the GC as often as possible so that any cases
1758        // where it mistakenly removes an important tag become visible.
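            // (The GC interval is user-configurable via `-Zmiri-provenance-gc`; an interval
            // of 0 disables the GC, as the check below shows.)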
1759        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
1760            ecx.machine.since_gc = 0;
1761            ecx.run_provenance_gc();
1762        }
1763
1764        // These are our preemption points.
1765        // (This will only take effect after the terminator has been executed.)
1766        ecx.maybe_preempt_active_thread();
1767
1768        // Make sure some time passes.
1769        ecx.machine.monotonic_clock.tick();
1770
1771        interp_ok(())
1772    }
1773
1774    #[inline(always)]
1775    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1776        if ecx.frame().extra.user_relevance >= ecx.active_thread_ref().current_user_relevance() {
1777            // We just pushed a frame that's at least as relevant as the so-far most relevant frame.
1778            // That means we are now the most relevant frame.
1779            let stack_len = ecx.active_thread_stack().len();
1780            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
1781        }
1782        interp_ok(())
1783    }
1784
1785    fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1786        let frame = ecx.frame();
1787        // We want this *before* the return value copy, because the return place itself is protected
1788        // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value.
1789        if ecx.machine.borrow_tracker.is_some() {
1790            ecx.on_stack_pop(frame)?;
1791        }
1792        if ecx
1793            .active_thread_ref()
1794            .top_user_relevant_frame()
1795            .expect("there should always be a most relevant frame for a non-empty stack")
1796            == ecx.frame_idx()
1797        {
1798            // We are popping the most relevant frame. We have no clue what the next relevant frame
1799            // below that is, so we recompute that.
1800            // (If this ever becomes a bottleneck, we could have `push` store the previous
1801            // user-relevant frame and restore that here.)
1802            // We have to skip the frame that is just being popped.
1803            ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
1804        }
1805        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
1806        // concurrency and what it prints is just plain wrong. So we print our own information
1807        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
1808        info!("Leaving {}", ecx.frame().instance());
1809        interp_ok(())
1810    }
1811
1812    #[inline(always)]
1813    fn after_stack_pop(
1814        ecx: &mut InterpCx<'tcx, Self>,
1815        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
1816        unwinding: bool,
1817    ) -> InterpResult<'tcx, ReturnAction> {
1818        let res = {
1819            // Move `frame` into a sub-scope so we control when it will be dropped.
1820            let mut frame = frame;
1821            let timing = frame.extra.timing.take();
1822            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
1823            if let Some(profiler) = ecx.machine.profiler.as_ref() {
1824                profiler.finish_recording_interval_event(timing.unwrap());
1825            }
1826            res
1827        };
1828        // Needs to be done after dropping frame to show up on the right nesting level.
1829        // (Cc https://github.com/rust-lang/miri/issues/2266)
1830        if !ecx.active_thread_stack().is_empty() {
1831            info!("Continuing in {}", ecx.frame().instance());
1832        }
1833        res
1834    }
1835
1836    fn after_local_read(
1837        ecx: &InterpCx<'tcx, Self>,
1838        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
1839        local: mir::Local,
1840    ) -> InterpResult<'tcx> {
1841        if let Some(data_race) = &frame.extra.data_race {
1842            let _trace = enter_trace_span!(data_race::after_local_read);
1843            data_race.local_read(local, &ecx.machine);
1844        }
1845        interp_ok(())
1846    }
1847
1848    fn after_local_write(
1849        ecx: &mut InterpCx<'tcx, Self>,
1850        local: mir::Local,
1851        storage_live: bool,
1852    ) -> InterpResult<'tcx> {
1853        if let Some(data_race) = &ecx.frame().extra.data_race {
1854            let _trace = enter_trace_span!(data_race::after_local_write);
1855            data_race.local_write(local, storage_live, &ecx.machine);
1856        }
1857        interp_ok(())
1858    }
1859
1860    fn after_local_moved_to_memory(
1861        ecx: &mut InterpCx<'tcx, Self>,
1862        local: mir::Local,
1863        mplace: &MPlaceTy<'tcx>,
1864    ) -> InterpResult<'tcx> {
1865        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
1866            panic!("after_local_moved_to_memory should only be called on fresh allocations");
1867        };
1868        // Record the span where this was allocated: the declaration of the local.
1869        let local_decl = &ecx.frame().body().local_decls[local];
1870        let span = local_decl.source_info.span;
1871        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
1872        // The data race system has to fix the clocks used for this write.
1873        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
1874        if let Some(data_race) =
1875            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
1876        {
1877            let _trace = enter_trace_span!(data_race::after_local_moved_to_memory);
1878            data_race.local_moved_to_memory(
1879                local,
1880                alloc_info.data_race.as_vclocks_mut().unwrap(),
1881                machine,
1882            );
1883        }
1884        interp_ok(())
1885    }
1886
1887    fn get_global_alloc_salt(
1888        ecx: &InterpCx<'tcx, Self>,
1889        instance: Option<ty::Instance<'tcx>>,
1890    ) -> usize {
1891        let unique = if let Some(instance) = instance {
1892            // Functions cannot be identified by pointers, as asm-equal functions can get
1893            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
1894            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
1895            // mention of a function. This means that `main as fn() == main as fn()` is false, while
1896            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
1897            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
1898            // whether codegen will actually emit duplicate functions. It does that when they have
1899            // non-lifetime generics, or when they can be inlined. All other functions are given a
1900            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
1901            // cannot be relied upon for anything. But if we don't do this, the
1902            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
1903            // backtraces look terrible.
1904            let is_generic = instance
1905                .args
1906                .into_iter()
1907                .any(|arg| !matches!(arg.kind(), ty::GenericArgKind::Lifetime(_)));
1908            let can_be_inlined = matches!(
1909                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
1910                InliningThreshold::Always
1911            ) || !matches!(
1912                ecx.tcx.codegen_instance_attrs(instance.def).inline,
1913                InlineAttr::Never
1914            );
1915            !is_generic && !can_be_inlined
1916        } else {
1917            // Non-functions are never unique.
1918            false
1919        };
1920        // Always use the same salt if the allocation is unique.
1921        if unique {
1922            CTFE_ALLOC_SALT
1923        } else {
1924            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
1925        }
1926    }
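        // Regarding `get_global_alloc_salt` above, a hypothetical illustration of the
        // user-visible effect under Miri (mirroring the `main as fn()` example in the comment):
        //
        //     fn foo() {}
        //     let f = foo as fn();
        //     assert!(f == f); // a single cast result compared with itself: always true
        //     // ...whereas `foo as fn() == foo as fn()` may be false, since each cast may
        //     // pick a different one of the `ADDRS_PER_ANON_GLOBAL` base addresses.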
1927
1928    fn cached_union_data_range<'e>(
1929        ecx: &'e mut InterpCx<'tcx, Self>,
1930        ty: Ty<'tcx>,
1931        compute_range: impl FnOnce() -> RangeSet,
1932    ) -> Cow<'e, RangeSet> {
1933        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
1934    }
1935
1936    fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams {
1937        use crate::alloc::MiriAllocParams;
1938
1939        match &self.allocator {
1940            Some(alloc) => MiriAllocParams::Isolated(alloc.clone()),
1941            None => MiriAllocParams::Global,
1942        }
1943    }
1944
1945    fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
1946        #[cfg(feature = "tracing")]
1947        {
1948            span().entered()
1949        }
1950        #[cfg(not(feature = "tracing"))]
1951        #[expect(clippy::unused_unit)]
1952        {
1953            let _ = span; // so we avoid the "unused variable" warning
1954            ()
1955        }
1956    }
1957}
1958
1959/// Trait for callbacks handling asynchronous machine operations.
1960pub trait MachineCallback<'tcx, T>: VisitProvenance {
1961    /// The function to be invoked when the callback is fired.
1962    fn call(
1963        self: Box<Self>,
1964        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
1965        arg: T,
1966    ) -> InterpResult<'tcx>;
1967}
1968
1969/// Type alias for boxed machine callbacks with a generic argument type.
1970pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;
1971
1972/// Creates a `DynMachineCallback`:
1973///
1974/// ```rust
1975/// callback!(
1976///     @capture<'tcx> {
1977///         var1: Ty1,
1978///         var2: Ty2<'tcx>,
1979///     }
1980///     |this, arg: ArgTy| {
1981///         // Implement the callback here.
1982///         todo!()
1983///     }
1984/// )
1985/// ```
1986///
1987/// All captured variables' types must implement `VisitProvenance`.
1988#[macro_export]
1989macro_rules! callback {
1990    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
1991        { $($name:ident: $type:ty),* $(,)? }
1992     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
1993        struct Callback<$tcx, $($lft),*> {
1994            $($name: $type,)*
1995            _phantom: std::marker::PhantomData<&$tcx ()>,
1996        }
1997
1998        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
1999            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
2000                $(
2001                    self.$name.visit_provenance(_visit);
2002                )*
2003            }
2004        }
2005
2006        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
2007            fn call(
2008                self: Box<Self>,
2009                $this: &mut MiriInterpCx<$tcx>,
2010                $arg: $arg_ty
2011            ) -> InterpResult<$tcx> {
2012                #[allow(unused_variables)]
2013                let Callback { $($name,)* _phantom } = *self;
2014                $body
2015            }
2016        }
2017
2018        Box::new(Callback {
2019            $($name,)*
2020            _phantom: std::marker::PhantomData
2021        })
2022    }};
2023}
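
    // A minimal sketch of a `callback!` invocation (the captured variable and the argument
    // type `u32` here are hypothetical; the general shape is documented above):
    //
    //     let cb: DynMachineCallback<'tcx, u32> = callback!(
    //         @capture<'tcx> {
    //             retval: Scalar,
    //         }
    //         |this, arg: u32| {
    //             // `this` is the `MiriInterpCx`; `retval` was moved out of the capture.
    //             let _ = (this, retval, arg);
    //             interp_ok(())
    //         }
    //     );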