// miri/machine.rs

1//! Global machine state as well as implementation of the interpreter engine
2//! `Machine` trait.
3
4use std::any::Any;
5use std::borrow::Cow;
6use std::cell::{Cell, RefCell};
7use std::path::Path;
8use std::rc::Rc;
9use std::{fmt, process};
10
11use rand::rngs::StdRng;
12use rand::{Rng, SeedableRng};
13use rustc_abi::{Align, ExternAbi, Size};
14use rustc_apfloat::{Float, FloatConvert};
15use rustc_ast::expand::allocator::{self, SpecialAllocatorMethod};
16use rustc_data_structures::either::Either;
17use rustc_data_structures::fx::{FxHashMap, FxHashSet};
18#[allow(unused)]
19use rustc_data_structures::static_assert_size;
20use rustc_hir::attrs::InlineAttr;
21use rustc_log::tracing;
22use rustc_middle::middle::codegen_fn_attrs::TargetFeatureKind;
23use rustc_middle::mir;
24use rustc_middle::query::TyCtxtAt;
25use rustc_middle::ty::layout::{
26    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
27};
28use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
29use rustc_session::config::InliningThreshold;
30use rustc_span::def_id::{CrateNum, DefId};
31use rustc_span::{Span, SpanData, Symbol};
32use rustc_symbol_mangling::mangle_internal_symbol;
33use rustc_target::callconv::FnAbi;
34
35use crate::alloc_addresses::EvalContextExt;
36use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
37use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
38use crate::concurrency::{
39    AllocDataRaceHandler, GenmcCtx, GenmcEvalContextExt as _, GlobalDataRaceHandler, weak_memory,
40};
41use crate::*;
42
/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`);
/// 42 - 34 = 8 satisfies that bound exactly.
pub const SIGRTMAX: i32 = 42;

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;
57
/// How pointer alignment should be checked (see the `check_alignment` field of `MiriMachine`).
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum AlignmentCheck {
    /// Do not check alignment.
    None,
    /// Check alignment "symbolically", i.e., using only the requested alignment for an allocation and not its real base address.
    Symbolic,
    /// Check alignment on the actual physical integer address.
    Int,
}
67
/// How to report a rejected isolated op.
///
/// If not `Abort`, Miri returns an error for an isolated op; the remaining
/// variants determine whether the user should be warned about such an error.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum RejectOpWith {
    /// Isolated op is rejected with an abort of the machine.
    Abort,

    /// Do not print warning about rejected isolated op.
    NoWarning,

    /// Print a warning about rejected isolated op, with backtrace.
    Warning,

    /// Print a warning about rejected isolated op, without backtrace.
    WarningWithoutBacktrace,
}
84
/// Whether (and how) to reject operations that require communicating with the host.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum IsolatedOp {
    /// Reject an op requiring communication with the host. By
    /// default, miri rejects the op with an abort. If not, it returns
    /// an error code, and prints a warning about it. Warning levels
    /// are controlled by `RejectOpWith` enum.
    Reject(RejectOpWith),

    /// Execute op requiring communication with the host, i.e. disable isolation.
    Allow,
}
96
/// How backtraces should be rendered on an error (equivalent to the `RUST_BACKTRACE` setting).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BacktraceStyle {
    /// Prints a terser backtrace which ideally only contains relevant information.
    Short,
    /// Prints a backtrace with all possible information.
    Full,
    /// Prints only the frame that the error occurs in.
    Off,
}
106
/// How much validity checking to perform (see the `validation` field of `MiriMachine`).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ValidationMode {
    /// Do not perform any kind of validation.
    No,
    /// Validate the interior of the value, but not things behind references.
    Shallow,
    /// Fully recursively validate references.
    Deep,
}
116
/// How to apply non-deterministic rounding error to floating-point operations
/// (see the `float_rounding_error` field of `MiriMachine`).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum FloatRoundingErrorMode {
    /// Apply a random error (the default).
    Random,
    /// Don't apply any error.
    None,
    /// Always apply the maximum error (with a random sign).
    Max,
}
126
/// Extra data stored with each stack frame.
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped during unwinding a panic,
    /// we stop unwinding, use the `CatchUnwindData` to handle catching.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
    /// `#[track_caller]`. We compute this once on creation and store the result, as an
    /// optimization.
    /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
    pub is_user_relevant: bool,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}
151
152impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
153    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
154        // Omitting `timing`, it does not support `Debug`.
155        let FrameExtra { borrow_tracker, catch_unwind, timing: _, is_user_relevant, data_race } =
156            self;
157        f.debug_struct("FrameData")
158            .field("borrow_tracker", borrow_tracker)
159            .field("catch_unwind", catch_unwind)
160            .field("is_user_relevant", is_user_relevant)
161            .field("data_race", data_race)
162            .finish()
163    }
164}
165
impl VisitProvenance for FrameExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        // Exhaustively destructure so that adding a field to `FrameExtra`
        // forces this function to be updated.
        let FrameExtra {
            catch_unwind,
            borrow_tracker,
            timing: _,
            is_user_relevant: _,
            data_race: _,
        } = self;

        catch_unwind.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
    }
}
180
/// Extra memory kinds, beyond those the interpreter engine already knows about.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`).
    WinLocal,
    /// Memory for args, errno, env vars, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime, e.g. for readdir. Separate from `Machine` because we clean
    /// it up (or expect the user to invoke operations that clean it up) and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program.
    Mmap,
}
212
213impl From<MiriMemoryKind> for MemoryKind {
214    #[inline(always)]
215    fn from(kind: MiriMemoryKind) -> MemoryKind {
216        MemoryKind::Machine(kind)
217    }
218}
219
220impl MayLeak for MiriMemoryKind {
221    #[inline(always)]
222    fn may_leak(self) -> bool {
223        use self::MiriMemoryKind::*;
224        match self {
225            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
226            Machine | Global | ExternStatic | Tls | Mmap => true,
227        }
228    }
229}
230
231impl MiriMemoryKind {
232    /// Whether we have a useful allocation span for an allocation of this kind.
233    fn should_save_allocation_span(self) -> bool {
234        use self::MiriMemoryKind::*;
235        match self {
236            // Heap allocations are fine since the `Allocation` is created immediately.
237            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
238            // Everything else is unclear, let's not show potentially confusing spans.
239            Machine | Global | ExternStatic | Tls | Runtime => false,
240        }
241    }
242}
243
244impl fmt::Display for MiriMemoryKind {
245    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
246        use self::MiriMemoryKind::*;
247        match self {
248            Rust => write!(f, "Rust heap"),
249            Miri => write!(f, "Miri bare-metal heap"),
250            C => write!(f, "C heap"),
251            WinHeap => write!(f, "Windows heap"),
252            WinLocal => write!(f, "Windows local memory"),
253            Machine => write!(f, "machine-managed memory"),
254            Runtime => write!(f, "language runtime memory"),
255            Global => write!(f, "global (static or const)"),
256            ExternStatic => write!(f, "extern static"),
257            Tls => write!(f, "thread-local static"),
258            Mmap => write!(f, "mmap"),
259        }
260    }
261}
262
/// The interpreter's memory-kind type, instantiated with Miri's extra kinds.
pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;
264
/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait needs that because validity checking
// *might* be recursive and then it has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance: we exactly know which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot be
    /// actually implemented in Miri. So instead, we approximate this, erroring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}
297
/// The "extra" information a pointer has over a regular AllocId.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
    /// The borrow tag of a pointer with concrete provenance.
    Concrete(BorTag),
    /// Wildcard provenance (from an int-to-ptr cast); no tag is known.
    Wildcard,
}
304
// Guard against accidental size regressions of these pervasive types.
#[cfg(target_pointer_width = "64")]
static_assert_size!(StrictPointer, 24);
// FIXME: this would fit in 24 bytes but layout optimizations are not smart enough
// #[cfg(target_pointer_width = "64")]
//static_assert_size!(Pointer, 24);
#[cfg(target_pointer_width = "64")]
static_assert_size!(Scalar, 32);
312
313impl fmt::Debug for Provenance {
314    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
315        match self {
316            Provenance::Concrete { alloc_id, tag } => {
317                // Forward `alternate` flag to `alloc_id` printing.
318                if f.alternate() {
319                    write!(f, "[{alloc_id:#?}]")?;
320                } else {
321                    write!(f, "[{alloc_id:?}]")?;
322                }
323                // Print Borrow Tracker tag.
324                write!(f, "{tag:?}")?;
325            }
326            Provenance::Wildcard => {
327                write!(f, "[wildcard]")?;
328            }
329        }
330        Ok(())
331    }
332}
333
334impl interpret::Provenance for Provenance {
335    /// We use absolute addresses in the `offset` of a `StrictPointer`.
336    const OFFSET_IS_ADDR: bool = true;
337
338    /// Miri implements wildcard provenance.
339    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);
340
341    fn get_alloc_id(self) -> Option<AllocId> {
342        match self {
343            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
344            Provenance::Wildcard => None,
345        }
346    }
347
348    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
349        let (prov, addr) = ptr.into_raw_parts(); // offset is absolute address
350        write!(f, "{:#x}", addr.bytes())?;
351        if f.alternate() {
352            write!(f, "{prov:#?}")?;
353        } else {
354            write!(f, "{prov:?}")?;
355        }
356        Ok(())
357    }
358
359    fn join(left: Self, right: Self) -> Option<Self> {
360        match (left, right) {
361            // If both are the *same* concrete tag, that is the result.
362            (
363                Provenance::Concrete { alloc_id: left_alloc, tag: left_tag },
364                Provenance::Concrete { alloc_id: right_alloc, tag: right_tag },
365            ) if left_alloc == right_alloc && left_tag == right_tag => Some(left),
366            // If one side is a wildcard, the best possible outcome is that it is equal to the other
367            // one, and we use that.
368            (Provenance::Wildcard, o) | (o, Provenance::Wildcard) => Some(o),
369            // Otherwise, fall back to `None`.
370            _ => None,
371        }
372    }
373}
374
375impl fmt::Debug for ProvenanceExtra {
376    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
377        match self {
378            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
379            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
380        }
381    }
382}
383
384impl ProvenanceExtra {
385    pub fn and_then<T>(self, f: impl FnOnce(BorTag) -> Option<T>) -> Option<T> {
386        match self {
387            ProvenanceExtra::Concrete(pid) => f(pid),
388            ProvenanceExtra::Wildcard => None,
389        }
390    }
391}
392
/// Extra per-allocation data that Miri attaches to every `Allocation`.
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// Global state of the borrow tracker, if enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Extra state for data race detection.
    ///
    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`.
    pub data_race: AllocDataRaceHandler,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists
    /// if this allocation is leakable. The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization primitives like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync: FxHashMap<Size, Box<dyn Any>>,
}
413
// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        // Reaching this indicates a bug: the `Cow` should never be forced to clone.
        panic!("our allocations should never be cloned");
    }
}
421
impl VisitProvenance for AllocExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        // Exhaustively destructure so that adding a field to `AllocExtra`
        // forces this function to be updated.
        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self;

        borrow_tracker.visit_provenance(visit);
        data_race.visit_provenance(visit);
    }
}
430
/// Precomputed layouts of primitive types, so frequently used layouts do not
/// have to be recomputed on every use.
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}
450
451impl<'tcx> PrimitiveLayouts<'tcx> {
452    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
453        let tcx = layout_cx.tcx();
454        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
455        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
456        Ok(Self {
457            unit: layout_cx.layout_of(tcx.types.unit)?,
458            i8: layout_cx.layout_of(tcx.types.i8)?,
459            i16: layout_cx.layout_of(tcx.types.i16)?,
460            i32: layout_cx.layout_of(tcx.types.i32)?,
461            i64: layout_cx.layout_of(tcx.types.i64)?,
462            i128: layout_cx.layout_of(tcx.types.i128)?,
463            isize: layout_cx.layout_of(tcx.types.isize)?,
464            u8: layout_cx.layout_of(tcx.types.u8)?,
465            u16: layout_cx.layout_of(tcx.types.u16)?,
466            u32: layout_cx.layout_of(tcx.types.u32)?,
467            u64: layout_cx.layout_of(tcx.types.u64)?,
468            u128: layout_cx.layout_of(tcx.types.u128)?,
469            usize: layout_cx.layout_of(tcx.types.usize)?,
470            bool: layout_cx.layout_of(tcx.types.bool)?,
471            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
472            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
473        })
474    }
475
476    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
477        match size.bits() {
478            8 => Some(self.u8),
479            16 => Some(self.u16),
480            32 => Some(self.u32),
481            64 => Some(self.u64),
482            128 => Some(self.u128),
483            _ => None,
484        }
485    }
486
487    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
488        match size.bits() {
489            8 => Some(self.i8),
490            16 => Some(self.i16),
491            32 => Some(self.i32),
492            64 => Some(self.i64),
493            128 => Some(self.i128),
494            _ => None,
495        }
496    }
497}
498
/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Depending on settings, this will be `None`,
    /// global data for a data race detector,
    /// or the context required for running in GenMC mode.
    ///
    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
    pub data_race: GlobalDataRaceHandler,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all EpollEventInterest.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotone clock.
    pub(crate) monotonic_clock: MonotonicClock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect at all, it is just tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` exported under the given name is found.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// Equivalent setting as RUST_BACKTRACE on encountering an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered local for the purposes of error reporting.
    pub(crate) local_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by ptr_to_int, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocator used for the machine's `AllocBytes` in native-libs mode.
    pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>,

    /// The allocation IDs to report when they are being allocated
    /// (helps for debugging memory leaks and use after free bugs).
    tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of compare_exchange_weak, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handle of the optional shared object file for native functions.
    #[cfg(all(unix, feature = "native-lib"))]
    pub native_lib: Vec<(libloading::Library, std::path::PathBuf)>,
    // On targets without native-lib support, this is always empty (`!` is uninhabited).
    #[cfg(not(all(unix, feature = "native-lib")))]
    pub native_lib: Vec<!>,

    /// Run a garbage collector for BorTags every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last BorTag GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values.
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// (Foreign) symbols that are synthesized as part of the allocator shim: the key indicates the
    /// name of the symbol being synthesized; the value indicates whether this should invoke some
    /// other symbol or whether this has special allocator semantics.
    pub(crate) allocator_shim_symbols: FxHashMap<Symbol, Either<Symbol, SpecialAllocatorMethod>>,
    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,

    /// Always prefer the intrinsic fallback body over the native Miri implementation.
    pub force_intrinsic_fallback: bool,

    /// Whether floating-point operations can behave non-deterministically.
    pub float_nondet: bool,
    /// Whether floating-point operations can have a non-deterministic rounding error.
    pub float_rounding_error: FloatRoundingErrorMode,

    /// Whether Miri artificially introduces short reads/writes on file descriptors.
    pub short_fd_operations: bool,
}
676
677impl<'tcx> MiriMachine<'tcx> {
678    /// Create a new MiriMachine.
679    ///
680    /// Invariant: `genmc_ctx.is_some() == config.genmc_config.is_some()`
681    pub(crate) fn new(
682        config: &MiriConfig,
683        layout_cx: LayoutCx<'tcx>,
684        genmc_ctx: Option<Rc<GenmcCtx>>,
685    ) -> Self {
686        let tcx = layout_cx.tcx();
687        let local_crates = helpers::get_local_crates(tcx);
688        let layouts =
689            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
690        let profiler = config.measureme_out.as_ref().map(|out| {
691            let crate_name =
692                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
693            let pid = process::id();
694            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
695            // the PID is padded so that the nondeterministic value of the PID does not spread
696            // nondeterminism to the allocator. In Miri we are not aiming for such performance
697            // control, we just pad for consistency with rustc.
698            let filename = format!("{crate_name}-{pid:07}");
699            let path = Path::new(out).join(filename);
700            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
701        });
702        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
703        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
704        let data_race = if config.genmc_config.is_some() {
705            // `genmc_ctx` persists across executions, so we don't create a new one here.
706            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
707        } else if config.data_race_detector {
708            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
709        } else {
710            GlobalDataRaceHandler::None
711        };
712        // Determine page size, stack address, and stack size.
713        // These values are mostly meaningless, but the stack address is also where we start
714        // allocating physical integer addresses for all allocations.
715        let page_size = if let Some(page_size) = config.page_size {
716            page_size
717        } else {
718            let target = &tcx.sess.target;
719            match target.arch.as_ref() {
720                "wasm32" | "wasm64" => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
721                "aarch64" => {
722                    if target.options.vendor.as_ref() == "apple" {
723                        // No "definitive" source, but see:
724                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
725                        // https://github.com/ziglang/zig/issues/11308 etc.
726                        16 * 1024
727                    } else {
728                        4 * 1024
729                    }
730                }
731                _ => 4 * 1024,
732            }
733        };
734        // On 16bit targets, 32 pages is more than the entire address space!
735        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
736        let stack_size =
737            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
738        assert!(
739            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
740            "miri only supports up to {} CPUs, but {} were configured",
741            cpu_affinity::MAX_CPUS,
742            config.num_cpus
743        );
744        let threads = ThreadManager::new(config);
745        let mut thread_cpu_affinity = FxHashMap::default();
746        if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") {
747            thread_cpu_affinity
748                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
749        }
750        let alloc_addresses =
751            RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr, tcx));
752        MiriMachine {
753            tcx,
754            borrow_tracker,
755            data_race,
756            alloc_addresses,
757            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
758            env_vars: EnvVars::default(),
759            main_fn_ret_place: None,
760            argc: None,
761            argv: None,
762            cmd_line: None,
763            tls: TlsData::default(),
764            isolated_op: config.isolated_op,
765            validation: config.validation,
766            fds: shims::FdTable::init(config.mute_stdout_stderr),
767            epoll_interests: shims::EpollInterestTable::new(),
768            dirs: Default::default(),
769            layouts,
770            threads,
771            thread_cpu_affinity,
772            static_roots: Vec::new(),
773            profiler,
774            string_cache: Default::default(),
775            exported_symbols_cache: FxHashMap::default(),
776            backtrace_style: config.backtrace_style,
777            local_crates,
778            extern_statics: FxHashMap::default(),
779            rng: RefCell::new(rng),
780            allocator: (!config.native_lib.is_empty())
781                .then(|| Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new()))),
782            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
783            track_alloc_accesses: config.track_alloc_accesses,
784            check_alignment: config.check_alignment,
785            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
786            preemption_rate: config.preemption_rate,
787            report_progress: config.report_progress,
788            basic_block_count: 0,
789            monotonic_clock: MonotonicClock::new(config.isolated_op == IsolatedOp::Allow),
790            #[cfg(all(unix, feature = "native-lib"))]
791            native_lib: config.native_lib.iter().map(|lib_file_path| {
792                let host_triple = rustc_session::config::host_tuple();
793                let target_triple = tcx.sess.opts.target_triple.tuple();
794                // Check if host target == the session target.
795                if host_triple != target_triple {
796                    panic!(
797                        "calling native C functions in linked .so file requires host and target to be the same: \
798                        host={host_triple}, target={target_triple}",
799                    );
800                }
801                // Note: it is the user's responsibility to provide a correct SO file.
802                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
803                // undefined behaviour in Miri itself!
804                (
805                    unsafe {
806                        libloading::Library::new(lib_file_path)
807                            .expect("failed to read specified extern shared object file")
808                    },
809                    lib_file_path.clone(),
810                )
811            }).collect(),
812            #[cfg(not(all(unix, feature = "native-lib")))]
813            native_lib: config.native_lib.iter().map(|_| {
814                panic!("calling functions from native libraries via FFI is not supported in this build of Miri")
815            }).collect(),
816            gc_interval: config.gc_interval,
817            since_gc: 0,
818            num_cpus: config.num_cpus,
819            page_size,
820            stack_addr,
821            stack_size,
822            collect_leak_backtraces: config.collect_leak_backtraces,
823            allocation_spans: RefCell::new(FxHashMap::default()),
824            symbolic_alignment: RefCell::new(FxHashMap::default()),
825            union_data_ranges: FxHashMap::default(),
826            pthread_mutex_sanity: Cell::new(false),
827            pthread_rwlock_sanity: Cell::new(false),
828            pthread_condvar_sanity: Cell::new(false),
829            allocator_shim_symbols: Self::allocator_shim_symbols(tcx),
830            mangle_internal_symbol_cache: Default::default(),
831            force_intrinsic_fallback: config.force_intrinsic_fallback,
832            float_nondet: config.float_nondet,
833            float_rounding_error: config.float_rounding_error,
834            short_fd_operations: config.short_fd_operations,
835        }
836    }
837
838    fn allocator_shim_symbols(
839        tcx: TyCtxt<'tcx>,
840    ) -> FxHashMap<Symbol, Either<Symbol, SpecialAllocatorMethod>> {
841        use rustc_codegen_ssa::base::allocator_shim_contents;
842
843        // codegen uses `allocator_kind_for_codegen` here, but that's only needed to deal with
844        // dylibs which we do not support.
845        let Some(kind) = tcx.allocator_kind(()) else {
846            return Default::default();
847        };
848        let methods = allocator_shim_contents(tcx, kind);
849        let mut symbols = FxHashMap::default();
850        for method in methods {
851            let from_name = Symbol::intern(&mangle_internal_symbol(
852                tcx,
853                &allocator::global_fn_name(method.name),
854            ));
855            let to = match method.special {
856                Some(special) => Either::Right(special),
857                None =>
858                    Either::Left(Symbol::intern(&mangle_internal_symbol(
859                        tcx,
860                        &allocator::default_fn_name(method.name),
861                    ))),
862            };
863            symbols.try_insert(from_name, to).unwrap();
864        }
865        symbols
866    }
867
    /// Second-stage initialization for the parts of the machine state that need a fully
    /// constructed interpreter (`ecx`) before they can be set up: environment variables,
    /// extern statics, and the thread manager.
    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }
878
    /// Registers `ptr` as the address of the extern static named `name`.
    /// Panics if the same name is registered twice.
    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This got just allocated, so there definitely is a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }
884
885    pub(crate) fn communicate(&self) -> bool {
886        self.isolated_op == IsolatedOp::Allow
887    }
888
889    /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
890    pub(crate) fn is_local(&self, frame: &FrameInfo<'_>) -> bool {
891        let def_id = frame.instance.def_id();
892        def_id.is_local() || self.local_crates.contains(&def_id.krate)
893    }
894
895    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
896    pub(crate) fn handle_abnormal_termination(&mut self) {
897        // All strings in the profile data are stored in a single string table which is not
898        // written to disk until the profiler is dropped. If the interpreter exits without dropping
899        // the profiler, it is not possible to interpret the profile data and all measureme tools
900        // will panic when given the file.
901        drop(self.profiler.take());
902    }
903
    /// The alignment corresponding to the emulated page size.
    pub(crate) fn page_align(&self) -> Align {
        // NOTE(review): `page_size` is presumably always a power of two (all the defaults
        // are); otherwise this `unwrap` panics.
        Align::from_bytes(self.page_size).unwrap()
    }
907
908    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
909        self.allocation_spans
910            .borrow()
911            .get(&alloc_id)
912            .map(|(allocated, _deallocated)| allocated.data())
913    }
914
915    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
916        self.allocation_spans
917            .borrow()
918            .get(&alloc_id)
919            .and_then(|(_allocated, deallocated)| *deallocated)
920            .map(Span::data)
921    }
922
    /// Creates the per-allocation machine state (`AllocExtra`) for a new allocation:
    /// borrow-tracker state, data-race state, an optional leak backtrace, and an optional
    /// allocation-span record — each depending on the machine configuration.
    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        // Emit a diagnostic if the user asked to track this allocation ID.
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id, size, align, kind));
        }

        // Borrow-tracker state, only if a borrow tracker is enabled.
        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        // Data-race state, matching whichever global data-race handler is in use.
        let data_race = match &ecx.machine.data_race {
            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
            GlobalDataRaceHandler::Vclocks(data_race) =>
                AllocDataRaceHandler::Vclocks(
                    data_race::AllocState::new_allocation(
                        data_race,
                        &ecx.machine.threads,
                        size,
                        kind,
                        ecx.machine.current_user_relevant_span(),
                    ),
                    // Weak-memory tracking only exists on top of vector-clock tracking.
                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
                ),
            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
                // GenMC learns about new allocations directly from the alloc_addresses module,
                // since it has to be able to control the address at which they are placed.
                AllocDataRaceHandler::Genmc
            }
        };

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        // Record the creation span if this kind of allocation asks for it.
        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_user_relevant_span(), None));
        }

        interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
    }
978}
979
impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        // Destructure exhaustively (no `..`) so that adding a new field to `MiriMachine`
        // causes a compile error here, forcing a decision on whether that field can
        // contain provenance that the GC must see.
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            epoll_interests:_,
            tcx: _,
            isolated_op: _,
            validation: _,
            monotonic_clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            local_crates: _,
            rng: _,
            allocator: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            allocator_shim_symbols: _,
            mangle_internal_symbol_cache: _,
            force_intrinsic_fallback: _,
            float_nondet: _,
            float_rounding_error: _,
            short_fd_operations: _,
        } = self;

        // Visit every field that can hold pointers with provenance.
        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}
1058
/// A rustc InterpCx for Miri: the full interpreter state, combining rustc's generic
/// interpreter with our `MiriMachine`.
pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;
1061
/// A little trait that's useful to be inherited by extension traits.
pub trait MiriInterpCxExt<'tcx> {
    /// Returns a shared reference to the underlying interpreter context.
    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
    /// Returns a mutable reference to the underlying interpreter context.
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
}
// The interpreter context trivially acts as its own context, so extension traits
// implemented in terms of `MiriInterpCxExt` apply to it directly.
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}
1077
1078/// Machine hook implementations.
1079impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
    // Miri-specific memory kinds (environment, TLS, ...) and dynamically resolved symbols.
    type MemoryKind = MiriMemoryKind;
    type ExtraFnVal = DynSym;

    // Per-stack-frame and per-allocation machine state.
    type FrameExtra = FrameExtra<'tcx>;
    type AllocExtra = AllocExtra<'tcx>;

    type Provenance = Provenance;
    type ProvenanceExtra = ProvenanceExtra;
    type Bytes = MiriAllocBytes;

    // The memory map uses `MonoHashMap`, a Miri-specific map type.
    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;

    // Global allocations are stored under this memory kind.
    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    // Allocation failure is reported as an interpreter error, not a panic.
    const PANIC_ON_ALLOC_FAIL: bool = false;
1096
1097    #[inline(always)]
1098    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
1099        ecx.machine.check_alignment != AlignmentCheck::None
1100    }
1101
1102    #[inline(always)]
1103    fn alignment_check(
1104        ecx: &MiriInterpCx<'tcx>,
1105        alloc_id: AllocId,
1106        alloc_align: Align,
1107        alloc_kind: AllocKind,
1108        offset: Size,
1109        align: Align,
1110    ) -> Option<Misalignment> {
1111        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
1112            // Just use the built-in check.
1113            return None;
1114        }
1115        if alloc_kind != AllocKind::LiveData {
1116            // Can't have any extra info here.
1117            return None;
1118        }
1119        // Let's see which alignment we have been promised for this allocation.
1120        let (promised_offset, promised_align) = ecx
1121            .machine
1122            .symbolic_alignment
1123            .borrow()
1124            .get(&alloc_id)
1125            .copied()
1126            .unwrap_or((Size::ZERO, alloc_align));
1127        if promised_align < align {
1128            // Definitely not enough.
1129            Some(Misalignment { has: promised_align, required: align })
1130        } else {
1131            // What's the offset between us and the promised alignment?
1132            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
1133            // That must also be aligned.
1134            if distance.is_multiple_of(align.bytes()) {
1135                // All looking good!
1136                None
1137            } else {
1138                // The biggest power of two through which `distance` is divisible.
1139                let distance_pow2 = 1 << distance.trailing_zeros();
1140                Some(Misalignment {
1141                    has: Align::from_bytes(distance_pow2).unwrap(),
1142                    required: align,
1143                })
1144            }
1145        }
1146    }
1147
1148    #[inline(always)]
1149    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
1150        ecx.machine.validation != ValidationMode::No
1151    }
1152    #[inline(always)]
1153    fn enforce_validity_recursively(
1154        ecx: &InterpCx<'tcx, Self>,
1155        _layout: TyAndLayout<'tcx>,
1156    ) -> bool {
1157        ecx.machine.validation == ValidationMode::Deep
1158    }
1159
    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        // Follow the session setting: optional overflow checks may be skipped exactly when
        // the session has overflow checks disabled.
        !ecx.tcx.sess.overflow_checks()
    }
1164
1165    fn check_fn_target_features(
1166        ecx: &MiriInterpCx<'tcx>,
1167        instance: ty::Instance<'tcx>,
1168    ) -> InterpResult<'tcx> {
1169        let attrs = ecx.tcx.codegen_instance_attrs(instance.def);
1170        if attrs
1171            .target_features
1172            .iter()
1173            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
1174        {
1175            let unavailable = attrs
1176                .target_features
1177                .iter()
1178                .filter(|&feature| {
1179                    feature.kind != TargetFeatureKind::Implied
1180                        && !ecx.tcx.sess.target_features.contains(&feature.name)
1181                })
1182                .fold(String::new(), |mut s, feature| {
1183                    if !s.is_empty() {
1184                        s.push_str(", ");
1185                    }
1186                    s.push_str(feature.name.as_str());
1187                    s
1188                });
1189            let msg = format!(
1190                "calling a function that requires unavailable target features: {unavailable}"
1191            );
1192            // On WASM, this is not UB, but instead gets rejected during validation of the module
1193            // (see #84988).
1194            if ecx.tcx.sess.target.is_like_wasm {
1195                throw_machine_stop!(TerminationInfo::Abort(msg));
1196            } else {
1197                throw_ub_format!("{msg}");
1198            }
1199        }
1200        interp_ok(())
1201    }
1202
    /// Resolves a function call: returns MIR to execute, or `None` if the call was handled
    /// entirely inside Miri (foreign items and GenMC-intercepted functions).
    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            let _trace = enter_trace_span!("emulate_foreign_item");
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be Ok(None) if we're emulating the intrinsic entirely within Miri (no need
            // to run extra MIR), and Ok(Some(body)) if we found MIR to run for the
            // foreign function
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
            let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
        }

        // In GenMC mode, some functions (e.g. `std::sync::Mutex` operations) are intercepted.
        if ecx.machine.data_race.as_genmc_ref().is_some()
            && ecx.genmc_intercept_function(instance, args, dest)?
        {
            ecx.return_to_block(ret)?;
            return interp_ok(None);
        }

        // Otherwise, load the MIR.
        let _trace = enter_trace_span!("load_mir");
        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }
1238
    /// Called for "extra function values" (`DynSym`), i.e. dynamically resolved symbols;
    /// these are emulated like foreign items.
    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        fn_val: DynSym,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
    }
1252
    /// Forwarded to Miri's intrinsic emulation logic.
    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        ecx.call_intrinsic(instance, args, dest, ret, unwind)
    }
1264
    /// Forwarded to Miri's panic handling for failed MIR `Assert` terminators.
    #[inline(always)]
    fn assert_panic(
        ecx: &mut MiriInterpCx<'tcx>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }
1273
    /// Starts a non-unwinding panic with the given message.
    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        ecx.start_panic_nounwind(msg)
    }
1277
    /// Called when unwinding hits a terminating frame: invokes the lang item matching
    /// the termination reason.
    fn unwind_terminate(
        ecx: &mut InterpCx<'tcx, Self>,
        reason: mir::UnwindTerminateReason,
    ) -> InterpResult<'tcx> {
        // Call the lang item.
        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
        ecx.call_function(
            panic,
            ExternAbi::Rust,
            &[],
            None,
            // The call is not expected to return (`ret: None`), and unwinding out of it is
            // treated as unreachable.
            ReturnContinuation::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
        )?;
        interp_ok(())
    }
1294
    /// Forwarded to Miri's pointer-operation logic (comparison, offset, ...).
    #[inline(always)]
    fn binary_ptr_op(
        ecx: &MiriInterpCx<'tcx>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        ecx.binary_ptr_op(bin_op, left, right)
    }
1304
    /// Forwarded to Miri's NaN generation logic for float operations.
    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }
1312
    /// Applies float non-determinism by injecting a random error into the value.
    #[inline(always)]
    fn apply_float_nondet(
        ecx: &mut InterpCx<'tcx, Self>,
        val: ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        // NOTE(review): the `4` controls the magnitude of the injected error; see
        // `apply_random_float_error_to_imm` for its exact meaning.
        crate::math::apply_random_float_error_to_imm(ecx, val, 4)
    }
1320
    /// Forwarded to Miri's logic for the result of min/max when both operands compare equal.
    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }
1325
    /// Non-deterministically decides whether a multiply-add should be fused.
    #[inline(always)]
    fn float_fuse_mul_add(ecx: &mut InterpCx<'tcx, Self>) -> bool {
        // Note the short-circuit: the RNG is only advanced when float non-determinism is enabled.
        ecx.machine.float_nondet && ecx.machine.rng.get_mut().random()
    }
1330
    /// `cfg!(ub_checks)` follows the session setting.
    #[inline(always)]
    fn ub_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.ub_checks())
    }
1335
    /// Contract checks follow the session setting.
    #[inline(always)]
    fn contract_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.contract_checks())
    }
1340
    /// Returns the pointer for the current thread's instance of the given thread-local static.
    #[inline(always)]
    fn thread_local_static_pointer(
        ecx: &mut MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }
1348
1349    fn extern_static_pointer(
1350        ecx: &MiriInterpCx<'tcx>,
1351        def_id: DefId,
1352    ) -> InterpResult<'tcx, StrictPointer> {
1353        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
1354        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
1355            // Various parts of the engine rely on `get_alloc_info` for size and alignment
1356            // information. That uses the type information of this static.
1357            // Make sure it matches the Miri allocation for this.
1358            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
1359                panic!("extern_statics cannot contain wildcards")
1360            };
1361            let info = ecx.get_alloc_info(alloc_id);
1362            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
1363            let extern_decl_layout =
1364                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
1365            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
1366                throw_unsup_format!(
1367                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
1368                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
1369                    but Miri emulates it via an extern static shim \
1370                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
1371                    name = ecx.tcx.def_path_str(def_id),
1372                    krate = ecx.tcx.crate_name(def_id.krate),
1373                    decl_size = extern_decl_layout.size.bytes(),
1374                    decl_align = extern_decl_layout.align.bytes(),
1375                    shim_size = info.size.bytes(),
1376                    shim_align = info.align.bytes(),
1377                )
1378            }
1379            interp_ok(ptr)
1380        } else {
1381            throw_unsup_format!("extern static `{link_name}` is not supported by Miri",)
1382        }
1383    }
1384
    /// Initializes the machine state for a non-global allocation.
    fn init_local_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Self::AllocExtra> {
        // Globals go through `adjust_global_allocation` instead.
        assert!(kind != MiriMemoryKind::Global.into());
        MiriMachine::init_allocation(ecx, id, kind, size, align)
    }
1395
    /// Turns a root pointer to a global allocation into a Miri pointer by attaching the
    /// borrow-tracker tag for the allocation's root.
    fn adjust_alloc_root_pointer(
        ecx: &MiriInterpCx<'tcx>,
        ptr: interpret::Pointer<CtfeProvenance>,
        kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
        let alloc_id = ptr.provenance.alloc_id();
        if cfg!(debug_assertions) {
            // The machine promises to never call us on thread-local or extern statics.
            match ecx.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
                    panic!("adjust_alloc_root_pointer called on thread-local static")
                }
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
                    panic!("adjust_alloc_root_pointer called on extern static")
                }
                _ => {}
            }
        }
        // FIXME: can we somehow preserve the immutability of `ptr`?
        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
        } else {
            // Value does not matter, SB is disabled
            BorTag::default()
        };
        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
    }
1424
    /// Called on `usize as ptr` casts.
    #[inline(always)]
    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
        // Delegates to the address-management logic (see the `alloc_addresses` module).
        ecx.ptr_from_addr_cast(addr)
    }
1430
    /// Called on `ptr as usize` casts.
    /// (Actually computing the resulting `usize` doesn't need machine help,
    /// that's just `Scalar::try_to_int`.)
    #[inline(always)]
    fn expose_provenance(
        ecx: &InterpCx<'tcx, Self>,
        provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        // Delegates to the address-management logic (see the `alloc_addresses` module).
        ecx.expose_provenance(provenance)
    }
1441
1442    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
1443    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
1444    /// be used to disambiguate situations where a wildcard pointer sits right in between two
1445    /// allocations.
1446    ///
1447    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
1448    /// The resulting `AllocId` will just be used for that one step and the forgotten again
1449    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
1450    /// stored in machine state).
1451    ///
1452    /// When this fails, that means the pointer does not point to a live allocation.
1453    fn ptr_get_alloc(
1454        ecx: &MiriInterpCx<'tcx>,
1455        ptr: StrictPointer,
1456        size: i64,
1457    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
1458        let rel = ecx.ptr_get_alloc(ptr, size);
1459
1460        rel.map(|(alloc_id, size)| {
1461            let tag = match ptr.provenance {
1462                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
1463                Provenance::Wildcard => ProvenanceExtra::Wildcard,
1464            };
1465            (alloc_id, size, tag)
1466        })
1467    }
1468
    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
    ///
    /// If `alloc` contains pointers, then they are all pointing to globals.
    ///
    /// This should avoid copying if no work has to be done! If this returns an owned
    /// allocation (because a copy had to be done to adjust things), machine memory will
    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
    /// owned allocation to the map even when the map is shared.)
    fn adjust_global_allocation<'b>(
        ecx: &InterpCx<'tcx, Self>,
        id: AllocId,
        alloc: &'b Allocation,
    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
    {
        // Convert the tcx allocation to Miri's byte storage and provenance types.
        let alloc = alloc.adjust_from_tcx(
            &ecx.tcx,
            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
            |ptr| ecx.global_root_pointer(ptr),
        )?;
        let kind = MiriMemoryKind::Global.into();
        // Set up the usual per-allocation machine state for the adjusted allocation.
        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
        interp_ok(Cow::Owned(alloc.with_extra(extra)))
    }
1492
    /// Hook invoked before every memory read: emits a diagnostic for tracked allocations and
    /// feeds the access to the data-race detector and the borrow tracker.
    #[inline(always)]
    fn before_memory_read(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_extra: &AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read));
        }
        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_data_race) => {
                let _trace = enter_trace_span!(data_race::before_memory_read);
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race
                else {
                    unreachable!();
                };
                data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }
1528
    /// Hook run before every memory write: emits tracking diagnostics and forwards the
    /// access to the data race / weak memory and borrow tracking subsystems.
    #[inline(always)]
    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        // If access tracking is enabled and this allocation is tracked, report the write.
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write));
        }
        // Data race checks come before the borrow tracker, matching `before_memory_read`.
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_write);
                // Global `Vclocks` implies the per-allocation handler is `Vclocks` too.
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
                    &mut alloc_extra.data_race
                else {
                    unreachable!()
                };
                data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }
1564
    /// Hook run before an allocation is deallocated: emits tracking diagnostics, informs
    /// the data race and borrow tracking subsystems, records the deallocation site for
    /// diagnostics, and releases the machine's bookkeeping for the allocation.
    #[inline(always)]
    fn before_memory_deallocation(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra),
        size: Size,
        align: Align,
        kind: MemoryKind,
    ) -> InterpResult<'tcx> {
        // If this allocation is tracked, report that it is being freed.
        if machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.handle_dealloc(machine, alloc_id, ptr.addr(), kind)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_deallocation);
                // Deallocation is modeled as a `Deallocate` write covering the whole
                // allocation, so it races with any concurrent access.
                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
                data_race.write(
                    alloc_id,
                    alloc_range(Size::ZERO, size),
                    NaWriteType::Deallocate,
                    None,
                    machine,
                )?;
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_deallocation(alloc_id, prove_extra, size, machine)?;
        }
        // Remember where the deallocation happened, for use-after-free diagnostics.
        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
        {
            *deallocated_at = Some(machine.current_user_relevant_span());
        }
        machine.free_alloc_id(alloc_id, size, align, kind);
        interp_ok(())
    }
1605
1606    #[inline(always)]
1607    fn retag_ptr_value(
1608        ecx: &mut InterpCx<'tcx, Self>,
1609        kind: mir::RetagKind,
1610        val: &ImmTy<'tcx>,
1611    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
1612        if ecx.machine.borrow_tracker.is_some() {
1613            ecx.retag_ptr_value(kind, val)
1614        } else {
1615            interp_ok(val.clone())
1616        }
1617    }
1618
1619    #[inline(always)]
1620    fn retag_place_contents(
1621        ecx: &mut InterpCx<'tcx, Self>,
1622        kind: mir::RetagKind,
1623        place: &PlaceTy<'tcx>,
1624    ) -> InterpResult<'tcx> {
1625        if ecx.machine.borrow_tracker.is_some() {
1626            ecx.retag_place_contents(kind, place)?;
1627        }
1628        interp_ok(())
1629    }
1630
1631    fn protect_in_place_function_argument(
1632        ecx: &mut InterpCx<'tcx, Self>,
1633        place: &MPlaceTy<'tcx>,
1634    ) -> InterpResult<'tcx> {
1635        // If we have a borrow tracker, we also have it set up protection so that all reads *and
1636        // writes* during this call are insta-UB.
1637        let protected_place = if ecx.machine.borrow_tracker.is_some() {
1638            ecx.protect_place(place)?
1639        } else {
1640            // No borrow tracker.
1641            place.clone()
1642        };
1643        // We do need to write `uninit` so that even after the call ends, the former contents of
1644        // this place cannot be observed any more. We do the write after retagging so that for
1645        // Tree Borrows, this is considered to activate the new tag.
1646        // Conveniently this also ensures that the place actually points to suitable memory.
1647        ecx.write_uninit(&protected_place)?;
1648        // Now we throw away the protected place, ensuring its tag is never used again.
1649        interp_ok(())
1650    }
1651
    /// Initializes the Miri-specific extra state of a freshly pushed stack frame,
    /// starting a profiler timing event for the frame if profiling is enabled.
    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        // Start recording our event before doing anything else
        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
            // Intern the function name in the profiler's string table, caching the
            // interned id so repeated calls to the same function reuse it.
            let fn_name = frame.instance().to_string();
            let entry = ecx.machine.string_cache.entry(fn_name.clone());
            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));

            Some(profiler.start_recording_interval_event_detached(
                *name,
                measureme::EventId::from_label(*name),
                ecx.active_thread().to_u32(),
            ))
        } else {
            None
        };

        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();

        // Assemble the per-frame state for each enabled subsystem.
        let extra = FrameExtra {
            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
            catch_unwind: None,
            timing,
            is_user_relevant: ecx.machine.is_user_relevant(&frame),
            data_race: ecx
                .machine
                .data_race
                .as_vclocks_ref()
                .map(|_| data_race::FrameState::default()),
        };

        interp_ok(frame.with_extra(extra))
    }
1688
    /// Borrows the call stack of the currently active thread.
    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        ecx.active_thread_stack()
    }
1694
    /// Mutably borrows the call stack of the currently active thread.
    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        ecx.active_thread_stack_mut()
    }
1700
    /// Called before executing each terminator: advances the basic-block counters,
    /// possibly reports progress, runs the provenance GC, preempts the active thread,
    /// and ticks the monotonic clock.
    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
        ecx.machine.since_gc += 1;
        // Possibly report our progress. This will point at the terminator we are about to execute.
        if let Some(report_progress) = ecx.machine.report_progress {
            if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
                    block_count: ecx.machine.basic_block_count,
                });
            }
        }

        // Search for BorTags to find all live pointers, then remove all other tags from borrow
        // stacks.
        // When debug assertions are enabled, run the GC as often as possible so that any cases
        // where it mistakenly removes an important tag become visible.
        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
            ecx.machine.since_gc = 0;
            ecx.run_provenance_gc();
        }

        // These are our preemption points.
        // (This will only take effect after the terminator has been executed.)
        ecx.maybe_preempt_active_thread();

        // Make sure some time passes.
        ecx.machine.monotonic_clock.tick();

        interp_ok(())
    }
1731
1732    #[inline(always)]
1733    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1734        if ecx.frame().extra.is_user_relevant {
1735            // We just pushed a local frame, so we know that the topmost local frame is the topmost
1736            // frame. If we push a non-local frame, there's no need to do anything.
1737            let stack_len = ecx.active_thread_stack().len();
1738            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
1739        }
1740        interp_ok(())
1741    }
1742
    /// Called just before the topmost frame is popped (and before the return value is
    /// copied out of it).
    fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        let frame = ecx.frame();
        // We want this *before* the return value copy, because the return place itself is protected
        // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value.
        if ecx.machine.borrow_tracker.is_some() {
            ecx.on_stack_pop(frame)?;
        }
        if frame.extra.is_user_relevant {
            // All that we store is whether or not the frame we just removed is local, so now we
            // have no idea where the next topmost local frame is. So we recompute it.
            // (If this ever becomes a bottleneck, we could have `push` store the previous
            // user-relevant frame and restore that here.)
            // We have to skip the frame that is just being popped.
            ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
        }
        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
        // concurrency and what it prints is just plain wrong. So we print our own information
        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
        info!("Leaving {}", ecx.frame().instance());
        interp_ok(())
    }
1764
    /// Called after a frame was popped: finishes its profiler timing event, runs the
    /// unwind handling, and logs which frame we are continuing in.
    #[inline(always)]
    fn after_stack_pop(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        unwinding: bool,
    ) -> InterpResult<'tcx, ReturnAction> {
        let res = {
            // Move `frame` into a sub-scope so we control when it will be dropped.
            let mut frame = frame;
            let timing = frame.extra.timing.take();
            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
            // If the profiler is enabled, `init_frame` recorded a timing event, so
            // `timing` must be `Some` here.
            if let Some(profiler) = ecx.machine.profiler.as_ref() {
                profiler.finish_recording_interval_event(timing.unwrap());
            }
            res
        };
        // Needs to be done after dropping frame to show up on the right nesting level.
        // (Cc https://github.com/rust-lang/miri/issues/2266)
        if !ecx.active_thread_stack().is_empty() {
            info!("Continuing in {}", ecx.frame().instance());
        }
        res
    }
1788
1789    fn after_local_read(
1790        ecx: &InterpCx<'tcx, Self>,
1791        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
1792        local: mir::Local,
1793    ) -> InterpResult<'tcx> {
1794        if let Some(data_race) = &frame.extra.data_race {
1795            let _trace = enter_trace_span!(data_race::after_local_read);
1796            data_race.local_read(local, &ecx.machine);
1797        }
1798        interp_ok(())
1799    }
1800
1801    fn after_local_write(
1802        ecx: &mut InterpCx<'tcx, Self>,
1803        local: mir::Local,
1804        storage_live: bool,
1805    ) -> InterpResult<'tcx> {
1806        if let Some(data_race) = &ecx.frame().extra.data_race {
1807            let _trace = enter_trace_span!(data_race::after_local_write);
1808            data_race.local_write(local, storage_live, &ecx.machine);
1809        }
1810        interp_ok(())
1811    }
1812
1813    fn after_local_moved_to_memory(
1814        ecx: &mut InterpCx<'tcx, Self>,
1815        local: mir::Local,
1816        mplace: &MPlaceTy<'tcx>,
1817    ) -> InterpResult<'tcx> {
1818        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
1819            panic!("after_local_allocated should only be called on fresh allocations");
1820        };
1821        // Record the span where this was allocated: the declaration of the local.
1822        let local_decl = &ecx.frame().body().local_decls[local];
1823        let span = local_decl.source_info.span;
1824        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
1825        // The data race system has to fix the clocks used for this write.
1826        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
1827        if let Some(data_race) =
1828            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
1829        {
1830            let _trace = enter_trace_span!(data_race::after_local_moved_to_memory);
1831            data_race.local_moved_to_memory(
1832                local,
1833                alloc_info.data_race.as_vclocks_mut().unwrap(),
1834                machine,
1835            );
1836        }
1837        interp_ok(())
1838    }
1839
1840    fn get_global_alloc_salt(
1841        ecx: &InterpCx<'tcx, Self>,
1842        instance: Option<ty::Instance<'tcx>>,
1843    ) -> usize {
1844        let unique = if let Some(instance) = instance {
1845            // Functions cannot be identified by pointers, as asm-equal functions can get
1846            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
1847            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
1848            // mention of a function. This means that `main as fn() == main as fn()` is false, while
1849            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
1850            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
1851            // whether codegen will actually emit duplicate functions. It does that when they have
1852            // non-lifetime generics, or when they can be inlined. All other functions are given a
1853            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
1854            // cannot be relied upon for anything. But if we don't do this, the
1855            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
1856            // backtraces look terrible.
1857            let is_generic = instance
1858                .args
1859                .into_iter()
1860                .any(|arg| !matches!(arg.kind(), ty::GenericArgKind::Lifetime(_)));
1861            let can_be_inlined = matches!(
1862                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
1863                InliningThreshold::Always
1864            ) || !matches!(
1865                ecx.tcx.codegen_instance_attrs(instance.def).inline,
1866                InlineAttr::Never
1867            );
1868            !is_generic && !can_be_inlined
1869        } else {
1870            // Non-functions are never unique.
1871            false
1872        };
1873        // Always use the same salt if the allocation is unique.
1874        if unique {
1875            CTFE_ALLOC_SALT
1876        } else {
1877            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
1878        }
1879    }
1880
1881    fn cached_union_data_range<'e>(
1882        ecx: &'e mut InterpCx<'tcx, Self>,
1883        ty: Ty<'tcx>,
1884        compute_range: impl FnOnce() -> RangeSet,
1885    ) -> Cow<'e, RangeSet> {
1886        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
1887    }
1888
1889    fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams {
1890        use crate::alloc::MiriAllocParams;
1891
1892        match &self.allocator {
1893            Some(alloc) => MiriAllocParams::Isolated(alloc.clone()),
1894            None => MiriAllocParams::Global,
1895        }
1896    }
1897
    /// Lazily constructs and enters a tracing span. With the "tracing" feature disabled
    /// this is a no-op: `()` serves as the (trivial) `EnteredTraceSpan` and the closure
    /// is never called.
    fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
        #[cfg(feature = "tracing")]
        {
            span().entered()
        }
        #[cfg(not(feature = "tracing"))]
        #[expect(clippy::unused_unit)]
        {
            let _ = span; // so we avoid the "unused variable" warning
            ()
        }
    }
1910}
1911
/// Trait for callbacks handling asynchronous machine operations.
pub trait MachineCallback<'tcx, T>: VisitProvenance {
    /// The function to be invoked when the callback is fired.
    ///
    /// Consumes the callback (`self: Box<Self>`), so each callback runs at most once.
    fn call(
        self: Box<Self>,
        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
        arg: T,
    ) -> InterpResult<'tcx>;
}
1921
/// Type alias for boxed machine callbacks with generic argument type.
///
/// Usually constructed via the [`callback!`] macro below.
pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;
1924
/// Creates a `DynMachineCallback`:
///
/// ```rust
/// callback!(
///     @capture<'tcx> {
///         var1: Ty1,
///         var2: Ty2<'tcx>,
///     }
///     |this, arg: ArgTy| {
///         // Implement the callback here.
///         todo!()
///     }
/// )
/// ```
///
/// All the argument types must implement `VisitProvenance`.
#[macro_export]
macro_rules! callback {
    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
        { $($name:ident: $type:ty),* $(,)? }
     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
        // Ad-hoc struct holding the captured state.
        struct Callback<$tcx, $($lft),*> {
            $($name: $type,)*
            // Ensures the `$tcx` lifetime parameter is used even with no captures.
            _phantom: std::marker::PhantomData<&$tcx ()>,
        }

        // Provenance visiting just forwards to each captured field.
        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
                $(
                    self.$name.visit_provenance(_visit);
                )*
            }
        }

        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
            fn call(
                self: Box<Self>,
                $this: &mut MiriInterpCx<$tcx>,
                $arg: $arg_ty
            ) -> InterpResult<$tcx> {
                // Destructure so the captures are available by name in `$body`.
                #[allow(unused_variables)]
                let Callback { $($name,)* _phantom } = *self;
                $body
            }
        }

        Box::new(Callback {
            $($name,)*
            _phantom: std::marker::PhantomData
        })
    }};
}