// miri/concurrency/thread.rs

1//! Implements threads.
2
3use std::sync::atomic::Ordering::Relaxed;
4use std::task::Poll;
5use std::time::{Duration, SystemTime};
6use std::{io, mem};
7
8use rand::seq::IteratorRandom;
9use rustc_abi::ExternAbi;
10use rustc_const_eval::CTRL_C_RECEIVED;
11use rustc_data_structures::either::Either;
12use rustc_data_structures::fx::FxHashMap;
13use rustc_hir::def_id::DefId;
14use rustc_index::{Idx, IndexVec};
15use rustc_middle::mir::Mutability;
16use rustc_middle::ty::layout::TyAndLayout;
17use rustc_span::{DUMMY_SP, Span};
18use rustc_target::spec::Os;
19
20use crate::concurrency::GlobalDataRaceHandler;
21use crate::shims::tls;
22use crate::*;
23
/// The next action the scheduler decided on, as returned by `schedule`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum SchedulingAction {
    /// Execute step on the active thread.
    ExecuteStep,
    /// Wait for a bit, but at most as long as the duration specified.
    /// We wake up early if an I/O event happened.
    /// If the duration is [`None`], we sleep indefinitely. This is
    /// only allowed when isolation is disabled and there are threads waiting for I/O!
    SleepAndWaitForIo(Option<Duration>),
}
34
/// What to do with TLS allocations from terminated threads.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TlsAllocAction {
    /// Deallocate backing memory of thread-local statics as usual.
    Deallocate,
    /// Skip deallocating backing memory of thread-local statics and consider all memory reachable
    /// from them as "allowed to leak" (like global `static`s).
    Leak,
}
44
/// The argument type for the "unblock" callback, indicating why the thread got unblocked.
/// Passed to the [`DynUnblockCallback`] stored in [`ThreadState::Blocked`].
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UnblockKind {
    /// Operation completed successfully, thread continues normal execution.
    Ready,
    /// The operation did not complete within its specified duration.
    TimedOut,
}
53
/// Type alias for unblock callbacks, i.e. machine callbacks invoked when
/// a thread gets unblocked. The [`UnblockKind`] argument tells the callback
/// whether the operation completed or timed out.
pub type DynUnblockCallback<'tcx> = DynMachineCallback<'tcx, UnblockKind>;
57
/// A thread identifier.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct ThreadId(u32);

impl ThreadId {
    /// The id of the main thread; it is always the first thread that gets created.
    pub const MAIN_THREAD: ThreadId = ThreadId(0);

    /// Return the raw `u32` backing this thread id.
    pub fn to_u32(self) -> u32 {
        self.0
    }

    /// Create a new thread id from a `u32` without checking if this thread exists.
    pub fn new_unchecked(id: u32) -> Self {
        ThreadId(id)
    }
}
74
75impl Idx for ThreadId {
76    fn new(idx: usize) -> Self {
77        ThreadId(u32::try_from(idx).unwrap())
78    }
79
80    fn index(self) -> usize {
81        usize::try_from(self.0).unwrap()
82    }
83}
84
85impl From<ThreadId> for u64 {
86    fn from(t: ThreadId) -> Self {
87        t.0.into()
88    }
89}
90
/// Keeps track of what the thread is blocked on.
/// Stored as the `reason` in [`ThreadState::Blocked`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BlockReason {
    /// The thread tried to join the specified thread and is blocked until that
    /// thread terminates.
    Join(ThreadId),
    /// Waiting for time to pass.
    Sleep,
    /// Blocked on a mutex.
    Mutex,
    /// Blocked on a condition variable.
    Condvar,
    /// Blocked on a reader-writer lock.
    RwLock,
    /// Blocked on a Futex variable.
    Futex,
    /// Blocked on an InitOnce.
    InitOnce,
    /// Blocked on epoll.
    Epoll,
    /// Blocked on eventfd.
    Eventfd,
    /// Blocked on unnamed_socket.
    UnnamedSocket,
    /// Blocked on an IO operation.
    IO,
    /// Blocked for any reason related to GenMC, such as `assume` statements (GenMC mode only).
    /// Will be implicitly unblocked when GenMC schedules this thread again.
    Genmc,
}
121
/// The state of a thread.
enum ThreadState<'tcx> {
    /// The thread is enabled and can be executed.
    Enabled,
    /// The thread is blocked on something. The `callback` is invoked (with an
    /// [`UnblockKind`]) when the thread gets unblocked; `timeout`, if set, bounds
    /// how long the thread stays blocked.
    Blocked { reason: BlockReason, timeout: Option<Timeout>, callback: DynUnblockCallback<'tcx> },
    /// The thread has terminated its execution. We do not delete terminated
    /// threads (FIXME: why?).
    Terminated,
}
132
133impl<'tcx> std::fmt::Debug for ThreadState<'tcx> {
134    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
135        match self {
136            Self::Enabled => write!(f, "Enabled"),
137            Self::Blocked { reason, timeout, .. } =>
138                f.debug_struct("Blocked").field("reason", reason).field("timeout", timeout).finish(),
139            Self::Terminated => write!(f, "Terminated"),
140        }
141    }
142}
143
144impl<'tcx> ThreadState<'tcx> {
145    fn is_enabled(&self) -> bool {
146        matches!(self, ThreadState::Enabled)
147    }
148
149    fn is_terminated(&self) -> bool {
150        matches!(self, ThreadState::Terminated)
151    }
152
153    fn is_blocked_on(&self, reason: BlockReason) -> bool {
154        matches!(*self, ThreadState::Blocked { reason: actual_reason, .. } if actual_reason == reason)
155    }
156}
157
/// The join status of a thread.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ThreadJoinStatus {
    /// The thread can be joined.
    Joinable,
    /// A thread is detached if its join handle was destroyed and no other
    /// thread can join it.
    Detached,
    /// The thread was already joined by some thread and cannot be joined again.
    Joined,
}
169
/// A thread.
pub struct Thread<'tcx> {
    /// Whether the thread is enabled, blocked, or terminated.
    state: ThreadState<'tcx>,

    /// Name of the thread.
    thread_name: Option<Vec<u8>>,

    /// The virtual call stack.
    stack: Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>>,

    /// A span that explains where the thread (or more specifically, its current root
    /// frame) "comes from".
    pub(crate) origin_span: Span,

    /// The function to call when the stack ran empty, to figure out what to do next.
    /// Conceptually, this is the interpreter implementation of the things that happen 'after' the
    /// Rust language entry point for this thread returns (usually implemented by the C or OS runtime).
    /// (`None` is an error, it means the callback has not been set up yet or is actively running.)
    pub(crate) on_stack_empty: Option<StackEmptyCallback<'tcx>>,

    /// The index of the topmost user-relevant frame in `stack`. This field must contain
    /// the value produced by `get_top_user_relevant_frame`.
    /// This field is a cache to reduce how often we call that method. The cache is manually
    /// maintained inside `MiriMachine::after_stack_push` and `MiriMachine::after_stack_pop`.
    top_user_relevant_frame: Option<usize>,

    /// The join status.
    join_status: ThreadJoinStatus,

    /// Stack of active unwind payloads for the current thread. Used for storing
    /// the argument of the call to `miri_start_unwind` (the payload) when unwinding.
    /// This is pointer-sized, and matches the `Payload` type in `src/libpanic_unwind/miri.rs`.
    ///
    /// In real unwinding, the payload gets passed as an argument to the landing pad,
    /// which then forwards it to 'Resume'. However this argument is implicit in MIR,
    /// so we have to store it out-of-band. When there are multiple active unwinds,
    /// the innermost one is always caught first, so we can store them as a stack.
    pub(crate) unwind_payloads: Vec<ImmTy<'tcx>>,

    /// Last OS error location in memory. It is a 32-bit integer.
    pub(crate) last_error: Option<MPlaceTy<'tcx>>,
}
212
/// Callback invoked when a thread's stack runs empty; see [`Thread::on_stack_empty`].
/// Returns `Poll::Ready(())` once the thread is done for good.
pub type StackEmptyCallback<'tcx> =
    Box<dyn FnMut(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, Poll<()>> + 'tcx>;
215
impl<'tcx> Thread<'tcx> {
    /// Get the name of the current thread if it was set.
    fn thread_name(&self) -> Option<&[u8]> {
        self.thread_name.as_deref()
    }

    /// Return whether this thread is enabled or not.
    pub fn is_enabled(&self) -> bool {
        self.state.is_enabled()
    }

    /// Get the name of the current thread for display purposes; will include thread ID if not set.
    fn thread_display_name(&self, id: ThreadId) -> String {
        if let Some(ref thread_name) = self.thread_name {
            String::from_utf8_lossy(thread_name).into_owned()
        } else {
            format!("unnamed-{}", id.index())
        }
    }

    /// Return the top user-relevant frame, if there is one. `skip` indicates how many top frames
    /// should be skipped.
    /// Note that the choice to return `None` here when there is no user-relevant frame is part of
    /// justifying the optimization that only pushes of user-relevant frames require updating the
    /// `top_user_relevant_frame` field.
    fn compute_top_user_relevant_frame(&self, skip: usize) -> Option<usize> {
        // We are searching for the frame with maximum relevance, scanning from the top of
        // the stack downwards.
        let mut best = None;
        for (idx, frame) in self.stack.iter().enumerate().rev().skip(skip) {
            let relevance = frame.extra.user_relevance;
            if relevance == u8::MAX {
                // We can short-circuit this search.
                return Some(idx);
            }
            if best.is_none_or(|(_best_idx, best_relevance)| best_relevance < relevance) {
                // The previous best frame has strictly worse relevance, so despite us being lower
                // in the stack, we win. (On ties, the topmost frame wins since we require a
                // strict improvement to replace it.)
                best = Some((idx, relevance));
            }
        }
        best.map(|(idx, _relevance)| idx)
    }

    /// Re-compute the top user-relevant frame from scratch. `skip` indicates how many top frames
    /// should be skipped.
    pub fn recompute_top_user_relevant_frame(&mut self, skip: usize) {
        self.top_user_relevant_frame = self.compute_top_user_relevant_frame(skip);
    }

    /// Set the top user-relevant frame to the given value. Must be equal to what
    /// `get_top_user_relevant_frame` would return!
    pub fn set_top_user_relevant_frame(&mut self, frame_idx: usize) {
        debug_assert_eq!(Some(frame_idx), self.compute_top_user_relevant_frame(0));
        self.top_user_relevant_frame = Some(frame_idx);
    }

    /// Returns the topmost frame that is considered user-relevant, or the
    /// top of the stack if there is no such frame, or `None` if the stack is empty.
    pub fn top_user_relevant_frame(&self) -> Option<usize> {
        // This can be called upon creation of an allocation. We create allocations while setting up
        // parts of the Rust runtime when we do not have any stack frames yet, so we need to handle
        // empty stacks.
        self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
    }

    /// The `user_relevance` of the most user-relevant frame, or 0 if the stack is empty.
    pub fn current_user_relevance(&self) -> u8 {
        self.top_user_relevant_frame()
            .map(|frame_idx| self.stack[frame_idx].extra.user_relevance)
            .unwrap_or(0)
    }

    /// The current span of the most user-relevant frame, or `DUMMY_SP` if the stack is empty.
    pub fn current_user_relevant_span(&self) -> Span {
        // Sanity-check that the cache is up-to-date.
        debug_assert_eq!(self.top_user_relevant_frame, self.compute_top_user_relevant_frame(0));
        self.top_user_relevant_frame()
            .map(|frame_idx| self.stack[frame_idx].current_span())
            .unwrap_or(rustc_span::DUMMY_SP)
    }
}
294
295impl<'tcx> std::fmt::Debug for Thread<'tcx> {
296    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
297        write!(
298            f,
299            "{}({:?}, {:?})",
300            String::from_utf8_lossy(self.thread_name().unwrap_or(b"<unnamed>")),
301            self.state,
302            self.join_status
303        )
304    }
305}
306
307impl<'tcx> Thread<'tcx> {
308    fn new(name: Option<&str>, on_stack_empty: Option<StackEmptyCallback<'tcx>>) -> Self {
309        Self {
310            state: ThreadState::Enabled,
311            thread_name: name.map(|name| Vec::from(name.as_bytes())),
312            stack: Vec::new(),
313            origin_span: DUMMY_SP,
314            top_user_relevant_frame: None,
315            join_status: ThreadJoinStatus::Joinable,
316            unwind_payloads: Vec::new(),
317            last_error: None,
318            on_stack_empty,
319        }
320    }
321}
322
impl VisitProvenance for Thread<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        // Exhaustively destructure so that adding a field forces us to decide
        // whether it can carry provenance.
        let Thread {
            unwind_payloads: panic_payload,
            last_error,
            stack,
            origin_span: _,
            top_user_relevant_frame: _,
            state: _,
            thread_name: _,
            join_status: _,
            on_stack_empty: _, // we assume the closure captures no GC-relevant state
        } = self;

        for payload in panic_payload {
            payload.visit_provenance(visit);
        }
        last_error.visit_provenance(visit);
        for frame in stack {
            frame.visit_provenance(visit)
        }
    }
}
346
impl VisitProvenance for Frame<'_, Provenance, FrameExtra<'_>> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let return_place = self.return_place();
        let Frame {
            locals,
            extra,
            // There are some private fields we cannot access; they contain no tags.
            ..
        } = self;

        // Return place.
        return_place.visit_provenance(visit);
        // Locals: a local is either stored in memory (place) or as an immediate value.
        for local in locals.iter() {
            match local.as_mplace_or_imm() {
                None => {}
                Some(Either::Left((ptr, meta))) => {
                    ptr.visit_provenance(visit);
                    meta.visit_provenance(visit);
                }
                Some(Either::Right(imm)) => {
                    imm.visit_provenance(visit);
                }
            }
        }

        extra.visit_provenance(visit);
    }
}
376
/// The moment in time when a blocked thread should be woken up.
#[derive(Debug)]
enum Timeout {
    /// A deadline on the monotonic clock.
    Monotonic(Instant),
    /// A deadline on the wall clock (`SystemTime`).
    RealTime(SystemTime),
}
383
384impl Timeout {
385    /// How long do we have to wait from now until the specified time?
386    fn get_wait_time(&self, clock: &MonotonicClock) -> Duration {
387        match self {
388            Timeout::Monotonic(instant) => instant.duration_since(clock.now()),
389            Timeout::RealTime(time) =>
390                time.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO),
391        }
392    }
393
394    /// Will try to add `duration`, but if that overflows it may add less.
395    fn add_lossy(&self, duration: Duration) -> Self {
396        match self {
397            Timeout::Monotonic(i) => Timeout::Monotonic(i.add_lossy(duration)),
398            Timeout::RealTime(s) => {
399                // If this overflows, try adding just 1h and assume that will not overflow.
400                Timeout::RealTime(
401                    s.checked_add(duration)
402                        .unwrap_or_else(|| s.checked_add(Duration::from_secs(3600)).unwrap()),
403                )
404            }
405        }
406    }
407}
408
/// The clock to use for the timeout you are asking for.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum TimeoutClock {
    /// Monotonic clock (never goes backwards).
    Monotonic,
    /// Wall clock (`SystemTime`).
    RealTime,
}
415
/// Whether the timeout is relative (to now) or absolute (a point in time).
#[derive(Debug, Copy, Clone)]
pub enum TimeoutAnchor {
    Relative,
    Absolute,
}
422
/// An error signaling that the requested thread doesn't exist.
#[derive(Debug, Copy, Clone)]
pub struct ThreadNotFound;
426
/// A set of threads.
#[derive(Debug)]
pub struct ThreadManager<'tcx> {
    /// Identifier of the currently active thread.
    active_thread: ThreadId,
    /// Threads used in the program.
    ///
    /// Note that this vector also contains terminated threads.
    threads: IndexVec<ThreadId, Thread<'tcx>>,
    /// A mapping from a thread-local static to the thread specific allocation.
    thread_local_allocs: FxHashMap<(DefId, ThreadId), StrictPointer>,
    /// A flag that indicates that we should change the active thread.
    /// Completely ignored in GenMC mode.
    yield_active_thread: bool,
    /// If `true`, do round-robin scheduling of threads; otherwise, scheduling is randomized.
    fixed_scheduling: bool,
}
444
445impl VisitProvenance for ThreadManager<'_> {
446    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
447        let ThreadManager {
448            threads,
449            thread_local_allocs,
450            active_thread: _,
451            yield_active_thread: _,
452            fixed_scheduling: _,
453        } = self;
454
455        for thread in threads {
456            thread.visit_provenance(visit);
457        }
458        for ptr in thread_local_allocs.values() {
459            ptr.visit_provenance(visit);
460        }
461    }
462}
463
impl<'tcx> ThreadManager<'tcx> {
    /// Create a thread manager containing only the (enabled) main thread.
    pub(crate) fn new(config: &MiriConfig) -> Self {
        let mut threads = IndexVec::new();
        // Create the main thread and add it to the list of threads.
        threads.push(Thread::new(Some("main"), None));
        Self {
            active_thread: ThreadId::MAIN_THREAD,
            threads,
            thread_local_allocs: Default::default(),
            yield_active_thread: false,
            fixed_scheduling: config.fixed_scheduling,
        }
    }

    /// Set up the main thread: install its `on_stack_empty` callback and
    /// mark it as detached (except on Windows, where it can be joined).
    pub(crate) fn init(
        ecx: &mut MiriInterpCx<'tcx>,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) {
        ecx.machine.threads.threads[ThreadId::MAIN_THREAD].on_stack_empty =
            Some(on_main_stack_empty);
        if ecx.tcx.sess.target.os != Os::Windows {
            // The main thread can *not* be joined on except on windows.
            ecx.machine.threads.threads[ThreadId::MAIN_THREAD].join_status =
                ThreadJoinStatus::Detached;
        }
    }

    /// Convert an integer into a `ThreadId`, checking that such a thread actually exists.
    pub fn thread_id_try_from(&self, id: impl TryInto<u32>) -> Result<ThreadId, ThreadNotFound> {
        if let Ok(id) = id.try_into()
            && usize::try_from(id).is_ok_and(|id| id < self.threads.len())
        {
            Ok(ThreadId(id))
        } else {
            Err(ThreadNotFound)
        }
    }

    /// Check if we have an allocation for the given thread local static for the
    /// active thread.
    fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<StrictPointer> {
        self.thread_local_allocs.get(&(def_id, self.active_thread)).cloned()
    }

    /// Set the pointer for the allocation of the given thread local
    /// static for the active thread.
    ///
    /// Panics if a thread local is initialized twice for the same thread.
    fn set_thread_local_alloc(&mut self, def_id: DefId, ptr: StrictPointer) {
        self.thread_local_allocs.try_insert((def_id, self.active_thread), ptr).unwrap();
    }

    /// Borrow the stack of the active thread.
    pub fn active_thread_stack(&self) -> &[Frame<'tcx, Provenance, FrameExtra<'tcx>>] {
        &self.threads[self.active_thread].stack
    }

    /// Mutably borrow the stack of the active thread.
    pub fn active_thread_stack_mut(
        &mut self,
    ) -> &mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        &mut self.threads[self.active_thread].stack
    }

    /// Iterate over the stacks of all currently blocked threads.
    pub fn all_blocked_stacks(
        &self,
    ) -> impl Iterator<Item = (ThreadId, &[Frame<'tcx, Provenance, FrameExtra<'tcx>>])> {
        self.threads
            .iter_enumerated()
            .filter(|(_id, t)| matches!(t.state, ThreadState::Blocked { .. }))
            .map(|(id, t)| (id, &t.stack[..]))
    }

    /// Create a new thread and return its id.
    fn create_thread(&mut self, on_stack_empty: StackEmptyCallback<'tcx>) -> ThreadId {
        let new_thread_id = ThreadId::new(self.threads.len());
        self.threads.push(Thread::new(None, Some(on_stack_empty)));
        new_thread_id
    }

    /// Set an active thread and return the id of the thread that was active before.
    fn set_active_thread_id(&mut self, id: ThreadId) -> ThreadId {
        assert!(id.index() < self.threads.len());
        info!(
            "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------",
            self.get_thread_display_name(id),
            self.get_thread_display_name(self.active_thread)
        );
        std::mem::replace(&mut self.active_thread, id)
    }

    /// Get the id of the currently active thread.
    pub fn active_thread(&self) -> ThreadId {
        self.active_thread
    }

    /// Get the total number of threads that were ever spawned by this program.
    pub fn get_total_thread_count(&self) -> usize {
        self.threads.len()
    }

    /// Get the number of threads that are currently live, i.e., not yet terminated.
    /// (They might be blocked.)
    pub fn get_live_thread_count(&self) -> usize {
        self.threads.iter().filter(|t| !t.state.is_terminated()).count()
    }

    /// Has the given thread terminated?
    fn has_terminated(&self, thread_id: ThreadId) -> bool {
        self.threads[thread_id].state.is_terminated()
    }

    /// Have all threads terminated?
    fn have_all_terminated(&self) -> bool {
        self.threads.iter().all(|thread| thread.state.is_terminated())
    }

    /// Enable the thread for execution. The thread must be terminated.
    fn enable_thread(&mut self, thread_id: ThreadId) {
        assert!(self.has_terminated(thread_id));
        self.threads[thread_id].state = ThreadState::Enabled;
    }

    /// Get a mutable borrow of the currently active thread.
    pub fn active_thread_mut(&mut self) -> &mut Thread<'tcx> {
        &mut self.threads[self.active_thread]
    }

    /// Get a shared borrow of the currently active thread.
    pub fn active_thread_ref(&self) -> &Thread<'tcx> {
        &self.threads[self.active_thread]
    }

    /// Get a shared borrow of the given thread.
    pub fn thread_ref(&self, thread_id: ThreadId) -> &Thread<'tcx> {
        &self.threads[thread_id]
    }

    /// Mark the thread as detached, which means that no other thread will try
    /// to join it and the thread is responsible for cleaning up.
    ///
    /// `allow_terminated_joined` allows detaching joined threads that have already terminated.
    /// This matches Windows's behavior for `CloseHandle`.
    ///
    /// See <https://docs.microsoft.com/en-us/windows/win32/procthread/thread-handles-and-identifiers>:
    /// > The handle is valid until closed, even after the thread it represents has been terminated.
    fn detach_thread(&mut self, id: ThreadId, allow_terminated_joined: bool) -> InterpResult<'tcx> {
        // NOTE: In GenMC mode, we treat detached threads like regular threads that are never joined, so there is no special handling required here.
        trace!("detaching {:?}", id);

        let is_ub = if allow_terminated_joined && self.threads[id].state.is_terminated() {
            // "Detached" in particular means "not yet joined". Redundant detaching is still UB.
            self.threads[id].join_status == ThreadJoinStatus::Detached
        } else {
            self.threads[id].join_status != ThreadJoinStatus::Joinable
        };
        if is_ub {
            throw_ub_format!("trying to detach thread that was already detached or joined");
        }

        self.threads[id].join_status = ThreadJoinStatus::Detached;
        interp_ok(())
    }

    /// Set the name of the given thread.
    pub fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>) {
        self.threads[thread].thread_name = Some(new_thread_name);
    }

    /// Get the name of the given thread.
    pub fn get_thread_name(&self, thread: ThreadId) -> Option<&[u8]> {
        self.threads[thread].thread_name()
    }

    /// Get the name of the given thread for display purposes (never `None`).
    pub fn get_thread_display_name(&self, thread: ThreadId) -> String {
        self.threads[thread].thread_display_name(thread)
    }

    /// Put the active thread into the blocked state. Panics if it was not enabled.
    fn block_thread(
        &mut self,
        reason: BlockReason,
        timeout: Option<Timeout>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let state = &mut self.threads[self.active_thread].state;
        assert!(state.is_enabled());
        *state = ThreadState::Blocked { reason, timeout, callback }
    }

    /// Change the active thread to some enabled thread.
    fn yield_active_thread(&mut self) {
        // We do not yield immediately, as swapping out the current stack while executing a MIR statement
        // could lead to all sorts of confusion.
        // We should only switch stacks between steps.
        self.yield_active_thread = true;
    }
}
660
// Make the private scheduling helpers below available on the interpreter context.
impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
662trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
    #[inline]
    /// Invoke the active thread's `on_stack_empty` callback once and restore it afterwards.
    /// While the callback runs, the field is `None` (taken out), which also guards
    /// against re-entrant invocation.
    fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
        let this = self.eval_context_mut();
        let active_thread = this.active_thread_mut();
        active_thread.origin_span = DUMMY_SP; // reset, the old value no longer applied
        let mut callback = active_thread
            .on_stack_empty
            .take()
            .expect("`on_stack_empty` not set up, or already running");
        let res = callback(this)?;
        this.active_thread_mut().on_stack_empty = Some(callback);
        interp_ok(res)
    }
676
    /// Decide which action to take next and on which thread.
    ///
    /// The currently implemented scheduling policy is the one that is commonly
    /// used in stateless model checkers such as Loom: run the active thread as
    /// long as we can and switch only when we have to (the active thread was
    /// blocked, terminated, or has explicitly asked to be preempted).
    ///
    /// If GenMC mode is active, the scheduling is instead handled by GenMC.
    fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
        let this = self.eval_context_mut();

        // In GenMC mode, we let GenMC do the scheduling.
        if this.machine.data_race.as_genmc_ref().is_some() {
            loop {
                let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
                let Some(next_thread_id) = genmc_ctx.schedule_thread(this)? else {
                    return interp_ok(SchedulingAction::ExecuteStep);
                };
                // If a thread is blocked on GenMC, we have to implicitly unblock it when it gets scheduled again.
                if this.machine.threads.threads[next_thread_id]
                    .state
                    .is_blocked_on(BlockReason::Genmc)
                {
                    info!(
                        "GenMC: scheduling blocked thread {next_thread_id:?}, so we unblock it now."
                    );
                    this.unblock_thread(next_thread_id, BlockReason::Genmc)?;
                }
                // The thread we just unblocked may have been blocked again during the unblocking callback.
                // In that case, we need to ask for a different thread to run next.
                let thread_manager = &mut this.machine.threads;
                if thread_manager.threads[next_thread_id].state.is_enabled() {
                    // Set the new active thread.
                    thread_manager.active_thread = next_thread_id;
                    return interp_ok(SchedulingAction::ExecuteStep);
                }
            }
        }

        // We are not in GenMC mode, so we control the scheduling.
        let thread_manager = &this.machine.threads;
        // This thread and the program can keep going.
        if thread_manager.threads[thread_manager.active_thread].state.is_enabled()
            && !thread_manager.yield_active_thread
        {
            // The currently active thread is still enabled, just continue with it.
            return interp_ok(SchedulingAction::ExecuteStep);
        }

        // The active thread yielded or got terminated. Let's see if there are any I/O events
        // or timeouts to take care of.

        if this.machine.communicate() {
            // When isolation is disabled we need to check for events for
            // threads which are blocked on host I/O.
            // We do this before running any other threads such that the threads
            // which received events are available for scheduling afterwards.

            // Perform a non-blocking poll for newly available I/O events from the OS.
            this.poll_and_unblock(Some(Duration::ZERO))?;
        }

        // We also check timeouts before running any other thread, to ensure that timeouts
        // "in the past" fire before any other thread can take an action. This ensures that for
        // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by
        // abstime has already been passed at the time of the call".
        // <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
        let potential_sleep_time = this.unblock_expired_timeouts()?;

        let thread_manager = &mut this.machine.threads;
        let rng = this.machine.rng.get_mut();

        // No callbacks immediately scheduled, pick a regular thread to execute.
        // The active thread blocked or yielded. So we go search for another enabled thread.
        // We build the list of threads by starting with the threads after the current one, followed by
        // the threads before the current one and then the current thread itself (i.e., this iterator acts
        // like `threads.rotate_left(self.active_thread.index() + 1)`. This ensures that if we pick the first
        // eligible thread, we do regular round-robin scheduling, and all threads get a chance to take a step.
        let mut threads_iter = thread_manager
            .threads
            .iter_enumerated()
            .skip(thread_manager.active_thread.index() + 1)
            .chain(
                thread_manager
                    .threads
                    .iter_enumerated()
                    .take(thread_manager.active_thread.index() + 1),
            )
            .filter(|(_id, thread)| thread.state.is_enabled());
        // Pick a new thread, and switch to it.
        // With fixed scheduling we take the first eligible thread (round-robin);
        // otherwise we pick a random eligible one.
        let new_thread = if thread_manager.fixed_scheduling {
            threads_iter.next()
        } else {
            threads_iter.choose(rng)
        };

        if let Some((id, _thread)) = new_thread {
            if thread_manager.active_thread != id {
                info!(
                    "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------",
                    thread_manager.get_thread_display_name(id),
                    thread_manager.get_thread_display_name(thread_manager.active_thread)
                );
                thread_manager.active_thread = id;
            }
        }
        // This completes the `yield`, if any was requested.
        thread_manager.yield_active_thread = false;

        if thread_manager.threads[thread_manager.active_thread].state.is_enabled() {
            return interp_ok(SchedulingAction::ExecuteStep);
        }
        // We have not found a thread to execute.
        if thread_manager.threads.iter().all(|thread| thread.state.is_terminated()) {
            unreachable!("all threads terminated without the main thread terminating?!");
        } else if let Some(sleep_time) = potential_sleep_time {
            // All threads are currently blocked, but we have unexecuted
            // timeout_callbacks, which may unblock some of the threads. Hence,
            // sleep until the first callback.
            interp_ok(SchedulingAction::SleepAndWaitForIo(Some(sleep_time)))
        } else if thread_manager
            .threads
            .iter()
            .any(|thread| thread.state.is_blocked_on(BlockReason::IO))
        {
            // At least one thread is blocked on host I/O but doesn't
            // have a timeout set. Hence, we sleep indefinitely in the
            // hope that eventually an I/O event for this thread happens.
            interp_ok(SchedulingAction::SleepAndWaitForIo(None))
        } else {
            throw_machine_stop!(TerminationInfo::GlobalDeadlock);
        }
    }
810
811    /// Poll for I/O events until either an I/O event happened or the timeout expired.
812    /// The different timeout values are described in [`BlockingIoManager::poll`].
813    fn poll_and_unblock(&mut self, timeout: Option<Duration>) -> InterpResult<'tcx> {
814        let this = self.eval_context_mut();
815
816        let ready = match this.machine.blocking_io.poll(timeout) {
817            Ok(ready) => ready,
818            // We can ignore errors originating from interrupts; that's just a spurious wakeup.
819            Err(e) if e.kind() == io::ErrorKind::Interrupted => return interp_ok(()),
820            // For other errors we panic. On Linux and BSD hosts this should only be
821            // reachable when a system resource error (e.g. ENOMEM or ENOSPC) occurred.
822            Err(e) => panic!("unexpected error while polling: {e}"),
823        };
824
825        ready.into_iter().try_for_each(|thread_id| this.unblock_thread(thread_id, BlockReason::IO))
826    }
827
    /// Find all threads with expired timeouts, unblock them and execute their timeout callbacks.
    ///
    /// This method returns the minimum duration until the next thread timeout expires.
    /// If all ready threads have no timeout set, [`None`] is returned.
    fn unblock_expired_timeouts(&mut self) -> InterpResult<'tcx, Option<Duration>> {
        let this = self.eval_context_mut();
        let clock = &this.machine.monotonic_clock;

        // Smallest remaining wait time among all timeouts that have *not* yet expired.
        let mut min_wait_time = Option::<Duration>::None;
        // Callbacks of expired timeouts; collected during the loop, invoked afterwards.
        let mut callbacks = Vec::new();

        for (id, thread) in this.machine.threads.threads.iter_enumerated_mut() {
            match &thread.state {
                ThreadState::Blocked { timeout: Some(timeout), .. } => {
                    let wait_time = timeout.get_wait_time(clock);
                    if wait_time.is_zero() {
                        // The timeout expired for this thread.
                        // Swap in `Enabled` so the thread becomes schedulable again,
                        // and take ownership of the old state to extract the callback.
                        let old_state = mem::replace(&mut thread.state, ThreadState::Enabled);
                        let ThreadState::Blocked { callback, .. } = old_state else {
                            unreachable!()
                        };
                        // Add callback to list to be run after this loop because of borrow-checking.
                        callbacks.push((id, callback));
                    } else {
                        // Update `min_wait_time` to contain the smallest duration until
                        // the next timeout expires.
                        min_wait_time = Some(wait_time.min(min_wait_time.unwrap_or(Duration::MAX)));
                    }
                }
                _ => {}
            }
        }

        for (thread, callback) in callbacks {
            // This back-and-forth with `set_active_thread` is here because of two
            // design decisions:
            // 1. Make the caller and not the callback responsible for changing
            //    thread.
            // 2. Make the scheduler the only place that can change the active
            //    thread.
            let old_thread = this.machine.threads.set_active_thread_id(thread);
            callback.call(this, UnblockKind::TimedOut)?;
            this.machine.threads.set_active_thread_id(old_thread);
        }

        interp_ok(min_wait_time)
    }
875}
876
// Public interface to thread management.
// This empty impl opts the interpreter context into the extension trait below;
// all methods are default-implemented on the trait itself.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Convert an integer of any width into a `ThreadId`, failing if there is
    /// no thread with that id.
    #[inline]
    fn thread_id_try_from(&self, id: impl TryInto<u32>) -> Result<ThreadId, ThreadNotFound> {
        self.eval_context_ref().machine.threads.thread_id_try_from(id)
    }

    /// Get a thread-specific allocation id for the given thread-local static.
    /// If needed, allocate a new one.
    fn get_or_create_thread_local_alloc(
        &mut self,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        let this = self.eval_context_mut();
        let tcx = this.tcx;
        if let Some(old_alloc) = this.machine.threads.get_thread_local_alloc_id(def_id) {
            // We already have a thread-specific allocation id for this
            // thread-local static.
            interp_ok(old_alloc)
        } else {
            // We need to allocate a thread-specific allocation id for this
            // thread-local static.
            // First, we compute the initial value for this static.
            if tcx.is_foreign_item(def_id) {
                throw_unsup_format!("foreign thread-local statics are not supported");
            }
            let params = this.machine.get_default_alloc_params();
            let alloc = this.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
            // We make a full copy of this allocation.
            let mut alloc = alloc.inner().adjust_from_tcx(
                &this.tcx,
                |bytes, align| {
                    interp_ok(MiriAllocBytes::from_bytes(
                        std::borrow::Cow::Borrowed(bytes),
                        align,
                        params,
                    ))
                },
                |ptr| this.global_root_pointer(ptr),
            )?;
            // This allocation will be deallocated when the thread dies, so it is not in read-only memory.
            alloc.mutability = Mutability::Mut;
            // Create a fresh allocation with this content.
            let ptr = this.insert_allocation(alloc, MiriMemoryKind::Tls.into())?;
            this.machine.threads.set_thread_local_alloc(def_id, ptr);
            interp_ok(ptr)
        }
    }

    /// Start a regular (non-main) thread.
    ///
    /// `thread` is an optional place to store the new thread's id in;
    /// `start_routine`/`func_arg` are the function pointer and argument the new
    /// thread starts executing; `ret_layout` describes the (currently ignored)
    /// return value of that function.
    #[inline]
    fn start_regular_thread(
        &mut self,
        thread: Option<MPlaceTy<'tcx>>,
        start_routine: Pointer,
        start_abi: ExternAbi,
        func_arg: ImmTy<'tcx>,
        ret_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ThreadId> {
        let this = self.eval_context_mut();

        // Create the new thread
        let current_span = this.machine.current_user_relevant_span();
        let new_thread_id = this.machine.threads.create_thread({
            let mut state = tls::TlsDtorsState::default();
            Box::new(move |m| state.on_stack_empty(m))
        });
        // Inform the data-race detector / GenMC about the new thread.
        match &mut this.machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Vclocks(data_race) =>
                data_race.thread_created(&this.machine.threads, new_thread_id, current_span),
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.handle_thread_create(
                    &this.machine.threads,
                    start_routine,
                    &func_arg,
                    new_thread_id,
                )?,
        }
        // Write the current thread-id, switch to the next thread later
        // to treat this write operation as occurring on the current thread.
        if let Some(thread_info_place) = thread {
            this.write_scalar(
                Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
                &thread_info_place,
            )?;
        }

        // Finally switch to new thread so that we can push the first stackframe.
        // After this all accesses will be treated as occurring in the new thread.
        let old_thread_id = this.machine.threads.set_active_thread_id(new_thread_id);

        // The child inherits its parent's cpu affinity.
        if let Some(cpuset) = this.machine.thread_cpu_affinity.get(&old_thread_id).cloned() {
            this.machine.thread_cpu_affinity.insert(new_thread_id, cpuset);
        }

        // Perform the function pointer load in the new thread frame.
        let instance = this.get_ptr_fn(start_routine)?.as_instance()?;

        // Note: the returned value is currently ignored (see the FIXME in
        // pthread_join in shims/unix/thread.rs) because the Rust standard library does not use
        // it.
        let ret_place = this.allocate(ret_layout, MiriMemoryKind::Machine.into())?;

        this.call_thread_root_function(
            instance,
            start_abi,
            &[func_arg],
            Some(&ret_place),
            current_span,
        )?;

        // Restore the old active thread frame.
        this.machine.threads.set_active_thread_id(old_thread_id);

        interp_ok(new_thread_id)
    }

    /// Handles thread termination of the active thread: wakes up threads joining on this one,
    /// and deals with the thread's thread-local statics according to `tls_alloc_action`.
    ///
    /// This is called by the eval loop when a thread's on_stack_empty returns `Ready`.
    fn terminate_active_thread(&mut self, tls_alloc_action: TlsAllocAction) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        // Mark thread as terminated.
        let thread = this.active_thread_mut();
        assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
        thread.state = ThreadState::Terminated;

        // Deallocate TLS.
        let gone_thread = this.active_thread();
        {
            let mut free_tls_statics = Vec::new();
            this.machine.threads.thread_local_allocs.retain(|&(_def_id, thread), &mut alloc_id| {
                if thread != gone_thread {
                    // A different thread, keep this static around.
                    return true;
                }
                // Delete this static from the map and from memory.
                // We cannot free directly here as we cannot use `?` in this context.
                free_tls_statics.push(alloc_id);
                false
            });
            // Now free the TLS statics.
            for ptr in free_tls_statics {
                match tls_alloc_action {
                    TlsAllocAction::Deallocate =>
                        this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?,
                    TlsAllocAction::Leak =>
                        // Registering the allocation as a static root makes the
                        // leak checker treat everything reachable from it as allowed to leak.
                        if let Some(alloc) = ptr.provenance.get_alloc_id() {
                            trace!(
                                "Thread-local static leaked and stored as static root: {:?}",
                                alloc
                            );
                            this.machine.static_roots.push(alloc);
                        },
                }
            }
        }

        match &mut this.machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Vclocks(data_race) =>
                data_race.thread_terminated(&this.machine.threads),
            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
                // Inform GenMC that the thread finished.
                // This needs to happen once all accesses to the thread are done, including freeing any TLS statics.
                genmc_ctx.handle_thread_finish(&this.machine.threads)
            }
        }

        // Unblock joining threads.
        let unblock_reason = BlockReason::Join(gone_thread);
        let threads = &this.machine.threads.threads;
        // Collect first so we don't hold the borrow of `threads` while unblocking.
        let joining_threads = threads
            .iter_enumerated()
            .filter(|(_, thread)| thread.state.is_blocked_on(unblock_reason))
            .map(|(id, _)| id)
            .collect::<Vec<_>>();
        for thread in joining_threads {
            this.unblock_thread(thread, unblock_reason)?;
        }

        interp_ok(())
    }

    /// Block the current thread, with an optional timeout.
    /// The callback will be invoked when the thread gets unblocked.
    #[inline]
    fn block_thread(
        &mut self,
        reason: BlockReason,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let this = self.eval_context_mut();
        if timeout.is_some() && this.machine.data_race.as_genmc_ref().is_some() {
            panic!("Unimplemented: Timeouts not yet supported in GenMC mode.");
        }
        // Turn the (clock, anchor, duration) triple into an absolute deadline.
        let timeout = timeout.map(|(clock, anchor, duration)| {
            let anchor = match clock {
                TimeoutClock::RealTime => {
                    // Real-time clocks only exist when we can talk to the host.
                    assert!(
                        this.machine.communicate(),
                        "cannot have `RealTime` timeout with isolation enabled!"
                    );
                    Timeout::RealTime(match anchor {
                        TimeoutAnchor::Absolute => SystemTime::UNIX_EPOCH,
                        TimeoutAnchor::Relative => SystemTime::now(),
                    })
                }
                TimeoutClock::Monotonic =>
                    Timeout::Monotonic(match anchor {
                        TimeoutAnchor::Absolute => this.machine.monotonic_clock.epoch(),
                        TimeoutAnchor::Relative => this.machine.monotonic_clock.now(),
                    }),
            };
            anchor.add_lossy(duration)
        });
        this.machine.threads.block_thread(reason, timeout, callback);
    }

    /// Put the blocked thread into the enabled state.
    /// Sanity-checks that the thread previously was blocked for the right reason.
    fn unblock_thread(&mut self, thread: ThreadId, reason: BlockReason) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        // Swap in `Enabled` and take ownership of the old state to get at the callback.
        let old_state =
            mem::replace(&mut this.machine.threads.threads[thread].state, ThreadState::Enabled);
        let callback = match old_state {
            ThreadState::Blocked { reason: actual_reason, callback, .. } => {
                assert_eq!(
                    reason, actual_reason,
                    "unblock_thread: thread was blocked for the wrong reason"
                );
                callback
            }
            _ => panic!("unblock_thread: thread was not blocked"),
        };
        // The callback must be executed in the previously blocked thread.
        let old_thread = this.machine.threads.set_active_thread_id(thread);
        callback.call(this, UnblockKind::Ready)?;
        this.machine.threads.set_active_thread_id(old_thread);
        interp_ok(())
    }

    /// Detach the given thread, so that nobody can join it anymore.
    /// `allow_terminated_joined` controls whether detaching an
    /// already-joined, terminated thread is accepted.
    #[inline]
    fn detach_thread(
        &mut self,
        thread_id: ThreadId,
        allow_terminated_joined: bool,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.machine.threads.detach_thread(thread_id, allow_terminated_joined)
    }

    /// Mark that the active thread tries to join the thread with `joined_thread_id`.
    ///
    /// When the join is successful (immediately, or as soon as the joined thread finishes), `success_retval` will be written to `return_dest`.
    fn join_thread(
        &mut self,
        joined_thread_id: ThreadId,
        success_retval: Scalar,
        return_dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let thread_mgr = &mut this.machine.threads;
        if thread_mgr.threads[joined_thread_id].join_status == ThreadJoinStatus::Detached {
            // On Windows this corresponds to joining on a closed handle.
            throw_ub_format!("trying to join a detached thread");
        }

        // Shared "join completed" logic: establish synchronization with the
        // joined thread and write the success value to the destination.
        fn after_join<'tcx>(
            this: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
            joined_thread_id: ThreadId,
            success_retval: Scalar,
            return_dest: &MPlaceTy<'tcx>,
        ) -> InterpResult<'tcx> {
            let threads = &this.machine.threads;
            match &mut this.machine.data_race {
                GlobalDataRaceHandler::None => {}
                GlobalDataRaceHandler::Vclocks(data_race) =>
                    data_race.thread_joined(threads, joined_thread_id),
                GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                    genmc_ctx.handle_thread_join(threads.active_thread, joined_thread_id)?,
            }
            this.write_scalar(success_retval, return_dest)?;
            interp_ok(())
        }

        // Mark the joined thread as being joined so that we detect if other
        // threads try to join it.
        thread_mgr.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined;
        if !thread_mgr.threads[joined_thread_id].state.is_terminated() {
            trace!(
                "{:?} blocked on {:?} when trying to join",
                thread_mgr.active_thread, joined_thread_id
            );
            if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
                genmc_ctx.handle_thread_join(thread_mgr.active_thread, joined_thread_id)?;
            }

            // The joined thread is still running, we need to wait for it.
            // Once we get unblocked, perform the appropriate synchronization and write the return value.
            let dest = return_dest.clone();
            thread_mgr.block_thread(
                BlockReason::Join(joined_thread_id),
                None,
                callback!(
                    @capture<'tcx> {
                        joined_thread_id: ThreadId,
                        dest: MPlaceTy<'tcx>,
                        success_retval: Scalar,
                    }
                    |this, unblock: UnblockKind| {
                        // Joins have no timeout, so the only way to get here is `Ready`.
                        assert_eq!(unblock, UnblockKind::Ready);
                        after_join(this, joined_thread_id, success_retval, &dest)
                    }
                ),
            );
        } else {
            // The thread has already terminated - establish happens-before and write the return value.
            after_join(this, joined_thread_id, success_retval, return_dest)?;
        }
        interp_ok(())
    }

    /// Mark that the active thread tries to exclusively join the thread with `joined_thread_id`.
    /// If the thread is already joined by another thread, it will throw UB.
    ///
    /// When the join is successful (immediately, or as soon as the joined thread finishes), `success_retval` will be written to `return_dest`.
    fn join_thread_exclusive(
        &mut self,
        joined_thread_id: ThreadId,
        success_retval: Scalar,
        return_dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let threads = &this.machine.threads.threads;
        if threads[joined_thread_id].join_status == ThreadJoinStatus::Joined {
            throw_ub_format!("trying to join an already joined thread");
        }

        if joined_thread_id == this.machine.threads.active_thread {
            throw_ub_format!("trying to join itself");
        }

        // Sanity check `join_status`.
        assert!(
            threads
                .iter()
                .all(|thread| { !thread.state.is_blocked_on(BlockReason::Join(joined_thread_id)) }),
            "this thread already has threads waiting for its termination"
        );

        this.join_thread(joined_thread_id, success_retval, return_dest)
    }

    /// Get the id of the currently active thread.
    #[inline]
    fn active_thread(&self) -> ThreadId {
        let this = self.eval_context_ref();
        this.machine.threads.active_thread()
    }

    /// Get mutable access to the currently active thread.
    #[inline]
    fn active_thread_mut(&mut self) -> &mut Thread<'tcx> {
        let this = self.eval_context_mut();
        this.machine.threads.active_thread_mut()
    }

    /// Get shared access to the currently active thread.
    #[inline]
    fn active_thread_ref(&self) -> &Thread<'tcx> {
        let this = self.eval_context_ref();
        this.machine.threads.active_thread_ref()
    }

    /// Get the total number of threads known to the thread manager.
    #[inline]
    fn get_total_thread_count(&self) -> usize {
        let this = self.eval_context_ref();
        this.machine.threads.get_total_thread_count()
    }

    /// Returns `true` if all threads have terminated.
    #[inline]
    fn have_all_terminated(&self) -> bool {
        let this = self.eval_context_ref();
        this.machine.threads.have_all_terminated()
    }

    /// Put the given thread into the enabled (schedulable) state.
    #[inline]
    fn enable_thread(&mut self, thread_id: ThreadId) {
        let this = self.eval_context_mut();
        this.machine.threads.enable_thread(thread_id);
    }

    /// Get the interpreter stack frames of the active thread.
    #[inline]
    fn active_thread_stack<'a>(&'a self) -> &'a [Frame<'tcx, Provenance, FrameExtra<'tcx>>] {
        let this = self.eval_context_ref();
        this.machine.threads.active_thread_stack()
    }

    /// Get mutable access to the interpreter stack frames of the active thread.
    #[inline]
    fn active_thread_stack_mut<'a>(
        &'a mut self,
    ) -> &'a mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        let this = self.eval_context_mut();
        this.machine.threads.active_thread_stack_mut()
    }

    /// Set the name of the current thread. The buffer must not include the null terminator.
    #[inline]
    fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>) {
        self.eval_context_mut().machine.threads.set_thread_name(thread, new_thread_name);
    }

    /// Get the name of the given thread, if one was set.
    #[inline]
    fn get_thread_name<'c>(&'c self, thread: ThreadId) -> Option<&'c [u8]>
    where
        'tcx: 'c,
    {
        self.eval_context_ref().machine.threads.get_thread_name(thread)
    }

    /// Request that the scheduler switch away from the active thread at the
    /// next scheduling point (the scheduler clears this request once handled).
    #[inline]
    fn yield_active_thread(&mut self) {
        self.eval_context_mut().machine.threads.yield_active_thread();
    }

    /// Randomly yield the active thread with probability `preemption_rate`.
    /// Does nothing under fixed (deterministic round-robin) scheduling.
    #[inline]
    fn maybe_preempt_active_thread(&mut self) {
        use rand::Rng as _;

        let this = self.eval_context_mut();
        if !this.machine.threads.fixed_scheduling
            && this.machine.rng.get_mut().random_bool(this.machine.preemption_rate)
        {
            this.yield_active_thread();
        }
    }

    /// Run the core interpreter loop. Returns only when an interrupt occurs (an error or program
    /// termination).
    fn run_threads(&mut self) -> InterpResult<'tcx, !> {
        let this = self.eval_context_mut();
        loop {
            // Honor Ctrl-C even if the interpreted program would never stop.
            if CTRL_C_RECEIVED.load(Relaxed) {
                this.machine.handle_abnormal_termination();
                throw_machine_stop!(TerminationInfo::Interrupted);
            }
            match this.schedule()? {
                SchedulingAction::ExecuteStep => {
                    if !this.step()? {
                        // See if this thread can do something else.
                        match this.run_on_stack_empty()? {
                            Poll::Pending => {} // keep going
                            Poll::Ready(()) =>
                                this.terminate_active_thread(TlsAllocAction::Deallocate)?,
                        }
                    }
                }
                SchedulingAction::SleepAndWaitForIo(duration) => {
                    if this.machine.communicate() {
                        // When we're running with isolation disabled, instead of
                        // strictly sleeping the duration we allow waking up
                        // early for I/O events from the OS.

                        this.poll_and_unblock(duration)?;
                    } else {
                        let duration = duration.expect(
                            "Infinite sleep should not be triggered when isolation is enabled",
                        );
                        this.machine.monotonic_clock.sleep(duration);
                    }
                }
            }
        }
    }
}