miri/concurrency/thread.rs

1//! Implements threads.
2
3use std::mem;
4use std::sync::atomic::Ordering::Relaxed;
5use std::task::Poll;
6use std::time::{Duration, SystemTime};
7
8use rand::seq::IteratorRandom;
9use rustc_abi::ExternAbi;
10use rustc_const_eval::CTRL_C_RECEIVED;
11use rustc_data_structures::either::Either;
12use rustc_data_structures::fx::FxHashMap;
13use rustc_hir::def_id::DefId;
14use rustc_index::{Idx, IndexVec};
15use rustc_middle::mir::Mutability;
16use rustc_middle::ty::layout::TyAndLayout;
17use rustc_span::{DUMMY_SP, Span};
18use rustc_target::spec::Os;
19
20use crate::concurrency::GlobalDataRaceHandler;
21use crate::shims::tls;
22use crate::*;
23
24#[derive(Clone, Copy, Debug, PartialEq)]
25enum SchedulingAction {
26    /// Execute step on the active thread.
27    ExecuteStep,
28    /// Execute a timeout callback.
29    ExecuteTimeoutCallback,
30    /// Wait for a bit, until there is a timeout to be called.
31    Sleep(Duration),
32}
33
34/// What to do with TLS allocations from terminated threads
35#[derive(Clone, Copy, Debug, PartialEq)]
36pub enum TlsAllocAction {
37    /// Deallocate backing memory of thread-local statics as usual
38    Deallocate,
39    /// Skip deallocating backing memory of thread-local statics and consider all memory reachable
40    /// from them as "allowed to leak" (like global `static`s).
41    Leak,
42}
43
44/// The argument type for the "unblock" callback, indicating why the thread got unblocked.
45#[derive(Clone, Copy, Debug, PartialEq)]
46pub enum UnblockKind {
47    /// Operation completed successfully, thread continues normal execution.
48    Ready,
49    /// The operation did not complete within its specified duration.
50    TimedOut,
51}
52
53/// Type alias for unblock callbacks, i.e. machine callbacks invoked when
54/// a thread gets unblocked.
55pub type DynUnblockCallback<'tcx> = DynMachineCallback<'tcx, UnblockKind>;
56
57/// A thread identifier.
58#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
59pub struct ThreadId(u32);
60
61impl ThreadId {
62    pub fn to_u32(self) -> u32 {
63        self.0
64    }
65
66    /// Create a new thread id from a `u32` without checking if this thread exists.
67    pub fn new_unchecked(id: u32) -> Self {
68        Self(id)
69    }
70
71    pub const MAIN_THREAD: ThreadId = ThreadId(0);
72}
73
74impl Idx for ThreadId {
75    fn new(idx: usize) -> Self {
76        ThreadId(u32::try_from(idx).unwrap())
77    }
78
79    fn index(self) -> usize {
80        usize::try_from(self.0).unwrap()
81    }
82}
83
84impl From<ThreadId> for u64 {
85    fn from(t: ThreadId) -> Self {
86        t.0.into()
87    }
88}
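
// A small usage sketch (illustrative only) of the `ThreadId` conversions defined above:
//
//     let id = ThreadId::new_unchecked(3);
//     assert_eq!(id.to_u32(), 3);
//     assert_eq!(u64::from(id), 3);
//     // `Idx` round-trips through `usize`, which is how `IndexVec` indexes threads.
//     assert_eq!(ThreadId::new(id.index()), id);
//     assert_eq!(ThreadId::MAIN_THREAD.to_u32(), 0);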
89
90/// Keeps track of what the thread is blocked on.
91#[derive(Debug, Copy, Clone, PartialEq, Eq)]
92pub enum BlockReason {
93    /// The thread tried to join the specified thread and is blocked until that
94    /// thread terminates.
95    Join(ThreadId),
96    /// Waiting for time to pass.
97    Sleep,
98    /// Blocked on a mutex.
99    Mutex,
100    /// Blocked on a condition variable.
101    Condvar,
102    /// Blocked on a reader-writer lock.
103    RwLock,
104    /// Blocked on a Futex variable.
105    Futex,
106    /// Blocked on an InitOnce.
107    InitOnce,
108    /// Blocked on epoll.
109    Epoll,
110    /// Blocked on eventfd.
111    Eventfd,
112    /// Blocked on unnamed_socket.
113    UnnamedSocket,
114    /// Blocked for any reason related to GenMC, such as `assume` statements (GenMC mode only).
115    /// Will be implicitly unblocked when GenMC schedules this thread again.
116    Genmc,
117}
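
// Note: a thread blocked for one of these reasons must later be unblocked with the *same*
// reason; `unblock_thread` (below) asserts this. Schematic example from a hypothetical futex
// shim (`waiter_id` is assumed to be the blocked thread's id):
//
//     // Wake one thread that was previously blocked on the futex.
//     this.unblock_thread(waiter_id, BlockReason::Futex)?;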
118
119/// The state of a thread.
120enum ThreadState<'tcx> {
121    /// The thread is enabled and can be executed.
122    Enabled,
123    /// The thread is blocked on something.
124    Blocked { reason: BlockReason, timeout: Option<Timeout>, callback: DynUnblockCallback<'tcx> },
125    /// The thread has terminated its execution. We do not delete terminated
126    /// threads (FIXME: why?).
127    Terminated,
128}
129
130impl<'tcx> std::fmt::Debug for ThreadState<'tcx> {
131    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
132        match self {
133            Self::Enabled => write!(f, "Enabled"),
134            Self::Blocked { reason, timeout, .. } =>
135                f.debug_struct("Blocked").field("reason", reason).field("timeout", timeout).finish(),
136            Self::Terminated => write!(f, "Terminated"),
137        }
138    }
139}
140
141impl<'tcx> ThreadState<'tcx> {
142    fn is_enabled(&self) -> bool {
143        matches!(self, ThreadState::Enabled)
144    }
145
146    fn is_terminated(&self) -> bool {
147        matches!(self, ThreadState::Terminated)
148    }
149
150    fn is_blocked_on(&self, reason: BlockReason) -> bool {
151        matches!(*self, ThreadState::Blocked { reason: actual_reason, .. } if actual_reason == reason)
152    }
153}
154
155/// The join status of a thread.
156#[derive(Debug, Copy, Clone, PartialEq, Eq)]
157enum ThreadJoinStatus {
158    /// The thread can be joined.
159    Joinable,
160    /// A thread is detached if its join handle was destroyed and no other
161    /// thread can join it.
162    Detached,
163    /// The thread was already joined by some thread and cannot be joined again.
164    Joined,
165}
166
167/// A thread.
168pub struct Thread<'tcx> {
169    state: ThreadState<'tcx>,
170
171    /// Name of the thread.
172    thread_name: Option<Vec<u8>>,
173
174    /// The virtual call stack.
175    stack: Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>>,
176
177    /// A span that explains where the thread (or more specifically, its current root
178    /// frame) "comes from".
179    pub(crate) origin_span: Span,
180
181    /// The function to call when the stack runs empty, to figure out what to do next.
182    /// Conceptually, this is the interpreter implementation of the things that happen 'after' the
183    /// Rust language entry point for this thread returns (usually implemented by the C or OS runtime).
184    /// (`None` is an error, it means the callback has not been set up yet or is actively running.)
185    pub(crate) on_stack_empty: Option<StackEmptyCallback<'tcx>>,
186
187    /// The index of the topmost user-relevant frame in `stack`. This field must contain
188    /// the value produced by `compute_top_user_relevant_frame`.
189    /// This field is a cache to reduce how often we call that method. The cache is manually
190    /// maintained inside `MiriMachine::after_stack_push` and `MiriMachine::after_stack_pop`.
191    top_user_relevant_frame: Option<usize>,
192
193    /// The join status.
194    join_status: ThreadJoinStatus,
195
196    /// Stack of active unwind payloads for the current thread. Used for storing
197    /// the argument of the call to `miri_start_unwind` (the payload) when unwinding.
198    /// This is pointer-sized, and matches the `Payload` type in `src/libpanic_unwind/miri.rs`.
199    ///
200    /// In real unwinding, the payload gets passed as an argument to the landing pad,
201    /// which then forwards it to 'Resume'. However this argument is implicit in MIR,
202    /// so we have to store it out-of-band. When there are multiple active unwinds,
203    /// the innermost one is always caught first, so we can store them as a stack.
204    pub(crate) unwind_payloads: Vec<ImmTy<'tcx>>,
205
206    /// Last OS error location in memory. It is a 32-bit integer.
207    pub(crate) last_error: Option<MPlaceTy<'tcx>>,
208}
209
210pub type StackEmptyCallback<'tcx> =
211    Box<dyn FnMut(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, Poll<()>> + 'tcx>;
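
// A minimal sketch of such a callback (purely illustrative): once there is nothing left to do,
// report `Ready` so that the scheduler terminates this thread.
//
//     let callback: StackEmptyCallback<'_> = Box::new(|_ecx| interp_ok(Poll::Ready(())));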
212
213impl<'tcx> Thread<'tcx> {
214    /// Get the name of the current thread if it was set.
215    fn thread_name(&self) -> Option<&[u8]> {
216        self.thread_name.as_deref()
217    }
218
219    /// Return whether this thread is enabled or not.
220    pub fn is_enabled(&self) -> bool {
221        self.state.is_enabled()
222    }
223
224    /// Get the name of the current thread for display purposes; will include thread ID if not set.
225    fn thread_display_name(&self, id: ThreadId) -> String {
226        if let Some(ref thread_name) = self.thread_name {
227            String::from_utf8_lossy(thread_name).into_owned()
228        } else {
229            format!("unnamed-{}", id.index())
230        }
231    }
232
233    /// Return the top user-relevant frame, if there is one. `skip` indicates how many top frames
234    /// should be skipped.
235    /// Note that the choice to return `None` here when there is no user-relevant frame is part of
236    /// justifying the optimization that only pushes of user-relevant frames require updating the
237    /// `top_user_relevant_frame` field.
238    fn compute_top_user_relevant_frame(&self, skip: usize) -> Option<usize> {
239        // We are searching for the frame with maximum relevance.
240        let mut best = None;
241        for (idx, frame) in self.stack.iter().enumerate().rev().skip(skip) {
242            let relevance = frame.extra.user_relevance;
243            if relevance == u8::MAX {
244                // We can short-circuit this search.
245                return Some(idx);
246            }
247            if best.is_none_or(|(_best_idx, best_relevance)| best_relevance < relevance) {
248                // The previous best frame has strictly worse relevance, so despite us being lower
249                // in the stack, we win.
250                best = Some((idx, relevance));
251            }
252        }
253        best.map(|(idx, _relevance)| idx)
254    }
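
    // Worked example: with frame relevances (bottom to top) `[1, 3, 2, 3]` and `skip == 0`, the
    // search above returns index 3, i.e. the *topmost* frame among those with maximal relevance
    // (ties are won by the frame found first, which is the one higher up in the stack).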
255
256    /// Re-compute the top user-relevant frame from scratch. `skip` indicates how many top frames
257    /// should be skipped.
258    pub fn recompute_top_user_relevant_frame(&mut self, skip: usize) {
259        self.top_user_relevant_frame = self.compute_top_user_relevant_frame(skip);
260    }
261
262    /// Set the top user-relevant frame to the given value. Must be equal to what
263    /// `compute_top_user_relevant_frame` would return!
264    pub fn set_top_user_relevant_frame(&mut self, frame_idx: usize) {
265        debug_assert_eq!(Some(frame_idx), self.compute_top_user_relevant_frame(0));
266        self.top_user_relevant_frame = Some(frame_idx);
267    }
268
269    /// Returns the topmost frame that is considered user-relevant, or the
270    /// top of the stack if there is no such frame, or `None` if the stack is empty.
271    pub fn top_user_relevant_frame(&self) -> Option<usize> {
272        // This can be called upon creation of an allocation. We create allocations while setting up
273        // parts of the Rust runtime when we do not have any stack frames yet, so we need to handle
274        // empty stacks.
275        self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
276    }
277
278    pub fn current_user_relevance(&self) -> u8 {
279        self.top_user_relevant_frame()
280            .map(|frame_idx| self.stack[frame_idx].extra.user_relevance)
281            .unwrap_or(0)
282    }
283
284    pub fn current_user_relevant_span(&self) -> Span {
285        debug_assert_eq!(self.top_user_relevant_frame, self.compute_top_user_relevant_frame(0));
286        self.top_user_relevant_frame()
287            .map(|frame_idx| self.stack[frame_idx].current_span())
288            .unwrap_or(rustc_span::DUMMY_SP)
289    }
290}
291
292impl<'tcx> std::fmt::Debug for Thread<'tcx> {
293    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
294        write!(
295            f,
296            "{}({:?}, {:?})",
297            String::from_utf8_lossy(self.thread_name().unwrap_or(b"<unnamed>")),
298            self.state,
299            self.join_status
300        )
301    }
302}
303
304impl<'tcx> Thread<'tcx> {
305    fn new(name: Option<&str>, on_stack_empty: Option<StackEmptyCallback<'tcx>>) -> Self {
306        Self {
307            state: ThreadState::Enabled,
308            thread_name: name.map(|name| Vec::from(name.as_bytes())),
309            stack: Vec::new(),
310            origin_span: DUMMY_SP,
311            top_user_relevant_frame: None,
312            join_status: ThreadJoinStatus::Joinable,
313            unwind_payloads: Vec::new(),
314            last_error: None,
315            on_stack_empty,
316        }
317    }
318}
319
320impl VisitProvenance for Thread<'_> {
321    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
322        let Thread {
323            unwind_payloads: panic_payload,
324            last_error,
325            stack,
326            origin_span: _,
327            top_user_relevant_frame: _,
328            state: _,
329            thread_name: _,
330            join_status: _,
331            on_stack_empty: _, // we assume the closure captures no GC-relevant state
332        } = self;
333
334        for payload in panic_payload {
335            payload.visit_provenance(visit);
336        }
337        last_error.visit_provenance(visit);
338        for frame in stack {
339            frame.visit_provenance(visit)
340        }
341    }
342}
343
344impl VisitProvenance for Frame<'_, Provenance, FrameExtra<'_>> {
345    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
346        let Frame {
347            return_place,
348            locals,
349            extra,
350            // There are some private fields we cannot access; they contain no tags.
351            ..
352        } = self;
353
354        // Return place.
355        return_place.visit_provenance(visit);
356        // Locals.
357        for local in locals.iter() {
358            match local.as_mplace_or_imm() {
359                None => {}
360                Some(Either::Left((ptr, meta))) => {
361                    ptr.visit_provenance(visit);
362                    meta.visit_provenance(visit);
363                }
364                Some(Either::Right(imm)) => {
365                    imm.visit_provenance(visit);
366                }
367            }
368        }
369
370        extra.visit_provenance(visit);
371    }
372}
373
374/// The moment in time when a blocked thread should be woken up.
375#[derive(Debug)]
376enum Timeout {
377    Monotonic(Instant),
378    RealTime(SystemTime),
379}
380
381impl Timeout {
382    /// How long do we have to wait from now until the specified time?
383    fn get_wait_time(&self, clock: &MonotonicClock) -> Duration {
384        match self {
385            Timeout::Monotonic(instant) => instant.duration_since(clock.now()),
386            Timeout::RealTime(time) =>
387                time.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO),
388        }
389    }
390
391    /// Will try to add `duration`, but if that overflows it may add less.
392    fn add_lossy(&self, duration: Duration) -> Self {
393        match self {
394            Timeout::Monotonic(i) => Timeout::Monotonic(i.add_lossy(duration)),
395            Timeout::RealTime(s) => {
396                // If this overflows, try adding just 1h and assume that will not overflow.
397                Timeout::RealTime(
398                    s.checked_add(duration)
399                        .unwrap_or_else(|| s.checked_add(Duration::from_secs(3600)).unwrap()),
400                )
401            }
402        }
403    }
404}
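
// Example of the lossy addition above (illustrative): if the checked addition would overflow,
// roughly one hour is added instead of panicking.
//
//     let soon = Timeout::RealTime(SystemTime::now());
//     // `Duration::MAX` overflows `SystemTime`, so this falls back to adding about 1h.
//     let _later = soon.add_lossy(Duration::MAX);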
405
406/// The clock to use for the timeout you are asking for.
407#[derive(Debug, Copy, Clone, PartialEq)]
408pub enum TimeoutClock {
409    Monotonic,
410    RealTime,
411}
412
413/// Whether the timeout is relative or absolute.
414#[derive(Debug, Copy, Clone)]
415pub enum TimeoutAnchor {
416    Relative,
417    Absolute,
418}
419
420/// An error signaling that the requested thread doesn't exist.
421#[derive(Debug, Copy, Clone)]
422pub struct ThreadNotFound;
423
424/// A set of threads.
425#[derive(Debug)]
426pub struct ThreadManager<'tcx> {
427    /// Identifier of the currently active thread.
428    active_thread: ThreadId,
429    /// Threads used in the program.
430    ///
431    /// Note that this vector also contains terminated threads.
432    threads: IndexVec<ThreadId, Thread<'tcx>>,
433    /// A mapping from a thread-local static to the thread specific allocation.
434    thread_local_allocs: FxHashMap<(DefId, ThreadId), StrictPointer>,
435    /// A flag that indicates that we should change the active thread.
436    /// Completely ignored in GenMC mode.
437    yield_active_thread: bool,
438    /// Whether to use round-robin scheduling of threads; if `false`, scheduling is randomized.
439    fixed_scheduling: bool,
440}
441
442impl VisitProvenance for ThreadManager<'_> {
443    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
444        let ThreadManager {
445            threads,
446            thread_local_allocs,
447            active_thread: _,
448            yield_active_thread: _,
449            fixed_scheduling: _,
450        } = self;
451
452        for thread in threads {
453            thread.visit_provenance(visit);
454        }
455        for ptr in thread_local_allocs.values() {
456            ptr.visit_provenance(visit);
457        }
458    }
459}
460
461impl<'tcx> ThreadManager<'tcx> {
462    pub(crate) fn new(config: &MiriConfig) -> Self {
463        let mut threads = IndexVec::new();
464        // Create the main thread and add it to the list of threads.
465        threads.push(Thread::new(Some("main"), None));
466        Self {
467            active_thread: ThreadId::MAIN_THREAD,
468            threads,
469            thread_local_allocs: Default::default(),
470            yield_active_thread: false,
471            fixed_scheduling: config.fixed_scheduling,
472        }
473    }
474
475    pub(crate) fn init(
476        ecx: &mut MiriInterpCx<'tcx>,
477        on_main_stack_empty: StackEmptyCallback<'tcx>,
478    ) {
479        ecx.machine.threads.threads[ThreadId::MAIN_THREAD].on_stack_empty =
480            Some(on_main_stack_empty);
481        if ecx.tcx.sess.target.os != Os::Windows {
482            // The main thread can *not* be joined, except on Windows.
483            ecx.machine.threads.threads[ThreadId::MAIN_THREAD].join_status =
484                ThreadJoinStatus::Detached;
485        }
486    }
487
488    pub fn thread_id_try_from(&self, id: impl TryInto<u32>) -> Result<ThreadId, ThreadNotFound> {
489        if let Ok(id) = id.try_into()
490            && usize::try_from(id).is_ok_and(|id| id < self.threads.len())
491        {
492            Ok(ThreadId(id))
493        } else {
494            Err(ThreadNotFound)
495        }
496    }
497
498    /// Check if we have an allocation for the given thread local static for the
499    /// active thread.
500    fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<StrictPointer> {
501        self.thread_local_allocs.get(&(def_id, self.active_thread)).cloned()
502    }
503
504    /// Set the pointer for the allocation of the given thread local
505    /// static for the active thread.
506    ///
507    /// Panics if a thread local is initialized twice for the same thread.
508    fn set_thread_local_alloc(&mut self, def_id: DefId, ptr: StrictPointer) {
509        self.thread_local_allocs.try_insert((def_id, self.active_thread), ptr).unwrap();
510    }
511
512    /// Borrow the stack of the active thread.
513    pub fn active_thread_stack(&self) -> &[Frame<'tcx, Provenance, FrameExtra<'tcx>>] {
514        &self.threads[self.active_thread].stack
515    }
516
517    /// Mutably borrow the stack of the active thread.
518    pub fn active_thread_stack_mut(
519        &mut self,
520    ) -> &mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
521        &mut self.threads[self.active_thread].stack
522    }
523
524    pub fn all_blocked_stacks(
525        &self,
526    ) -> impl Iterator<Item = (ThreadId, &[Frame<'tcx, Provenance, FrameExtra<'tcx>>])> {
527        self.threads
528            .iter_enumerated()
529            .filter(|(_id, t)| matches!(t.state, ThreadState::Blocked { .. }))
530            .map(|(id, t)| (id, &t.stack[..]))
531    }
532
533    /// Create a new thread and return its id.
534    fn create_thread(&mut self, on_stack_empty: StackEmptyCallback<'tcx>) -> ThreadId {
535        let new_thread_id = ThreadId::new(self.threads.len());
536        self.threads.push(Thread::new(None, Some(on_stack_empty)));
537        new_thread_id
538    }
539
540    /// Set an active thread and return the id of the thread that was active before.
541    fn set_active_thread_id(&mut self, id: ThreadId) -> ThreadId {
542        assert!(id.index() < self.threads.len());
543        info!(
544            "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------",
545            self.get_thread_display_name(id),
546            self.get_thread_display_name(self.active_thread)
547        );
548        std::mem::replace(&mut self.active_thread, id)
549    }
550
551    /// Get the id of the currently active thread.
552    pub fn active_thread(&self) -> ThreadId {
553        self.active_thread
554    }
555
556    /// Get the total number of threads that were ever spawned by this program.
557    pub fn get_total_thread_count(&self) -> usize {
558        self.threads.len()
559    }
560
561    /// Get the number of threads that are currently live, i.e., not yet terminated.
562    /// (They might be blocked.)
563    pub fn get_live_thread_count(&self) -> usize {
564        self.threads.iter().filter(|t| !t.state.is_terminated()).count()
565    }
566
567    /// Has the given thread terminated?
568    fn has_terminated(&self, thread_id: ThreadId) -> bool {
569        self.threads[thread_id].state.is_terminated()
570    }
571
572    /// Have all threads terminated?
573    fn have_all_terminated(&self) -> bool {
574        self.threads.iter().all(|thread| thread.state.is_terminated())
575    }
576
577    /// Enable the thread for execution. The thread must be terminated.
578    fn enable_thread(&mut self, thread_id: ThreadId) {
579        assert!(self.has_terminated(thread_id));
580        self.threads[thread_id].state = ThreadState::Enabled;
581    }
582
583    /// Get a mutable borrow of the currently active thread.
584    pub fn active_thread_mut(&mut self) -> &mut Thread<'tcx> {
585        &mut self.threads[self.active_thread]
586    }
587
588    /// Get a shared borrow of the currently active thread.
589    pub fn active_thread_ref(&self) -> &Thread<'tcx> {
590        &self.threads[self.active_thread]
591    }
592
593    pub fn thread_ref(&self, thread_id: ThreadId) -> &Thread<'tcx> {
594        &self.threads[thread_id]
595    }
596
597    /// Mark the thread as detached, which means that no other thread will try
598    /// to join it and the thread is responsible for cleaning up.
599    ///
600    /// `allow_terminated_joined` allows detaching joined threads that have already terminated.
601    /// This matches Windows's behavior for `CloseHandle`.
602    ///
603    /// See <https://docs.microsoft.com/en-us/windows/win32/procthread/thread-handles-and-identifiers>:
604    /// > The handle is valid until closed, even after the thread it represents has been terminated.
605    fn detach_thread(&mut self, id: ThreadId, allow_terminated_joined: bool) -> InterpResult<'tcx> {
606        // NOTE: In GenMC mode, we treat detached threads like regular threads that are never joined, so there is no special handling required here.
607        trace!("detaching {:?}", id);
608
609        let is_ub = if allow_terminated_joined && self.threads[id].state.is_terminated() {
610            // "Detached" in particular means "not yet joined". Redundant detaching is still UB.
611            self.threads[id].join_status == ThreadJoinStatus::Detached
612        } else {
613            self.threads[id].join_status != ThreadJoinStatus::Joinable
614        };
615        if is_ub {
616            throw_ub_format!("trying to detach thread that was already detached or joined");
617        }
618
619        self.threads[id].join_status = ThreadJoinStatus::Detached;
620        interp_ok(())
621    }
622
623    /// Set the name of the given thread.
624    pub fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>) {
625        self.threads[thread].thread_name = Some(new_thread_name);
626    }
627
628    /// Get the name of the given thread.
629    pub fn get_thread_name(&self, thread: ThreadId) -> Option<&[u8]> {
630        self.threads[thread].thread_name()
631    }
632
633    pub fn get_thread_display_name(&self, thread: ThreadId) -> String {
634        self.threads[thread].thread_display_name(thread)
635    }
636
637    /// Put the thread into the blocked state.
638    fn block_thread(
639        &mut self,
640        reason: BlockReason,
641        timeout: Option<Timeout>,
642        callback: DynUnblockCallback<'tcx>,
643    ) {
644        let state = &mut self.threads[self.active_thread].state;
645        assert!(state.is_enabled());
646        *state = ThreadState::Blocked { reason, timeout, callback }
647    }
648
649    /// Change the active thread to some enabled thread.
650    fn yield_active_thread(&mut self) {
651        // We do not yield immediately, as swapping out the current stack while executing a MIR statement
652        // could lead to all sorts of confusion.
653        // We should only switch stacks between steps.
654        self.yield_active_thread = true;
655    }
656
657    /// Get the wait time for the next timeout, or `None` if no timeout is pending.
658    fn next_callback_wait_time(&self, clock: &MonotonicClock) -> Option<Duration> {
659        self.threads
660            .iter()
661            .filter_map(|t| {
662                match &t.state {
663                    ThreadState::Blocked { timeout: Some(timeout), .. } =>
664                        Some(timeout.get_wait_time(clock)),
665                    _ => None,
666                }
667            })
668            .min()
669    }
670}
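
// A small sketch of the read-only `ThreadManager` API (hypothetical caller; assumes `tm` is a
// `&ThreadManager` borrowed from the machine state):
//
//     let main = tm.thread_id_try_from(0u32).expect("the main thread always exists");
//     assert_eq!(main, ThreadId::MAIN_THREAD);
//     assert!(tm.get_live_thread_count() <= tm.get_total_thread_count());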
671
672impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
673trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
674    /// Execute a timeout callback on the callback's thread.
675    #[inline]
676    fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
677        let this = self.eval_context_mut();
678        let mut found_callback = None;
679        // Find a blocked thread that has timed out.
680        for (id, thread) in this.machine.threads.threads.iter_enumerated_mut() {
681            match &thread.state {
682                ThreadState::Blocked { timeout: Some(timeout), .. }
683                    if timeout.get_wait_time(&this.machine.monotonic_clock) == Duration::ZERO =>
684                {
685                    let old_state = mem::replace(&mut thread.state, ThreadState::Enabled);
686                    let ThreadState::Blocked { callback, .. } = old_state else { unreachable!() };
687                    found_callback = Some((id, callback));
688                    // Run the callback (after the loop, because of borrow-checking).
689                    break;
690                }
691                _ => {}
692            }
693        }
694        if let Some((thread, callback)) = found_callback {
695            // This back-and-forth with `set_active_thread` is here because of two
696            // design decisions:
697            // 1. Make the caller and not the callback responsible for changing
698            //    thread.
699            // 2. Make the scheduler the only place that can change the active
700            //    thread.
701            let old_thread = this.machine.threads.set_active_thread_id(thread);
702            callback.call(this, UnblockKind::TimedOut)?;
703            this.machine.threads.set_active_thread_id(old_thread);
704        }
705        // `found_callback` can remain `None` if the computer's clock
706        // was shifted after calling the scheduler and before the search
707        // above for a timed-out thread (see issue
708        // https://github.com/rust-lang/miri/issues/1763). In this case,
709        // we just do nothing, which effectively returns to the
710        // scheduler.
711        interp_ok(())
712    }
713
714    #[inline]
715    fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
716        let this = self.eval_context_mut();
717        let active_thread = this.active_thread_mut();
718        active_thread.origin_span = DUMMY_SP; // reset, the old value no longer applies
719        let mut callback = active_thread
720            .on_stack_empty
721            .take()
722            .expect("`on_stack_empty` not set up, or already running");
723        let res = callback(this)?;
724        this.active_thread_mut().on_stack_empty = Some(callback);
725        interp_ok(res)
726    }
727
728    /// Decide which action to take next and on which thread.
729    ///
730    /// The currently implemented scheduling policy is the one that is commonly
731    /// used in stateless model checkers such as Loom: run the active thread as
732    /// long as we can and switch only when we have to (the active thread was
733    /// blocked, terminated, or has explicitly asked to be preempted).
734    ///
735    /// If GenMC mode is active, the scheduling is instead handled by GenMC.
736    fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
737        let this = self.eval_context_mut();
738
739        // In GenMC mode, we let GenMC do the scheduling.
740        if this.machine.data_race.as_genmc_ref().is_some() {
741            loop {
742                let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
743                let Some(next_thread_id) = genmc_ctx.schedule_thread(this)? else {
744                    return interp_ok(SchedulingAction::ExecuteStep);
745                };
746                // If a thread is blocked on GenMC, we have to implicitly unblock it when it gets scheduled again.
747                if this.machine.threads.threads[next_thread_id]
748                    .state
749                    .is_blocked_on(BlockReason::Genmc)
750                {
751                    info!(
752                        "GenMC: scheduling blocked thread {next_thread_id:?}, so we unblock it now."
753                    );
754                    this.unblock_thread(next_thread_id, BlockReason::Genmc)?;
755                }
756                // The thread we just unblocked may have been blocked again during the unblocking callback.
757                // In that case, we need to ask for a different thread to run next.
758                let thread_manager = &mut this.machine.threads;
759                if thread_manager.threads[next_thread_id].state.is_enabled() {
760                    // Set the new active thread.
761                    thread_manager.active_thread = next_thread_id;
762                    return interp_ok(SchedulingAction::ExecuteStep);
763                }
764            }
765        }
766
767        // We are not in GenMC mode, so we control the scheduling.
768        let thread_manager = &mut this.machine.threads;
769        let clock = &this.machine.monotonic_clock;
770        let rng = this.machine.rng.get_mut();
771        // This thread and the program can keep going.
772        if thread_manager.threads[thread_manager.active_thread].state.is_enabled()
773            && !thread_manager.yield_active_thread
774        {
775            // The currently active thread is still enabled, just continue with it.
776            return interp_ok(SchedulingAction::ExecuteStep);
777        }
778        // The active thread yielded or got terminated. Let's see if there are any timeouts to take
779        // care of. We do this *before* running any other thread, to ensure that timeouts "in the
780        // past" fire before any other thread can take an action. This ensures that for
781        // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by
782        // abstime has already been passed at the time of the call".
783        // <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
784        let potential_sleep_time = thread_manager.next_callback_wait_time(clock);
785        if potential_sleep_time == Some(Duration::ZERO) {
786            return interp_ok(SchedulingAction::ExecuteTimeoutCallback);
787        }
788        // No callbacks immediately scheduled, pick a regular thread to execute.
789        // The active thread blocked or yielded. So we go search for another enabled thread.
790        // We build the list of threads by starting with the threads after the current one, followed by
791        // the threads before the current one and then the current thread itself (i.e., this iterator acts
792        // like `threads.rotate_left(self.active_thread.index() + 1)`). This ensures that if we pick the first
793        // eligible thread, we do regular round-robin scheduling, and all threads get a chance to take a step.
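        // For example, with four threads and `active_thread == 1`, candidates are considered in
        // the order 2, 3, 0, 1 (before filtering out threads that are not enabled).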
794        let mut threads_iter = thread_manager
795            .threads
796            .iter_enumerated()
797            .skip(thread_manager.active_thread.index() + 1)
798            .chain(
799                thread_manager
800                    .threads
801                    .iter_enumerated()
802                    .take(thread_manager.active_thread.index() + 1),
803            )
804            .filter(|(_id, thread)| thread.state.is_enabled());
805        // Pick a new thread, and switch to it.
806        let new_thread = if thread_manager.fixed_scheduling {
807            threads_iter.next()
808        } else {
809            threads_iter.choose(rng)
810        };
811
812        if let Some((id, _thread)) = new_thread {
813            if thread_manager.active_thread != id {
814                info!(
815                    "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------",
816                    thread_manager.get_thread_display_name(id),
817                    thread_manager.get_thread_display_name(thread_manager.active_thread)
818                );
819                thread_manager.active_thread = id;
820            }
821        }
822        // This completes the `yield`, if any was requested.
823        thread_manager.yield_active_thread = false;
824
825        if thread_manager.threads[thread_manager.active_thread].state.is_enabled() {
826            return interp_ok(SchedulingAction::ExecuteStep);
827        }
828        // We have not found a thread to execute.
829        if thread_manager.threads.iter().all(|thread| thread.state.is_terminated()) {
830            unreachable!("all threads terminated without the main thread terminating?!");
831        } else if let Some(sleep_time) = potential_sleep_time {
832            // All threads are currently blocked, but we have unexecuted
833            // timeout_callbacks, which may unblock some of the threads. Hence,
834            // sleep until the first callback.
835            interp_ok(SchedulingAction::Sleep(sleep_time))
836        } else {
837            throw_machine_stop!(TerminationInfo::Deadlock);
838        }
839    }
840}
841
842// Public interface to thread management.
843impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
844pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
845    #[inline]
846    fn thread_id_try_from(&self, id: impl TryInto<u32>) -> Result<ThreadId, ThreadNotFound> {
847        self.eval_context_ref().machine.threads.thread_id_try_from(id)
848    }
849
850    /// Get a thread-specific allocation id for the given thread-local static.
851    /// If needed, allocate a new one.
852    fn get_or_create_thread_local_alloc(
853        &mut self,
854        def_id: DefId,
855    ) -> InterpResult<'tcx, StrictPointer> {
856        let this = self.eval_context_mut();
857        let tcx = this.tcx;
858        if let Some(old_alloc) = this.machine.threads.get_thread_local_alloc_id(def_id) {
859            // We already have a thread-specific allocation id for this
860            // thread-local static.
861            interp_ok(old_alloc)
862        } else {
863            // We need to allocate a thread-specific allocation id for this
864            // thread-local static.
865            // First, we compute the initial value for this static.
866            if tcx.is_foreign_item(def_id) {
867                throw_unsup_format!("foreign thread-local statics are not supported");
868            }
869            let params = this.machine.get_default_alloc_params();
870            let alloc = this.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
871            // We make a full copy of this allocation.
872            let mut alloc = alloc.inner().adjust_from_tcx(
873                &this.tcx,
874                |bytes, align| {
875                    interp_ok(MiriAllocBytes::from_bytes(
876                        std::borrow::Cow::Borrowed(bytes),
877                        align,
878                        params,
879                    ))
880                },
881                |ptr| this.global_root_pointer(ptr),
882            )?;
883            // This allocation will be deallocated when the thread dies, so it is not in read-only memory.
884            alloc.mutability = Mutability::Mut;
885            // Create a fresh allocation with this content.
886            let ptr = this.insert_allocation(alloc, MiriMemoryKind::Tls.into())?;
887            this.machine.threads.set_thread_local_alloc(def_id, ptr);
888            interp_ok(ptr)
889        }
890    }
891
892    /// Start a regular (non-main) thread.
893    #[inline]
894    fn start_regular_thread(
895        &mut self,
896        thread: Option<MPlaceTy<'tcx>>,
897        start_routine: Pointer,
898        start_abi: ExternAbi,
899        func_arg: ImmTy<'tcx>,
900        ret_layout: TyAndLayout<'tcx>,
901    ) -> InterpResult<'tcx, ThreadId> {
902        let this = self.eval_context_mut();
903
904        // Create the new thread
905        let current_span = this.machine.current_user_relevant_span();
906        let new_thread_id = this.machine.threads.create_thread({
907            let mut state = tls::TlsDtorsState::default();
908            Box::new(move |m| state.on_stack_empty(m))
909        });
910        match &mut this.machine.data_race {
911            GlobalDataRaceHandler::None => {}
912            GlobalDataRaceHandler::Vclocks(data_race) =>
913                data_race.thread_created(&this.machine.threads, new_thread_id, current_span),
914            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
915                genmc_ctx.handle_thread_create(
916                    &this.machine.threads,
917                    start_routine,
918                    &func_arg,
919                    new_thread_id,
920                )?,
921        }
922        // Write the new thread's id into the user-provided place. We only switch to the new
923        // thread afterwards, so that this write is treated as occurring on the current thread.
924        if let Some(thread_info_place) = thread {
925            this.write_scalar(
926                Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
927                &thread_info_place,
928            )?;
929        }
930
931        // Finally switch to new thread so that we can push the first stackframe.
932        // After this all accesses will be treated as occurring in the new thread.
933        let old_thread_id = this.machine.threads.set_active_thread_id(new_thread_id);
934
935        // The child inherits its parent's cpu affinity.
936        if let Some(cpuset) = this.machine.thread_cpu_affinity.get(&old_thread_id).cloned() {
937            this.machine.thread_cpu_affinity.insert(new_thread_id, cpuset);
938        }
939
940        // Perform the function pointer load in the new thread frame.
941        let instance = this.get_ptr_fn(start_routine)?.as_instance()?;
942
943        // Note: the returned value is currently ignored (see the FIXME in
944        // pthread_join in shims/unix/thread.rs) because the Rust standard library does not use
945        // it.
946        let ret_place = this.allocate(ret_layout, MiriMemoryKind::Machine.into())?;
947
948        this.call_thread_root_function(
949            instance,
950            start_abi,
951            &[func_arg],
952            Some(&ret_place),
953            current_span,
954        )?;
955
956        // Restore the old active thread frame.
957        this.machine.threads.set_active_thread_id(old_thread_id);
958
959        interp_ok(new_thread_id)
960    }
961
962    /// Handles thread termination of the active thread: wakes up threads joining on this one,
963    /// and deals with the thread's thread-local statics according to `tls_alloc_action`.
964    ///
965    /// This is called by the eval loop when a thread's on_stack_empty returns `Ready`.
966    fn terminate_active_thread(&mut self, tls_alloc_action: TlsAllocAction) -> InterpResult<'tcx> {
967        let this = self.eval_context_mut();
968
969        // Mark thread as terminated.
970        let thread = this.active_thread_mut();
971        assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
972        thread.state = ThreadState::Terminated;
973
974        // Deallocate TLS.
975        let gone_thread = this.active_thread();
976        {
977            let mut free_tls_statics = Vec::new();
978            this.machine.threads.thread_local_allocs.retain(|&(_def_id, thread), &mut alloc_id| {
979                if thread != gone_thread {
980                    // A different thread, keep this static around.
981                    return true;
982                }
983                // Delete this static from the map and from memory.
984                // We cannot free directly here as we cannot use `?` in this context.
985                free_tls_statics.push(alloc_id);
986                false
987            });
988            // Now free the TLS statics.
989            for ptr in free_tls_statics {
990                match tls_alloc_action {
991                    TlsAllocAction::Deallocate =>
992                        this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?,
993                    TlsAllocAction::Leak =>
994                        if let Some(alloc) = ptr.provenance.get_alloc_id() {
995                            trace!(
996                                "Thread-local static leaked and stored as static root: {:?}",
997                                alloc
998                            );
999                            this.machine.static_roots.push(alloc);
1000                        },
1001                }
1002            }
1003        }
1004
1005        match &mut this.machine.data_race {
1006            GlobalDataRaceHandler::None => {}
1007            GlobalDataRaceHandler::Vclocks(data_race) =>
1008                data_race.thread_terminated(&this.machine.threads),
1009            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
1010                // Inform GenMC that the thread finished.
1011                // This needs to happen once all accesses to the thread are done, including freeing any TLS statics.
1012                genmc_ctx.handle_thread_finish(&this.machine.threads)
1013            }
1014        }
1015
1016        // Unblock joining threads.
1017        let unblock_reason = BlockReason::Join(gone_thread);
1018        let threads = &this.machine.threads.threads;
1019        let joining_threads = threads
1020            .iter_enumerated()
1021            .filter(|(_, thread)| thread.state.is_blocked_on(unblock_reason))
1022            .map(|(id, _)| id)
1023            .collect::<Vec<_>>();
1024        for thread in joining_threads {
1025            this.unblock_thread(thread, unblock_reason)?;
1026        }
1027
1028        interp_ok(())
1029    }
1030
1031    /// Block the current thread, with an optional timeout.
1032    /// The callback will be invoked when the thread gets unblocked.
1033    #[inline]
1034    fn block_thread(
1035        &mut self,
1036        reason: BlockReason,
1037        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
1038        callback: DynUnblockCallback<'tcx>,
1039    ) {
1040        let this = self.eval_context_mut();
1041        if timeout.is_some() && this.machine.data_race.as_genmc_ref().is_some() {
1042            panic!("Unimplemented: Timeouts not yet supported in GenMC mode.");
1043        }
1044        let timeout = timeout.map(|(clock, anchor, duration)| {
1045            let anchor = match clock {
1046                TimeoutClock::RealTime => {
1047                    assert!(
1048                        this.machine.communicate(),
1049                        "cannot have `RealTime` timeout with isolation enabled!"
1050                    );
1051                    Timeout::RealTime(match anchor {
1052                        TimeoutAnchor::Absolute => SystemTime::UNIX_EPOCH,
1053                        TimeoutAnchor::Relative => SystemTime::now(),
1054                    })
1055                }
1056                TimeoutClock::Monotonic =>
1057                    Timeout::Monotonic(match anchor {
1058                        TimeoutAnchor::Absolute => this.machine.monotonic_clock.epoch(),
1059                        TimeoutAnchor::Relative => this.machine.monotonic_clock.now(),
1060                    }),
1061            };
1062            anchor.add_lossy(duration)
1063        });
1064        this.machine.threads.block_thread(reason, timeout, callback);
1065    }
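
    // Schematic sketch of how a shim might use this (hypothetical; real shims differ in detail):
    //
    //     this.block_thread(
    //         BlockReason::Sleep,
    //         Some((TimeoutClock::Monotonic, TimeoutAnchor::Relative, Duration::from_millis(10))),
    //         callback!(
    //             @capture<'tcx> {}
    //             |_this, unblock: UnblockKind| {
    //                 // A pure sleep can only end by timing out.
    //                 assert_eq!(unblock, UnblockKind::TimedOut);
    //                 interp_ok(())
    //             }
    //         ),
    //     );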
1066
1067    /// Put the blocked thread into the enabled state.
1068    /// Sanity-checks that the thread previously was blocked for the right reason.
1069    fn unblock_thread(&mut self, thread: ThreadId, reason: BlockReason) -> InterpResult<'tcx> {
1070        let this = self.eval_context_mut();
1071        let old_state =
1072            mem::replace(&mut this.machine.threads.threads[thread].state, ThreadState::Enabled);
1073        let callback = match old_state {
1074            ThreadState::Blocked { reason: actual_reason, callback, .. } => {
1075                assert_eq!(
1076                    reason, actual_reason,
1077                    "unblock_thread: thread was blocked for the wrong reason"
1078                );
1079                callback
1080            }
1081            _ => panic!("unblock_thread: thread was not blocked"),
1082        };
1083        // The callback must be executed in the previously blocked thread.
1084        let old_thread = this.machine.threads.set_active_thread_id(thread);
1085        callback.call(this, UnblockKind::Ready)?;
1086        this.machine.threads.set_active_thread_id(old_thread);
1087        interp_ok(())
1088    }
1089
1090    #[inline]
1091    fn detach_thread(
1092        &mut self,
1093        thread_id: ThreadId,
1094        allow_terminated_joined: bool,
1095    ) -> InterpResult<'tcx> {
1096        let this = self.eval_context_mut();
1097        this.machine.threads.detach_thread(thread_id, allow_terminated_joined)
1098    }
1099
1100    /// Mark that the active thread tries to join the thread with `joined_thread_id`.
1101    ///
1102    /// When the join is successful (immediately, or as soon as the joined thread finishes), `success_retval` will be written to `return_dest`.
1103    fn join_thread(
1104        &mut self,
1105        joined_thread_id: ThreadId,
1106        success_retval: Scalar,
1107        return_dest: &MPlaceTy<'tcx>,
1108    ) -> InterpResult<'tcx> {
1109        let this = self.eval_context_mut();
1110        let thread_mgr = &mut this.machine.threads;
1111        if thread_mgr.threads[joined_thread_id].join_status == ThreadJoinStatus::Detached {
1112            // On Windows this corresponds to joining on a closed handle.
1113            throw_ub_format!("trying to join a detached thread");
1114        }
1115
1116        fn after_join<'tcx>(
1117            this: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
1118            joined_thread_id: ThreadId,
1119            success_retval: Scalar,
1120            return_dest: &MPlaceTy<'tcx>,
1121        ) -> InterpResult<'tcx> {
1122            let threads = &this.machine.threads;
1123            match &mut this.machine.data_race {
1124                GlobalDataRaceHandler::None => {}
1125                GlobalDataRaceHandler::Vclocks(data_race) =>
1126                    data_race.thread_joined(threads, joined_thread_id),
1127                GlobalDataRaceHandler::Genmc(genmc_ctx) =>
1128                    genmc_ctx.handle_thread_join(threads.active_thread, joined_thread_id)?,
1129            }
1130            this.write_scalar(success_retval, return_dest)?;
1131            interp_ok(())
1132        }
1133
1134        // Mark the joined thread as being joined so that we detect if other
1135        // threads try to join it.
1136        thread_mgr.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined;
1137        if !thread_mgr.threads[joined_thread_id].state.is_terminated() {
1138            trace!(
1139                "{:?} blocked on {:?} when trying to join",
1140                thread_mgr.active_thread, joined_thread_id
1141            );
1142            if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
1143                genmc_ctx.handle_thread_join(thread_mgr.active_thread, joined_thread_id)?;
1144            }
1145
1146            // The joined thread is still running, we need to wait for it.
1147            // Once we get unblocked, perform the appropriate synchronization and write the return value.
1148            let dest = return_dest.clone();
1149            thread_mgr.block_thread(
1150                BlockReason::Join(joined_thread_id),
1151                None,
1152                callback!(
1153                    @capture<'tcx> {
1154                        joined_thread_id: ThreadId,
1155                        dest: MPlaceTy<'tcx>,
1156                        success_retval: Scalar,
1157                    }
1158                    |this, unblock: UnblockKind| {
1159                        assert_eq!(unblock, UnblockKind::Ready);
1160                        after_join(this, joined_thread_id, success_retval, &dest)
1161                    }
1162                ),
1163            );
1164        } else {
1165            // The thread has already terminated - establish happens-before and write the return value.
1166            after_join(this, joined_thread_id, success_retval, return_dest)?;
1167        }
1168        interp_ok(())
1169    }
1170
1171    /// Mark that the active thread tries to exclusively join the thread with `joined_thread_id`.
1172    /// If the thread is already joined by another thread, it will throw UB.
1173    ///
1174    /// When the join is successful (immediately, or as soon as the joined thread finishes), `success_retval` will be written to `return_dest`.
1175    fn join_thread_exclusive(
1176        &mut self,
1177        joined_thread_id: ThreadId,
1178        success_retval: Scalar,
1179        return_dest: &MPlaceTy<'tcx>,
1180    ) -> InterpResult<'tcx> {
1181        let this = self.eval_context_mut();
1182        let threads = &this.machine.threads.threads;
1183        if threads[joined_thread_id].join_status == ThreadJoinStatus::Joined {
1184            throw_ub_format!("trying to join an already joined thread");
1185        }
1186
1187        if joined_thread_id == this.machine.threads.active_thread {
1188            throw_ub_format!("trying to join itself");
1189        }
1190
1191        // Sanity check `join_status`.
1192        assert!(
1193            threads
1194                .iter()
1195                .all(|thread| { !thread.state.is_blocked_on(BlockReason::Join(joined_thread_id)) }),
1196            "this thread already has threads waiting for its termination"
1197        );
1198
1199        this.join_thread(joined_thread_id, success_retval, return_dest)
1200    }
1201
1202    #[inline]
1203    fn active_thread(&self) -> ThreadId {
1204        let this = self.eval_context_ref();
1205        this.machine.threads.active_thread()
1206    }
1207
1208    #[inline]
1209    fn active_thread_mut(&mut self) -> &mut Thread<'tcx> {
1210        let this = self.eval_context_mut();
1211        this.machine.threads.active_thread_mut()
1212    }
1213
1214    #[inline]
1215    fn active_thread_ref(&self) -> &Thread<'tcx> {
1216        let this = self.eval_context_ref();
1217        this.machine.threads.active_thread_ref()
1218    }
1219
1220    #[inline]
1221    fn get_total_thread_count(&self) -> usize {
1222        let this = self.eval_context_ref();
1223        this.machine.threads.get_total_thread_count()
1224    }
1225
1226    #[inline]
1227    fn have_all_terminated(&self) -> bool {
1228        let this = self.eval_context_ref();
1229        this.machine.threads.have_all_terminated()
1230    }
1231
1232    #[inline]
1233    fn enable_thread(&mut self, thread_id: ThreadId) {
1234        let this = self.eval_context_mut();
1235        this.machine.threads.enable_thread(thread_id);
1236    }
1237
1238    #[inline]
1239    fn active_thread_stack<'a>(&'a self) -> &'a [Frame<'tcx, Provenance, FrameExtra<'tcx>>] {
1240        let this = self.eval_context_ref();
1241        this.machine.threads.active_thread_stack()
1242    }
1243
1244    #[inline]
1245    fn active_thread_stack_mut<'a>(
1246        &'a mut self,
1247    ) -> &'a mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
1248        let this = self.eval_context_mut();
1249        this.machine.threads.active_thread_stack_mut()
1250    }
1251
1252    /// Set the name of the current thread. The buffer must not include the null terminator.
1253    #[inline]
1254    fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>) {
1255        self.eval_context_mut().machine.threads.set_thread_name(thread, new_thread_name);
1256    }
1257
1258    #[inline]
1259    fn get_thread_name<'c>(&'c self, thread: ThreadId) -> Option<&'c [u8]>
1260    where
1261        'tcx: 'c,
1262    {
1263        self.eval_context_ref().machine.threads.get_thread_name(thread)
1264    }
1265
1266    #[inline]
1267    fn yield_active_thread(&mut self) {
1268        self.eval_context_mut().machine.threads.yield_active_thread();
1269    }
1270
1271    #[inline]
1272    fn maybe_preempt_active_thread(&mut self) {
1273        use rand::Rng as _;
1274
1275        let this = self.eval_context_mut();
1276        if !this.machine.threads.fixed_scheduling
1277            && this.machine.rng.get_mut().random_bool(this.machine.preemption_rate)
1278        {
1279            this.yield_active_thread();
1280        }
1281    }
1282
1283    /// Run the core interpreter loop. Returns only when an interrupt occurs (an error or program
1284    /// termination).
1285    fn run_threads(&mut self) -> InterpResult<'tcx, !> {
1286        let this = self.eval_context_mut();
1287        loop {
1288            if CTRL_C_RECEIVED.load(Relaxed) {
1289                this.machine.handle_abnormal_termination();
1290                throw_machine_stop!(TerminationInfo::Interrupted);
1291            }
1292            match this.schedule()? {
1293                SchedulingAction::ExecuteStep => {
1294                    if !this.step()? {
1295                        // See if this thread can do something else.
1296                        match this.run_on_stack_empty()? {
1297                            Poll::Pending => {} // keep going
1298                            Poll::Ready(()) =>
1299                                this.terminate_active_thread(TlsAllocAction::Deallocate)?,
1300                        }
1301                    }
1302                }
1303                SchedulingAction::ExecuteTimeoutCallback => {
1304                    this.run_timeout_callback()?;
1305                }
1306                SchedulingAction::Sleep(duration) => {
1307                    this.machine.monotonic_clock.sleep(duration);
1308                }
1309            }
1310        }
1311    }
1312}