miri/concurrency/sync.rs

use std::cell::RefCell;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::default::Default;
use std::ops::Not;
use std::rc::Rc;
use std::time::Duration;

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashMap;

use super::vector_clock::VClock;
use crate::*;

/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the mutex was locked by the owner.
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// Mutex clock. This tracks the moment of the last unlock.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct MutexRef(Rc<RefCell<Mutex>>);

impl MutexRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    /// Get the id of the thread that currently owns this lock, or `None` if it is not locked.
    pub fn owner(&self) -> Option<ThreadId> {
        self.0.borrow().owner
    }
}

impl VisitProvenance for MutexRef {
    // Mutex contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired
    /// the lock.
    readers: FxHashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// Data race clock for writers. Tracks the happens-before
    /// ordering between each write access to the rwlock. It is also
    /// updated after a sequence of concurrent readers, to track the
    /// happens-before ordering between the set of previous readers and
    /// the current writer.
    /// Contains the clock of the last thread to release a writer
    /// lock, or the joined clock of the set of last threads to release
    /// shared reader locks.
    clock_unlocked: VClock,
    /// Data race clock for readers. This is temporary storage
    /// for the combined happens-before ordering between all
    /// concurrent readers and the next writer; the value
    /// is copied into `clock_unlocked` once all readers are finished.
    /// It has to be stored separately since reader lock acquires
    /// must load the clock of the last write and must not
    /// add happens-before orderings between shared reader
    /// locks.
    /// This is only relevant when there is an active reader.
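    /// Illustrative example (not exhaustive): if readers R1 and R2 both take and
    /// release the read lock, their release clocks are joined into this clock;
    /// once the last reader is gone, the joined value becomes `clock_unlocked`,
    /// so the next writer's acquire observes both readers' accesses.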
    clock_current_readers: VClock,
}

impl RwLock {
    #[inline]
    /// Check if locked.
    fn is_locked(&self) -> bool {
        trace!(
            "rwlock_is_locked: writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
            self.writer,
            self.readers.len(),
        );
        self.writer.is_some() || self.readers.is_empty().not()
    }

    /// Check if write locked.
    #[inline]
    fn is_write_locked(&self) -> bool {
        trace!("rwlock_is_write_locked: writer is {:?}", self.writer);
        self.writer.is_some()
    }
}

#[derive(Default, Clone, Debug)]
pub struct RwLockRef(Rc<RefCell<RwLock>>);

impl RwLockRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn is_locked(&self) -> bool {
        self.0.borrow().is_locked()
    }

    pub fn is_write_locked(&self) -> bool {
        self.0.borrow().is_write_locked()
    }
}

impl VisitProvenance for RwLockRef {
    // RwLock contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The condition variable state.
#[derive(Default, Debug)]
struct Condvar {
    waiters: VecDeque<ThreadId>,
    /// Tracks the happens-before relationship
    /// between a cond-var signal and a cond-var
    /// wait during a non-spurious signal event.
    /// Contains the clock of the last thread to
    /// perform a condvar-signal.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct CondvarRef(Rc<RefCell<Condvar>>);

impl CondvarRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn is_awaited(&self) -> bool {
        !self.0.borrow().waiters.is_empty()
    }
}

impl VisitProvenance for CondvarRef {
    // Condvar contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The futex state.
#[derive(Default, Debug)]
struct Futex {
    waiters: Vec<FutexWaiter>,
    /// Tracks the happens-before relationship
    /// between a futex-wake and a futex-wait
    /// during a non-spurious wake event.
    /// Contains the clock of the last thread to
    /// perform a futex-wake.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct FutexRef(Rc<RefCell<Futex>>);

impl FutexRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn waiters(&self) -> usize {
        self.0.borrow().waiters.len()
    }
}

impl VisitProvenance for FutexRef {
    // Futex contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// A thread waiting on a futex.
#[derive(Debug)]
struct FutexWaiter {
    /// The thread that is waiting on this futex.
    thread: ThreadId,
    /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
    bitset: u32,
}

// Private extension trait for local helper methods
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn condvar_reacquire_mutex(
        &mut self,
        mutex_ref: MutexRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(owner) = mutex_ref.owner() {
            assert_ne!(owner, this.active_thread());
            this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
        } else {
            // We can have it right now!
            this.mutex_lock(&mutex_ref)?;
            // Don't forget to write the return value.
            this.write_scalar(retval, &dest)?;
        }
        interp_ok(())
    }
}

impl<'tcx> AllocExtra<'tcx> {
    fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
        self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
    }
}

/// We designate an `init` field in all primitives.
/// If `init` is set to this, we consider the primitive initialized.
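/// Shims write this cookie to the `init` field when they set up the primitive (see
/// `lazy_sync_init`) and later check for it with a relaxed RMW (see `lazy_sync_get_data`).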
pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Helper for lazily initialized `alloc_extra.sync` data:
    /// this forces an immediate init.
    /// Return a reference to the data in the machine state.
    fn lazy_sync_init<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        data: T,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
        alloc_extra.sync.insert(offset, Box::new(data));
        // Mark this as "initialized".
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        this.write_scalar_atomic(
            Scalar::from_u32(LAZY_INIT_COOKIE),
            &init_field,
            AtomicWriteOrd::Relaxed,
        )?;
        interp_ok(this.get_alloc_extra(alloc)?.get_sync::<T>(offset).unwrap())
    }

    /// Helper for lazily initialized `alloc_extra.sync` data:
    /// Checks if the primitive is initialized:
    /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
    ///   and stores that in `alloc_extra.sync`.
    /// - Otherwise, calls `new_data` to initialize the primitive.
    ///
    /// Return a reference to the data in the machine state.
    fn lazy_sync_get_data<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
        new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        // Check if this is already initialized. Needs to be atomic because we can race with another
        // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
        // So we just try to replace LAZY_INIT_COOKIE with itself.
        let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        let (_init, success) = this
            .atomic_compare_exchange_scalar(
                &init_field,
                &ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
                init_cookie,
                AtomicRwOrd::Relaxed,
                AtomicReadOrd::Relaxed,
                /* can_fail_spuriously */ false,
            )?
            .to_scalar_pair();

        if success.to_bool()? {
            // If it is initialized, it must be found in the "sync primitive" table,
            // or else it has been moved illegally.
            let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
            // Due to borrow checker reasons, we have to do the lookup twice.
            if alloc_extra.get_sync::<T>(offset).is_none() {
                let data = missing_data()?;
                alloc_extra.sync.insert(offset, Box::new(data));
            }
            interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
        } else {
            let data = new_data(this)?;
            this.lazy_sync_init(primitive, init_offset, data)
        }
    }
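
    // Illustrative usage sketch for `lazy_sync_get_data` (not part of the original
    // file; `mutex_place` and `init_offset` are hypothetical values a shim would
    // compute from its primitive's layout):
    //
    //     let mutex_ref = this
    //         .lazy_sync_get_data::<MutexRef>(
    //             &mutex_place,
    //             init_offset,
    //             || throw_ub_format!("mutex was moved after first use"),
    //             |_this| interp_ok(MutexRef::new()),
    //         )?
    //         .clone();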

    /// Get the synchronization primitive associated with the given pointer,
    /// or initialize a new one.
    ///
    /// Return `None` if this pointer does not point to at least 1 byte of mutable memory.
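    ///
    /// For example (illustrative), a futex shim could obtain its state via
    /// `this.get_sync_or_init(ptr, |_machine| FutexRef::new())`.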
    fn get_sync_or_init<'a, T: 'static>(
        &'a mut self,
        ptr: Pointer,
        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> T,
    ) -> Option<&'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        if !this.ptr_try_get_alloc_id(ptr, 0).ok().is_some_and(|(alloc_id, offset, ..)| {
            let info = this.get_alloc_info(alloc_id);
            info.kind == AllocKind::LiveData && info.mutbl.is_mut() && offset < info.size
        }) {
            return None;
        }
        // This cannot fail now.
        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0).unwrap();
        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc).unwrap();
        // Due to borrow checker reasons, we have to do the lookup twice.
        if alloc_extra.get_sync::<T>(offset).is_none() {
            let new = new(machine);
            alloc_extra.sync.insert(offset, Box::new(new));
        }
        Some(alloc_extra.get_sync::<T>(offset).unwrap())
    }

    /// Lock by setting the mutex owner and increasing the lock count.
    fn mutex_lock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        if let Some(current_owner) = mutex.owner {
            assert_eq!(thread, current_owner, "mutex already locked by another thread");
            assert!(
                mutex.lock_count > 0,
                "invariant violation: lock_count == 0 iff the mutex is unlocked"
            );
        } else {
            mutex.owner = Some(thread);
        }
        mutex.lock_count = mutex.lock_count.strict_add(1);
        this.acquire_clock(&mutex.clock)?;
        interp_ok(())
    }

    /// Try unlocking by decreasing the lock count and returning the old lock
    /// count. If the lock count reaches 0, release the lock and potentially
    /// give it to a new owner. If the lock was not locked by the current thread,
    /// return `None`.
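    ///
    /// For example (illustrative): if the owner called `mutex_lock` twice, the first
    /// `mutex_unlock` returns `Some(2)` and keeps the mutex held, and the second
    /// returns `Some(1)` and actually releases it.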
    fn mutex_unlock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx, Option<usize>> {
        let this = self.eval_context_mut();
        let mut mutex = mutex_ref.0.borrow_mut();
        interp_ok(if let Some(current_owner) = mutex.owner {
            // Mutex is locked.
            if current_owner != this.machine.threads.active_thread() {
                // Only the owner can unlock the mutex.
                return interp_ok(None);
            }
            let old_lock_count = mutex.lock_count;
            mutex.lock_count = old_lock_count.strict_sub(1);
            if mutex.lock_count == 0 {
                mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                // to another thread.

                this.release_clock(|clock| mutex.clock.clone_from(clock))?;
                let thread_id = mutex.queue.pop_front();
                // We need to drop our mutex borrow before unblock_thread
                // because it will be borrowed again in the unblock callback.
                drop(mutex);
                if let Some(thread_id) = thread_id {
                    this.unblock_thread(thread_id, BlockReason::Mutex)?;
                }
            }
            Some(old_lock_count)
        } else {
            // Mutex is not locked.
            None
        })
    }

    /// Put the thread into the queue waiting for the mutex.
    ///
    /// Once the mutex becomes available, and if `retval_dest` is `Some((retval, dest))`,
    /// `retval` will be written to `dest`.
    #[inline]
    fn mutex_enqueue_and_block(
        &mut self,
        mutex_ref: MutexRef,
        retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        mutex.queue.push_back(thread);
        assert!(mutex.owner.is_some(), "queuing on unlocked mutex");
        drop(mutex);
        this.block_thread(
            BlockReason::Mutex,
            None,
            callback!(
                @capture<'tcx> {
                    mutex_ref: MutexRef,
                    retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);

                    assert!(mutex_ref.owner().is_none());
                    this.mutex_lock(&mutex_ref)?;

                    if let Some((retval, dest)) = retval_dest {
                        this.write_scalar(retval, &dest)?;
                    }

                    interp_ok(())
                }
            ),
        );
    }

    /// Read-lock the lock by adding the reader to the list of threads that own
    /// this lock.
    fn rwlock_reader_lock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        trace!("rwlock_reader_lock: now also held (one more time) by {:?}", thread);
        let mut rwlock = rwlock_ref.0.borrow_mut();
        assert!(!rwlock.is_write_locked(), "the lock is write locked");
        let count = rwlock.readers.entry(thread).or_insert(0);
        *count = count.strict_add(1);
        this.acquire_clock(&rwlock.clock_unlocked)?;
        interp_ok(())
    }

    /// Try to read-unlock the lock for the current thread and potentially give the lock to a new owner.
    /// Returns `true` if it succeeded, `false` if the current thread did not hold a read lock.
    fn rwlock_reader_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        match rwlock.readers.entry(thread) {
            Entry::Occupied(mut entry) => {
                let count = entry.get_mut();
                assert!(*count > 0, "rwlock locked with count == 0");
                *count -= 1;
                if *count == 0 {
                    trace!("rwlock_reader_unlock: no longer held by {:?}", thread);
                    entry.remove();
                } else {
                    trace!("rwlock_reader_unlock: held one less time by {:?}", thread);
                }
            }
            Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock
        }
        // Add this to the shared-release clock of all concurrent readers.
        this.release_clock(|clock| rwlock.clock_current_readers.join(clock))?;

        // The thread was a reader. If the lock is not held any more, give it to a writer.
        if rwlock.is_locked().not() {
            // All the readers are finished, so set the writer data-race handle to the value
            // of the union of all reader data-race handles, since the set of readers
            // happens-before the writers.
            let rwlock_ref = &mut *rwlock;
            rwlock_ref.clock_unlocked.clone_from(&rwlock_ref.clock_current_readers);
            // See if there is a thread to unblock.
            if let Some(writer) = rwlock_ref.writer_queue.pop_front() {
                drop(rwlock); // make RefCell available for unblock callback
                this.unblock_thread(writer, BlockReason::RwLock)?;
            }
        }
        interp_ok(true)
    }

    /// Put the reader in the queue waiting for the lock and block it.
    /// Once the lock becomes available, `retval` will be written to `dest`.
    #[inline]
    fn rwlock_enqueue_and_block_reader(
        &mut self,
        rwlock_ref: RwLockRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        rwlock.reader_queue.push_back(thread);
        assert!(rwlock.is_write_locked(), "read-queueing on not write locked rwlock");
        drop(rwlock);
        this.block_thread(
            BlockReason::RwLock,
            None,
            callback!(
                @capture<'tcx> {
                    rwlock_ref: RwLockRef,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_reader_lock(&rwlock_ref)?;
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

    /// Lock by setting the writer that owns the lock.
    #[inline]
    fn rwlock_writer_lock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        trace!("rwlock_writer_lock: now held by {:?}", thread);

        let mut rwlock = rwlock_ref.0.borrow_mut();
        assert!(!rwlock.is_locked(), "the rwlock is already locked");
        rwlock.writer = Some(thread);
        this.acquire_clock(&rwlock.clock_unlocked)?;
        interp_ok(())
    }

    /// Try to unlock an rwlock held by the current thread.
    /// Return `false` if it is not write-locked by the current thread.
    #[inline]
    fn rwlock_writer_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        interp_ok(if let Some(current_writer) = rwlock.writer {
            if current_writer != thread {
                // Only the owner can unlock the rwlock.
                return interp_ok(false);
            }
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: unlocked by {:?}", thread);
            // Record release clock for next lock holder.
            this.release_clock(|clock| rwlock.clock_unlocked.clone_from(clock))?;

            // The thread was a writer.
            //
            // We are prioritizing writers here over readers. As a result, not
            // only can readers starve writers, but writers can also starve
            // readers.
            if let Some(writer) = rwlock.writer_queue.pop_front() {
                drop(rwlock); // make RefCell available for unblock callback
                this.unblock_thread(writer, BlockReason::RwLock)?;
            } else {
                // Take the entire read queue and wake them all up.
                let readers = std::mem::take(&mut rwlock.reader_queue);
                drop(rwlock); // make RefCell available for unblock callback
                for reader in readers {
                    this.unblock_thread(reader, BlockReason::RwLock)?;
                }
            }
            true
        } else {
            false
        })
    }

    /// Put the writer in the queue waiting for the lock.
    /// Once the lock becomes available, `retval` will be written to `dest`.
    #[inline]
    fn rwlock_enqueue_and_block_writer(
        &mut self,
        rwlock_ref: RwLockRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        rwlock.writer_queue.push_back(thread);
        assert!(rwlock.is_locked(), "write-queueing on unlocked rwlock");
        drop(rwlock);
        this.block_thread(
            BlockReason::RwLock,
            None,
            callback!(
                @capture<'tcx> {
                    rwlock_ref: RwLockRef,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_writer_lock(&rwlock_ref)?;
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

    /// Release the mutex and let the current thread wait on the given condition variable.
    /// Once it is signaled, the mutex will be acquired and `retval_succ` will be written to `dest`.
    /// If the timeout happens first, `retval_timeout` will be written to `dest`.
    fn condvar_wait(
        &mut self,
        condvar_ref: CondvarRef,
        mutex_ref: MutexRef,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        retval_succ: Scalar,
        retval_timeout: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(old_locked_count) = this.mutex_unlock(&mutex_ref)? {
            if old_locked_count != 1 {
                throw_unsup_format!(
                    "awaiting a condvar on a mutex acquired multiple times is not supported"
                );
            }
        } else {
            throw_ub_format!(
                "awaiting a condvar on a mutex that is unlocked or owned by a different thread"
            );
        }
        let thread = this.active_thread();

        condvar_ref.0.borrow_mut().waiters.push_back(thread);
        this.block_thread(
            BlockReason::Condvar,
            timeout,
            callback!(
                @capture<'tcx> {
                    condvar_ref: CondvarRef,
                    mutex_ref: MutexRef,
                    retval_succ: Scalar,
                    retval_timeout: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            // The condvar was signaled. Make sure we get the clock for that.
                            this.acquire_clock(&condvar_ref.0.borrow().clock)?;
                            // Try to acquire the mutex.
                            // The timeout only applies to the first wait (until the signal), not for mutex acquisition.
                            this.condvar_reacquire_mutex(mutex_ref, retval_succ, dest)
                        }
                        UnblockKind::TimedOut => {
                            // We have to remove the waiter from the queue again.
                            let thread = this.active_thread();
                            let waiters = &mut condvar_ref.0.borrow_mut().waiters;
                            waiters.retain(|waiter| *waiter != thread);
                            // Now get back the lock.
                            this.condvar_reacquire_mutex(mutex_ref, retval_timeout, dest)
                        }
                    }
                }
            ),
        );
        interp_ok(())
    }

    /// Wake up some thread (if there is any) sleeping on the condition
    /// variable. Returns `true` iff any thread was woken up.
    fn condvar_signal(&mut self, condvar_ref: &CondvarRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let mut condvar = condvar_ref.0.borrow_mut();

        // Each condvar signal happens-before the end of the condvar wait.
        this.release_clock(|clock| condvar.clock.clone_from(clock))?;
        let Some(waiter) = condvar.waiters.pop_front() else {
            return interp_ok(false);
        };
        drop(condvar);
        this.unblock_thread(waiter, BlockReason::Condvar)?;
        interp_ok(true)
    }

    /// Wait for the futex to be signaled, or a timeout. Once the thread is
    /// unblocked, `callback` is called with the unblock reason.
    fn futex_wait(
        &mut self,
        futex_ref: FutexRef,
        bitset: u32,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut futex = futex_ref.0.borrow_mut();
        let waiters = &mut futex.waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push(FutexWaiter { thread, bitset });
        drop(futex);

        this.block_thread(
            BlockReason::Futex,
            timeout,
            callback!(
                @capture<'tcx> {
                    futex_ref: FutexRef,
                    callback: DynUnblockCallback<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            let futex = futex_ref.0.borrow();
                            // Acquire the clock of the futex.
                            this.acquire_clock(&futex.clock)?;
                        },
                        UnblockKind::TimedOut => {
                            // Remove the waiter from the futex.
                            let thread = this.active_thread();
                            let mut futex = futex_ref.0.borrow_mut();
                            futex.waiters.retain(|waiter| waiter.thread != thread);
                        },
                    }

                    callback.call(this, unblock)
                }
            ),
        );
    }

    /// Wake up `count` of the threads in the queue that match any of the bits
    /// in the bitset. Returns how many threads were woken.
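    ///
    /// For example (illustrative): a wake with bitset `0b0101` matches any waiter whose
    /// bitset has bit 0 or bit 2 set, since waiters are selected by `waiter.bitset & bitset != 0`.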
    fn futex_wake(
        &mut self,
        futex_ref: &FutexRef,
        bitset: u32,
        count: usize,
    ) -> InterpResult<'tcx, usize> {
        let this = self.eval_context_mut();
        let mut futex = futex_ref.0.borrow_mut();

        // Each futex-wake happens-before the end of the futex wait.
        this.release_clock(|clock| futex.clock.clone_from(clock))?;

        // Remove `count` of the threads in the queue that match any of the bits in the bitset.
        // We collect all of them before unblocking because the unblock callback may access the
        // futex state to retrieve the remaining number of waiters on macOS.
        let waiters: Vec<_> =
            futex.waiters.extract_if(.., |w| w.bitset & bitset != 0).take(count).collect();
        drop(futex);

        let woken = waiters.len();
        for waiter in waiters {
            this.unblock_thread(waiter.thread, BlockReason::Futex)?;
        }

        interp_ok(woken)
    }
}