miri/concurrency/sync.rs

use std::cell::RefCell;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::default::Default;
use std::ops::Not;
use std::rc::Rc;
use std::time::Duration;

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashMap;
use rustc_index::{Idx, IndexVec};

use super::init_once::InitOnce;
use super::vector_clock::VClock;
use crate::*;

/// We cannot use the `newtype_index!` macro because we have to use 0 as a
/// sentinel value meaning that the identifier is not assigned. This is because
/// the pthreads static initializers initialize memory with zeros (see the
/// `src/shims/sync.rs` file).
macro_rules! declare_id {
    ($name: ident) => {
        /// 0 is used to indicate that the id was not yet assigned and,
        /// therefore, is not a valid identifier.
        #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
        pub struct $name(std::num::NonZero<u32>);

        impl $crate::VisitProvenance for $name {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
        }

        impl Idx for $name {
            fn new(idx: usize) -> Self {
                // We use 0 as a sentinel value (see the comment above) and,
                // therefore, need to shift by one when converting from an index
                // into a vector.
                let shifted_idx = u32::try_from(idx).unwrap().strict_add(1);
                $name(std::num::NonZero::new(shifted_idx).unwrap())
            }
            fn index(self) -> usize {
                // See the comment in `Self::new`.
                // (This cannot underflow because `self.0` is `NonZero<u32>`.)
                usize::try_from(self.0.get() - 1).unwrap()
            }
        }
    };
}
pub(super) use declare_id;
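
// Illustrative note (added; not part of the original file): the sentinel
// encoding above shifts indices by one, so e.g. `RwLockId::new(0)` is stored
// as `NonZero(1)` and `.index()` maps it back to `0`. The all-zero bit pattern
// produced by pthreads static initializers therefore never aliases a valid id.
//
//     let id = RwLockId::new(0); // internally NonZero(1)
//     assert_eq!(id.index(), 0); // round-trips back to the vector index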

/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the mutex was locked by the owner.
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// Mutex clock. This tracks the moment of the last unlock.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct MutexRef(Rc<RefCell<Mutex>>);

impl MutexRef {
    fn new() -> Self {
        MutexRef(Rc::new(RefCell::new(Mutex::default())))
    }
}

impl VisitProvenance for MutexRef {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // Mutex contains no provenance.
    }
}

declare_id!(RwLockId);

/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired
    /// the lock.
    readers: FxHashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// Data race clock for writers. Tracks the happens-before
    /// ordering between each write access to a rwlock and is updated
    /// after a sequence of concurrent readers to track the
    /// happens-before ordering between the set of previous readers
    /// and the current writer.
    /// Contains the clock of the last thread to release a writer
    /// lock or the joined clock of the set of last threads to release
    /// shared reader locks.
    clock_unlocked: VClock,
    /// Data race clock for readers. This is temporary storage
    /// for the combined happens-before ordering between all
    /// concurrent readers and the next writer, and the value
    /// is stored into `clock_unlocked` once all readers are finished.
    /// Has to be stored separately since reader lock acquires
    /// must load the clock of the last write and must not
    /// add happens-before orderings between shared reader
    /// locks.
    /// This is only relevant when there is an active reader.
    clock_current_readers: VClock,
}
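
// Informal example of the reader-clock protocol (added note, derived from
// `rwlock_reader_unlock` below): if readers R1 and R2 unlock concurrently,
// both of their release clocks are joined into `clock_current_readers`; once
// the last reader is gone, that joined clock is copied into `clock_unlocked`,
// so the next writer acquires a clock that happens-after both readers while
// R1 and R2 remain unordered with respect to each other.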

declare_id!(CondvarId);

/// The condition variable state.
#[derive(Default, Debug)]
struct Condvar {
    waiters: VecDeque<ThreadId>,
    /// Tracks the happens-before relationship
    /// between a cond-var signal and a cond-var
    /// wait during a non-spurious signal event.
    /// Contains the clock of the last thread to
    /// perform a condvar-signal.
    clock: VClock,
}

/// The futex state.
#[derive(Default, Debug)]
struct Futex {
    waiters: Vec<FutexWaiter>,
    /// Tracks the happens-before relationship
    /// between a futex-wake and a futex-wait
    /// during a non-spurious wake event.
    /// Contains the clock of the last thread to
    /// perform a futex-wake.
    clock: VClock,
}

#[derive(Default, Clone)]
pub struct FutexRef(Rc<RefCell<Futex>>);

impl FutexRef {
    pub fn waiters(&self) -> usize {
        self.0.borrow().waiters.len()
    }
}

impl VisitProvenance for FutexRef {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // No provenance in `Futex`.
    }
}

/// A thread waiting on a futex.
#[derive(Debug)]
struct FutexWaiter {
    /// The thread that is waiting on this futex.
    thread: ThreadId,
    /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
    bitset: u32,
}

/// The state of all synchronization objects.
#[derive(Default, Debug)]
pub struct SynchronizationObjects {
    rwlocks: IndexVec<RwLockId, RwLock>,
    condvars: IndexVec<CondvarId, Condvar>,
    pub(super) init_onces: IndexVec<InitOnceId, InitOnce>,
}

// Private extension trait for local helper methods.
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn condvar_reacquire_mutex(
        &mut self,
        mutex_ref: &MutexRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if this.mutex_is_locked(mutex_ref) {
            assert_ne!(this.mutex_get_owner(mutex_ref), this.active_thread());
            this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
        } else {
            // We can have it right now!
            this.mutex_lock(mutex_ref);
            // Don't forget to write the return value.
            this.write_scalar(retval, &dest)?;
        }
        interp_ok(())
    }
}

impl SynchronizationObjects {
    pub fn mutex_create(&mut self) -> MutexRef {
        MutexRef::new()
    }

    pub fn rwlock_create(&mut self) -> RwLockId {
        self.rwlocks.push(Default::default())
    }

    pub fn condvar_create(&mut self) -> CondvarId {
        self.condvars.push(Default::default())
    }

    pub fn init_once_create(&mut self) -> InitOnceId {
        self.init_onces.push(Default::default())
    }
}
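
// Note (added, informal): mutexes and futexes are handled through
// reference-counted handles (`MutexRef`, `FutexRef`), while rwlocks, condvars
// and init-onces are referred to by ids into the `IndexVec`s above. A typical
// creation site thus looks like
//
//     let id = this.machine.sync.rwlock_create();
//
// with `id` then passed to the `rwlock_*` methods of `EvalContextExt` below.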

impl<'tcx> AllocExtra<'tcx> {
    fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
        self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
    }
}

/// We designate an `init` field in all primitives.
/// If `init` is set to this, we consider the primitive initialized.
pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Helper for lazily initialized `alloc_extra.sync` data:
    /// this forces an immediate init.
    /// Return a reference to the data in the machine state.
    fn lazy_sync_init<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        data: T,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
        alloc_extra.sync.insert(offset, Box::new(data));
        // Mark this as "initialized".
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        this.write_scalar_atomic(
            Scalar::from_u32(LAZY_INIT_COOKIE),
            &init_field,
            AtomicWriteOrd::Relaxed,
        )?;
        interp_ok(this.get_alloc_extra(alloc)?.get_sync::<T>(offset).unwrap())
    }

    /// Helper for lazily initialized `alloc_extra.sync` data:
    /// Checks if the primitive is initialized:
    /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
    ///   and stores that in `alloc_extra.sync`.
    /// - Otherwise, calls `new_data` to initialize the primitive.
    ///
    /// Return a reference to the data in the machine state.
    fn lazy_sync_get_data<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
        new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        // Check if this is already initialized. Needs to be atomic because we can race with another
        // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
        // So we just try to replace LAZY_INIT_COOKIE with itself.
        let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        let (_init, success) = this
            .atomic_compare_exchange_scalar(
                &init_field,
                &ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
                init_cookie,
                AtomicRwOrd::Relaxed,
                AtomicReadOrd::Relaxed,
                /* can_fail_spuriously */ false,
            )?
            .to_scalar_pair();

        if success.to_bool()? {
            // If it is initialized, it must be found in the "sync primitive" table,
            // or else it has been moved illegally.
            let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
            // Due to borrow checker reasons, we have to do the lookup twice.
            if alloc_extra.get_sync::<T>(offset).is_none() {
                let data = missing_data()?;
                alloc_extra.sync.insert(offset, Box::new(data));
            }
            interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
        } else {
            let data = new_data(this)?;
            this.lazy_sync_init(primitive, init_offset, data)
        }
    }
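
    // Illustrative usage sketch (added; not code from this file). A shim for a
    // pthread-style primitive would typically obtain its synchronization data
    // roughly like this, where `mutex_place` and `init_offset` are hypothetical
    // values computed by the shim from the guest's mutex object:
    //
    //     let mutex_ref = this.lazy_sync_get_data(
    //         &mutex_place,
    //         init_offset,
    //         || throw_ub_format!("mutex was moved after first use"),
    //         |ecx| interp_ok(ecx.machine.sync.mutex_create()),
    //     )?;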

    /// Get the synchronization primitive associated with the given pointer,
    /// or initialize a new one.
    ///
    /// Return `None` if this pointer does not point to at least 1 byte of mutable memory.
    fn get_sync_or_init<'a, T: 'static>(
        &'a mut self,
        ptr: Pointer,
        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> T,
    ) -> Option<&'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        if !this.ptr_try_get_alloc_id(ptr, 0).ok().is_some_and(|(alloc_id, offset, ..)| {
            let info = this.get_alloc_info(alloc_id);
            info.kind == AllocKind::LiveData && info.mutbl.is_mut() && offset < info.size
        }) {
            return None;
        }
        // This cannot fail now.
        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0).unwrap();
        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc).unwrap();
        // Due to borrow checker reasons, we have to do the lookup twice.
        if alloc_extra.get_sync::<T>(offset).is_none() {
            let new = new(machine);
            alloc_extra.sync.insert(offset, Box::new(new));
        }
        Some(alloc_extra.get_sync::<T>(offset).unwrap())
    }
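
    // Illustrative usage sketch (added; not code from this file). A futex shim
    // could use this to associate a `FutexRef` with the guest address being
    // waited on; `futex_ptr` here is a hypothetical pointer taken from the
    // shim's arguments:
    //
    //     let Some(futex_ref) = this.get_sync_or_init(futex_ptr, |_| FutexRef::default())
    //     else {
    //         // The pointer does not point to live, mutable memory.
    //         throw_ub_format!("invalid futex pointer");
    //     };
    //     let futex_ref = futex_ref.clone();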

    #[inline]
    /// Get the id of the thread that currently owns this lock.
    fn mutex_get_owner(&self, mutex_ref: &MutexRef) -> ThreadId {
        mutex_ref.0.borrow().owner.unwrap()
    }

    #[inline]
    /// Check if locked.
    fn mutex_is_locked(&self, mutex_ref: &MutexRef) -> bool {
        mutex_ref.0.borrow().owner.is_some()
    }

    /// Lock by setting the mutex owner and increasing the lock count.
    fn mutex_lock(&mut self, mutex_ref: &MutexRef) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        if let Some(current_owner) = mutex.owner {
            assert_eq!(thread, current_owner, "mutex already locked by another thread");
            assert!(
                mutex.lock_count > 0,
                "invariant violation: lock_count == 0 iff the mutex is unlocked"
            );
        } else {
            mutex.owner = Some(thread);
        }
        mutex.lock_count = mutex.lock_count.strict_add(1);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&mutex.clock, &this.machine.threads);
        }
    }

    /// Try unlocking by decreasing the lock count and returning the old lock
    /// count. If the lock count reaches 0, release the lock and potentially
    /// give it to a new owner. If the lock was not locked by the current thread,
    /// return `None`.
    fn mutex_unlock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx, Option<usize>> {
        let this = self.eval_context_mut();
        let mut mutex = mutex_ref.0.borrow_mut();
        interp_ok(if let Some(current_owner) = mutex.owner {
            // Mutex is locked.
            if current_owner != this.machine.threads.active_thread() {
                // Only the owner can unlock the mutex.
                return interp_ok(None);
            }
            let old_lock_count = mutex.lock_count;
            mutex.lock_count = old_lock_count.strict_sub(1);
            if mutex.lock_count == 0 {
                mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                // to another thread.

                if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                    data_race.release_clock(&this.machine.threads, |clock| {
                        mutex.clock.clone_from(clock)
                    });
                }
                let thread_id = mutex.queue.pop_front();
                // We need to drop our mutex borrow before unblock_thread
                // because it will be borrowed again in the unblock callback.
                drop(mutex);
                if let Some(thread_id) = thread_id {
                    this.unblock_thread(thread_id, BlockReason::Mutex)?;
                }
            }
            Some(old_lock_count)
        } else {
            // Mutex is not locked.
            None
        })
    }

    /// Put the thread into the queue waiting for the mutex.
    ///
    /// Once the mutex becomes available and if `retval_dest` is `Some`,
    /// `retval_dest.0` will be written to `retval_dest.1`.
    #[inline]
    fn mutex_enqueue_and_block(
        &mut self,
        mutex_ref: &MutexRef,
        retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
    ) {
        let this = self.eval_context_mut();
        assert!(this.mutex_is_locked(mutex_ref), "queuing on unlocked mutex");
        let thread = this.active_thread();
        mutex_ref.0.borrow_mut().queue.push_back(thread);
        let mutex_ref = mutex_ref.clone();
        this.block_thread(
            BlockReason::Mutex,
            None,
            callback!(
                @capture<'tcx> {
                    mutex_ref: MutexRef,
                    retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);

                    assert!(!this.mutex_is_locked(&mutex_ref));
                    this.mutex_lock(&mutex_ref);

                    if let Some((retval, dest)) = retval_dest {
                        this.write_scalar(retval, &dest)?;
                    }

                    interp_ok(())
                }
            ),
        );
    }

    #[inline]
    /// Check if locked.
    fn rwlock_is_locked(&self, id: RwLockId) -> bool {
        let this = self.eval_context_ref();
        let rwlock = &this.machine.sync.rwlocks[id];
        trace!(
            "rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
            id,
            rwlock.writer,
            rwlock.readers.len(),
        );
        rwlock.writer.is_some() || rwlock.readers.is_empty().not()
    }

    /// Check if write locked.
    #[inline]
    fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
        let this = self.eval_context_ref();
        let rwlock = &this.machine.sync.rwlocks[id];
        trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
        rwlock.writer.is_some()
    }

    /// Read-lock the lock by adding the `reader` to the list of threads that own
    /// this lock.
    fn rwlock_reader_lock(&mut self, id: RwLockId) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
        trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, thread);
        let rwlock = &mut this.machine.sync.rwlocks[id];
        let count = rwlock.readers.entry(thread).or_insert(0);
        *count = count.strict_add(1);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
        }
    }

    /// Try to read-unlock the lock for the current thread and potentially give the lock to a new owner.
    /// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
    fn rwlock_reader_unlock(&mut self, id: RwLockId) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let rwlock = &mut this.machine.sync.rwlocks[id];
        match rwlock.readers.entry(thread) {
            Entry::Occupied(mut entry) => {
                let count = entry.get_mut();
                assert!(*count > 0, "rwlock locked with count == 0");
                *count -= 1;
                if *count == 0 {
                    trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, thread);
                    entry.remove();
                } else {
                    trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, thread);
                }
            }
            Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock
        }
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            // Add this to the shared-release clock of all concurrent readers.
            data_race.release_clock(&this.machine.threads, |clock| {
                rwlock.clock_current_readers.join(clock)
            });
        }

        // The thread was a reader. If the lock is not held any more, give it to a writer.
        if this.rwlock_is_locked(id).not() {
            // All the readers are finished, so set the writer data-race handle to the value
            // of the union of all reader data race handles, since the set of readers
            // happens-before the next writer.
            let rwlock = &mut this.machine.sync.rwlocks[id];
            rwlock.clock_unlocked.clone_from(&rwlock.clock_current_readers);
            // See if there is a thread to unblock.
            if let Some(writer) = rwlock.writer_queue.pop_front() {
                this.unblock_thread(writer, BlockReason::RwLock(id))?;
            }
        }
        interp_ok(true)
    }

    /// Put the reader in the queue waiting for the lock and block it.
    /// Once the lock becomes available, `retval` will be written to `dest`.
    #[inline]
    fn rwlock_enqueue_and_block_reader(
        &mut self,
        id: RwLockId,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
        this.machine.sync.rwlocks[id].reader_queue.push_back(thread);
        this.block_thread(
            BlockReason::RwLock(id),
            None,
            callback!(
                @capture<'tcx> {
                    id: RwLockId,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_reader_lock(id);
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

    /// Lock by setting the writer that owns the lock.
    #[inline]
    fn rwlock_writer_lock(&mut self, id: RwLockId) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
        trace!("rwlock_writer_lock: {:?} now held by {:?}", id, thread);
        let rwlock = &mut this.machine.sync.rwlocks[id];
        rwlock.writer = Some(thread);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
        }
    }

    /// Try to unlock an rwlock held by the current thread.
    /// Return `false` if it is held by another thread.
    #[inline]
    fn rwlock_writer_unlock(&mut self, id: RwLockId) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let rwlock = &mut this.machine.sync.rwlocks[id];
        interp_ok(if let Some(current_writer) = rwlock.writer {
            if current_writer != thread {
                // Only the owner can unlock the rwlock.
                return interp_ok(false);
            }
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread);
            // Record release clock for next lock holder.
            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                data_race.release_clock(&this.machine.threads, |clock| {
                    rwlock.clock_unlocked.clone_from(clock)
                });
            }
            // The thread was a writer.
            //
            // We prioritize waiting writers over waiting readers here. As a
            // result, writers can starve readers, in addition to readers being
            // able to starve writers.
            if let Some(writer) = rwlock.writer_queue.pop_front() {
                this.unblock_thread(writer, BlockReason::RwLock(id))?;
            } else {
                // Take the entire read queue and wake them all up.
                let readers = std::mem::take(&mut rwlock.reader_queue);
                for reader in readers {
                    this.unblock_thread(reader, BlockReason::RwLock(id))?;
                }
            }
            true
        } else {
            false
        })
    }

    /// Put the writer in the queue waiting for the lock.
    /// Once the lock becomes available, `retval` will be written to `dest`.
    #[inline]
    fn rwlock_enqueue_and_block_writer(
        &mut self,
        id: RwLockId,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
        let thread = this.active_thread();
        this.machine.sync.rwlocks[id].writer_queue.push_back(thread);
        this.block_thread(
            BlockReason::RwLock(id),
            None,
            callback!(
                @capture<'tcx> {
                    id: RwLockId,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_writer_lock(id);
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

    /// Is the condition variable awaited?
    #[inline]
    fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
        let this = self.eval_context_mut();
        !this.machine.sync.condvars[id].waiters.is_empty()
    }

    /// Release the mutex and let the current thread wait on the given condition variable.
    /// Once it is signaled, the mutex will be acquired and `retval_succ` will be written to `dest`.
    /// If the timeout happens first, `retval_timeout` will be written to `dest`.
    fn condvar_wait(
        &mut self,
        condvar: CondvarId,
        mutex_ref: MutexRef,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        retval_succ: Scalar,
        retval_timeout: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(old_locked_count) = this.mutex_unlock(&mutex_ref)? {
            if old_locked_count != 1 {
                throw_unsup_format!(
                    "awaiting a condvar on a mutex acquired multiple times is not supported"
                );
            }
        } else {
            throw_ub_format!(
                "awaiting a condvar on a mutex that is unlocked or owned by a different thread"
            );
        }
        let thread = this.active_thread();
        let waiters = &mut this.machine.sync.condvars[condvar].waiters;
        waiters.push_back(thread);
        this.block_thread(
            BlockReason::Condvar(condvar),
            timeout,
            callback!(
                @capture<'tcx> {
                    condvar: CondvarId,
                    mutex_ref: MutexRef,
                    retval_succ: Scalar,
                    retval_timeout: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            // The condvar was signaled. Make sure we get the clock for that.
                            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                                data_race.acquire_clock(
                                    &this.machine.sync.condvars[condvar].clock,
                                    &this.machine.threads,
                                );
                            }
                            // Try to acquire the mutex.
                            // The timeout only applies to the first wait (until the signal), not for mutex acquisition.
                            this.condvar_reacquire_mutex(&mutex_ref, retval_succ, dest)
                        }
                        UnblockKind::TimedOut => {
                            // We have to remove the waiter from the queue again.
                            let thread = this.active_thread();
                            let waiters = &mut this.machine.sync.condvars[condvar].waiters;
                            waiters.retain(|waiter| *waiter != thread);
                            // Now get back the lock.
                            this.condvar_reacquire_mutex(&mutex_ref, retval_timeout, dest)
                        }
                    }
                }
            ),
        );
        interp_ok(())
    }
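
    // Illustrative usage sketch (added; not code from this file). A
    // pthread_cond_timedwait-style shim might call `condvar_wait` roughly like
    // this, with `condvar_id`, `mutex_ref`, `timeout`, `etimedout_scalar` and
    // `dest` being hypothetical values it computed from its arguments:
    //
    //     this.condvar_wait(
    //         condvar_id,
    //         mutex_ref,
    //         timeout,              // `None` for an untimed wait
    //         Scalar::from_i32(0),  // written to `dest` when signaled
    //         etimedout_scalar,     // written to `dest` on timeout
    //         dest,
    //     )?;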

    /// Wake up some thread (if there is any) sleeping on the condition
    /// variable. Returns `true` iff any thread was woken up.
    fn condvar_signal(&mut self, id: CondvarId) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let condvar = &mut this.machine.sync.condvars[id];

        // Each condvar signal happens-before the end of the corresponding condvar wait.
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock));
        }
        let Some(waiter) = condvar.waiters.pop_front() else {
            return interp_ok(false);
        };
        this.unblock_thread(waiter, BlockReason::Condvar(id))?;
        interp_ok(true)
    }

    /// Wait for the futex to be signaled, or a timeout. Once the thread is
    /// unblocked, `callback` is called with the unblock reason.
    fn futex_wait(
        &mut self,
        futex_ref: FutexRef,
        bitset: u32,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut futex = futex_ref.0.borrow_mut();
        let waiters = &mut futex.waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push(FutexWaiter { thread, bitset });
        drop(futex);

        this.block_thread(
            BlockReason::Futex,
            timeout,
            callback!(
                @capture<'tcx> {
                    futex_ref: FutexRef,
                    callback: DynUnblockCallback<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            let futex = futex_ref.0.borrow();
                            // Acquire the clock of the futex.
                            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                                data_race.acquire_clock(&futex.clock, &this.machine.threads);
                            }
                        },
                        UnblockKind::TimedOut => {
                            // Remove the waiter from the futex.
                            let thread = this.active_thread();
                            let mut futex = futex_ref.0.borrow_mut();
                            futex.waiters.retain(|waiter| waiter.thread != thread);
                        },
                    }

                    callback.call(this, unblock)
                }
            ),
        );
    }
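
    // Illustrative usage sketch (added; not code from this file). A futex shim
    // would typically re-check the futex word and then block with a callback
    // that writes the operation's result once the thread is unblocked; `dest`
    // and the return values here are hypothetical:
    //
    //     this.futex_wait(
    //         futex_ref,
    //         u32::MAX, // no bitset filtering
    //         timeout,
    //         callback!(
    //             @capture<'tcx> { dest: MPlaceTy<'tcx> }
    //             |this, unblock: UnblockKind| {
    //                 match unblock {
    //                     UnblockKind::Ready => this.write_scalar(Scalar::from_i32(0), &dest),
    //                     UnblockKind::TimedOut => this.write_scalar(Scalar::from_i32(-1), &dest),
    //                 }
    //             }
    //         ),
    //     );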

    /// Wake up `count` of the threads in the queue that match any of the bits
    /// in the bitset. Returns how many threads were woken.
    fn futex_wake(
        &mut self,
        futex_ref: &FutexRef,
        bitset: u32,
        count: usize,
    ) -> InterpResult<'tcx, usize> {
        let this = self.eval_context_mut();
        let mut futex = futex_ref.0.borrow_mut();

        // Each futex-wake happens-before the end of the futex wait.
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock));
        }

        // Remove `count` of the threads in the queue that match any of the bits in the bitset.
        // We collect all of them before unblocking because the unblock callback may access the
        // futex state to retrieve the remaining number of waiters on macOS.
        let waiters: Vec<_> =
            futex.waiters.extract_if(.., |w| w.bitset & bitset != 0).take(count).collect();
        drop(futex);

        let woken = waiters.len();
        for waiter in waiters {
            this.unblock_thread(waiter.thread, BlockReason::Futex)?;
        }

        interp_ok(woken)
    }
}
815}