use std::cell::RefCell;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::default::Default;
use std::ops::Not;
use std::rc::Rc;
use std::time::Duration;

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashMap;
use rustc_index::{Idx, IndexVec};

use super::init_once::InitOnce;
use super::vector_clock::VClock;
use crate::*;

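/// Helper macro to declare a newtype ID for a synchronization primitive,
/// e.g. `declare_id!(RwLockId);`. The ID wraps a `NonZero<u32>` so that
/// `Option<Id>` is no larger than `u32`; `Idx::new`/`Idx::index` shift by
/// one to keep index 0 representable.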
macro_rules! declare_id {
    ($name: ident) => {
        #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
        pub struct $name(std::num::NonZero<u32>);

        impl $crate::VisitProvenance for $name {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
        }

        impl Idx for $name {
            fn new(idx: usize) -> Self {
                // Shift by one so that index 0 fits into the `NonZero` representation.
                let shifted_idx = u32::try_from(idx).unwrap().strict_add(1);
                $name(std::num::NonZero::new(shifted_idx).unwrap())
            }
            fn index(self) -> usize {
                // Undo the shift from `new`.
                usize::try_from(self.0.get() - 1).unwrap()
            }
        }
    };
}
pub(super) use declare_id;

/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the owning thread has locked this mutex (mutexes can be recursive).
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// The release clock of the most recent unlock; acquired by the next thread that locks.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct MutexRef(Rc<RefCell<Mutex>>);

impl MutexRef {
    fn new() -> Self {
        MutexRef(Rc::new(RefCell::new(Mutex::default())))
    }
}

impl VisitProvenance for MutexRef {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // `Mutex` contains no provenance, so there is nothing to visit.
    }
}

declare_id!(RwLockId);

/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired it.
    readers: FxHashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// The release clock from the last time the lock was fully unlocked, i.e. the
    /// last writer unlock or the unlock of the last concurrent reader. Acquired
    /// by every thread that subsequently takes the lock.
    clock_unlocked: VClock,
    /// Joins the release clocks of all readers in the current read phase; once
    /// the last of them unlocks, this becomes the new `clock_unlocked`.
    clock_current_readers: VClock,
}

declare_id!(CondvarId);

/// The conditional variable state.
#[derive(Default, Debug)]
struct Condvar {
    /// The threads currently waiting on this conditional variable.
    waiters: VecDeque<ThreadId>,
    /// The release clock of the most recent signal; acquired by the thread it wakes.
    clock: VClock,
}

/// The futex state.
#[derive(Default, Debug)]
struct Futex {
    /// The threads currently waiting on this futex.
    waiters: Vec<FutexWaiter>,
    /// The release clock of the most recent wake; acquired by the woken threads.
    clock: VClock,
}

#[derive(Default, Clone)]
pub struct FutexRef(Rc<RefCell<Futex>>);

impl FutexRef {
    pub fn waiters(&self) -> usize {
        self.0.borrow().waiters.len()
    }
}

impl VisitProvenance for FutexRef {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // `Futex` contains no provenance, so there is nothing to visit.
    }
}

/// A thread waiting on a futex.
#[derive(Debug)]
struct FutexWaiter {
    /// The thread that is waiting on this futex.
    thread: ThreadId,
    /// The bitset used by `futex_wake`: this waiter is only woken if the wake
    /// bitset intersects with this one.
    bitset: u32,
}

/// All synchronization objects that are tracked by ID rather than by address.
#[derive(Default, Debug)]
pub struct SynchronizationObjects {
    rwlocks: IndexVec<RwLockId, RwLock>,
    condvars: IndexVec<CondvarId, Condvar>,
    pub(super) init_onces: IndexVec<InitOnceId, InitOnce>,
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
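/// Private helpers used by the synchronization primitives below.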
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Reacquire the mutex associated with a condvar after we got woken up,
    /// writing `retval` to `dest` once the mutex is held.
    fn condvar_reacquire_mutex(
        &mut self,
        mutex_ref: &MutexRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if this.mutex_is_locked(mutex_ref) {
            assert_ne!(this.mutex_get_owner(mutex_ref), this.active_thread());
            this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
        } else {
            // We can have it right away.
            this.mutex_lock(mutex_ref);
            // Don't forget to write the return value.
            this.write_scalar(retval, &dest)?;
        }
        interp_ok(())
    }
}

impl SynchronizationObjects {
    pub fn mutex_create(&mut self) -> MutexRef {
        MutexRef::new()
    }
    pub fn rwlock_create(&mut self) -> RwLockId {
        self.rwlocks.push(Default::default())
    }

    pub fn condvar_create(&mut self) -> CondvarId {
        self.condvars.push(Default::default())
    }

    pub fn init_once_create(&mut self) -> InitOnceId {
        self.init_onces.push(Default::default())
    }
}

impl<'tcx> AllocExtra<'tcx> {
    fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
        self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
    }
}

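/// We designate an `init` field in all lazily-initialized synchronization primitives.
/// If it is set to this cookie value, the primitive is considered initialized.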
pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
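// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.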
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
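    /// Helper for lazily initialized `alloc_extra.sync` data:
    /// this forces an immediate init, and returns a reference to the data in
    /// the machine state.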
    fn lazy_sync_init<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        data: T,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
        alloc_extra.sync.insert(offset, Box::new(data));
        // Mark this as initialized.
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        this.write_scalar_atomic(
            Scalar::from_u32(LAZY_INIT_COOKIE),
            &init_field,
            AtomicWriteOrd::Relaxed,
        )?;
        interp_ok(this.get_alloc_extra(alloc)?.get_sync::<T>(offset).unwrap())
    }

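    /// Helper for lazily initialized `alloc_extra.sync` data:
    /// checks if the primitive is already initialized.
    /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data`
    ///   and stores the result if the data is missing.
    /// - Otherwise, calls `new_data` to initialize the primitive.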
    fn lazy_sync_get_data<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
        new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        // Check if this is already initialized. Needs to be atomic because we can race with
        // another thread initializing. Needs to be an RMW operation to ensure we read the
        // *latest* value. So we just try to replace the cookie with itself.
        let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        let (_init, success) = this
            .atomic_compare_exchange_scalar(
                &init_field,
                &ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
                init_cookie,
                AtomicRwOrd::Relaxed,
                AtomicReadOrd::Relaxed,
                false,
            )?
            .to_scalar_pair();

        if success.to_bool()? {
            // The primitive is initialized, so fetch its data.
            let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
            if alloc_extra.get_sync::<T>(offset).is_none() {
                // The data is missing; re-create it via `missing_data`.
                let data = missing_data()?;
                alloc_extra.sync.insert(offset, Box::new(data));
            }
            interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
        } else {
            let data = new_data(this)?;
            this.lazy_sync_init(primitive, init_offset, data)
        }
    }

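    /// Get the synchronization primitive associated with the given pointer,
    /// or initialize a new one.
    ///
    /// Return `None` if this pointer does not point to at least 1 byte of mutable memory.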
    fn get_sync_or_init<'a, T: 'static>(
        &'a mut self,
        ptr: Pointer,
        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> T,
    ) -> Option<&'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        if !this.ptr_try_get_alloc_id(ptr, 0).ok().is_some_and(|(alloc_id, offset, ..)| {
            let info = this.get_alloc_info(alloc_id);
            info.kind == AllocKind::LiveData && info.mutbl.is_mut() && offset < info.size
        }) {
            return None;
        }
        // We just checked that the pointer is valid, so this cannot fail.
        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0).unwrap();
        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc).unwrap();
        if alloc_extra.get_sync::<T>(offset).is_none() {
            let new = new(machine);
            alloc_extra.sync.insert(offset, Box::new(new));
        }
        Some(alloc_extra.get_sync::<T>(offset).unwrap())
    }

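    /// Get the id of the thread that currently owns this mutex.
    /// Panics if the mutex is not locked.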
    #[inline]
    fn mutex_get_owner(&self, mutex_ref: &MutexRef) -> ThreadId {
        mutex_ref.0.borrow().owner.unwrap()
    }

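    /// Check if locked.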
    #[inline]
    fn mutex_is_locked(&self, mutex_ref: &MutexRef) -> bool {
        mutex_ref.0.borrow().owner.is_some()
    }

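    /// Lock by setting the mutex owner and increasing the lock count.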
    fn mutex_lock(&mut self, mutex_ref: &MutexRef) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        if let Some(current_owner) = mutex.owner {
            assert_eq!(thread, current_owner, "mutex already locked by another thread");
            assert!(
                mutex.lock_count > 0,
                "invariant violation: lock_count == 0 iff the mutex is unlocked"
            );
        } else {
            mutex.owner = Some(thread);
        }
        mutex.lock_count = mutex.lock_count.strict_add(1);
        if let Some(data_race) = &this.machine.data_race {
            data_race.acquire_clock(&mutex.clock, &this.machine.threads);
        }
    }

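    /// Try unlocking by decreasing the lock count and returning the old lock count.
    /// If the lock count reaches 0, release the lock and potentially give it to a new owner.
    /// If the lock was not locked by the current thread, return `None`.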
    fn mutex_unlock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx, Option<usize>> {
        let this = self.eval_context_mut();
        let mut mutex = mutex_ref.0.borrow_mut();
        interp_ok(if let Some(current_owner) = mutex.owner {
            // Mutex is locked.
            if current_owner != this.machine.threads.active_thread() {
                // Only the owner can unlock the mutex.
                return interp_ok(None);
            }
            let old_lock_count = mutex.lock_count;
            mutex.lock_count = old_lock_count.strict_sub(1);
            if mutex.lock_count == 0 {
                mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                // to another thread.
                if let Some(data_race) = &this.machine.data_race {
                    data_race.release_clock(&this.machine.threads, |clock| {
                        mutex.clock.clone_from(clock)
                    });
                }
                let thread_id = mutex.queue.pop_front();
                // We need to drop our mutex borrow before unblocking the thread,
                // since the unblock callback will lock the mutex again.
                drop(mutex);
                if thread_id.is_some() {
                    this.unblock_thread(thread_id.unwrap(), BlockReason::Mutex)?;
                }
            }
            Some(old_lock_count)
        } else {
            // Mutex is not locked.
            None
        })
    }

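    /// Put the thread into the queue waiting for the mutex.
    ///
    /// Once the mutex becomes available and if a `retval_dest` is provided,
    /// `retval_dest.0` will be written to `retval_dest.1`.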
    #[inline]
    fn mutex_enqueue_and_block(
        &mut self,
        mutex_ref: &MutexRef,
        retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
    ) {
        let this = self.eval_context_mut();
        assert!(this.mutex_is_locked(mutex_ref), "queuing on unlocked mutex");
        let thread = this.active_thread();
        mutex_ref.0.borrow_mut().queue.push_back(thread);
        let mutex_ref = mutex_ref.clone();
        this.block_thread(
            BlockReason::Mutex,
            None,
            callback!(
                @capture<'tcx> {
                    mutex_ref: MutexRef,
                    retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);

                    assert!(!this.mutex_is_locked(&mutex_ref));
                    this.mutex_lock(&mutex_ref);

                    if let Some((retval, dest)) = retval_dest {
                        this.write_scalar(retval, &dest)?;
                    }

                    interp_ok(())
                }
            ),
        );
    }

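    /// Check if locked.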
    #[inline]
    fn rwlock_is_locked(&self, id: RwLockId) -> bool {
        let this = self.eval_context_ref();
        let rwlock = &this.machine.sync.rwlocks[id];
        trace!(
            "rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
            id,
            rwlock.writer,
            rwlock.readers.len(),
        );
        rwlock.writer.is_some() || rwlock.readers.is_empty().not()
    }

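    /// Check if write locked.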
    #[inline]
    fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
        let this = self.eval_context_ref();
        let rwlock = &this.machine.sync.rwlocks[id];
        trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
        rwlock.writer.is_some()
    }

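    /// Read-lock the lock by adding the reader to the list of threads that own
    /// this lock.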
    fn rwlock_reader_lock(&mut self, id: RwLockId) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
        trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, thread);
        let rwlock = &mut this.machine.sync.rwlocks[id];
        let count = rwlock.readers.entry(thread).or_insert(0);
        *count = count.strict_add(1);
        if let Some(data_race) = &this.machine.data_race {
            data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
        }
    }

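    /// Try read-unlock the lock for the current thread and potentially give the
    /// lock to a new owner. Returns `true` if succeeded, `false` if this thread
    /// did not hold the lock.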
    fn rwlock_reader_unlock(&mut self, id: RwLockId) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let rwlock = &mut this.machine.sync.rwlocks[id];
        match rwlock.readers.entry(thread) {
            Entry::Occupied(mut entry) => {
                let count = entry.get_mut();
                assert!(*count > 0, "rwlock locked with count == 0");
                *count -= 1;
                if *count == 0 {
                    trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, thread);
                    entry.remove();
                } else {
                    trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, thread);
                }
            }
            Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock
        }
        if let Some(data_race) = &this.machine.data_race {
            // Add this to the shared-release clock of all concurrent readers.
            data_race.release_clock(&this.machine.threads, |clock| {
                rwlock.clock_current_readers.join(clock)
            });
        }

        // If this was the last reader, wake up waiting writers.
        if this.rwlock_is_locked(id).not() {
            // All the readers are finished, so set the unlocked clock to the
            // joined clock of the current readers.
            let rwlock = &mut this.machine.sync.rwlocks[id];
            rwlock.clock_unlocked.clone_from(&rwlock.clock_current_readers);
            if let Some(writer) = rwlock.writer_queue.pop_front() {
                this.unblock_thread(writer, BlockReason::RwLock(id))?;
            }
        }
        interp_ok(true)
    }

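    /// Put the reader in the queue waiting for the lock and block it.
    /// Once the lock becomes available, `retval` will be written to `dest`.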
    #[inline]
    fn rwlock_enqueue_and_block_reader(
        &mut self,
        id: RwLockId,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        assert!(
            this.rwlock_is_write_locked(id),
            "read-queueing on an rwlock that is not write-locked"
        );
        this.machine.sync.rwlocks[id].reader_queue.push_back(thread);
        this.block_thread(
            BlockReason::RwLock(id),
            None,
            callback!(
                @capture<'tcx> {
                    id: RwLockId,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_reader_lock(id);
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

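    /// Lock by setting the writer that owns the lock.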
    #[inline]
    fn rwlock_writer_lock(&mut self, id: RwLockId) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
        trace!("rwlock_writer_lock: {:?} now held by {:?}", id, thread);
        let rwlock = &mut this.machine.sync.rwlocks[id];
        rwlock.writer = Some(thread);
        if let Some(data_race) = &this.machine.data_race {
            data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
        }
    }

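    /// Try to unlock an rwlock held by the current thread.
    /// Return `false` if it is held by another thread.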
    #[inline]
    fn rwlock_writer_unlock(&mut self, id: RwLockId) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let rwlock = &mut this.machine.sync.rwlocks[id];
        interp_ok(if let Some(current_writer) = rwlock.writer {
            if current_writer != thread {
                // Only the owner can unlock the rwlock.
                return interp_ok(false);
            }
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread);
            // Record the release clock for the next lock holder.
            if let Some(data_race) = &this.machine.data_race {
                data_race.release_clock(&this.machine.threads, |clock| {
                    rwlock.clock_unlocked.clone_from(clock)
                });
            }
            // We prioritize writers here against the readers. As a result,
            // a writer can starve the readers.
            if let Some(writer) = rwlock.writer_queue.pop_front() {
                this.unblock_thread(writer, BlockReason::RwLock(id))?;
            } else {
                // No writer is waiting: wake up all readers instead.
                let readers = std::mem::take(&mut rwlock.reader_queue);
                for reader in readers {
                    this.unblock_thread(reader, BlockReason::RwLock(id))?;
                }
            }
            true
        } else {
            false
        })
    }

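    /// Put the writer in the queue waiting for the lock and block it.
    /// Once the lock becomes available, `retval` will be written to `dest`.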
    #[inline]
    fn rwlock_enqueue_and_block_writer(
        &mut self,
        id: RwLockId,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
        let thread = this.active_thread();
        this.machine.sync.rwlocks[id].writer_queue.push_back(thread);
        this.block_thread(
            BlockReason::RwLock(id),
            None,
            callback!(
                @capture<'tcx> {
                    id: RwLockId,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_writer_lock(id);
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

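    /// Is the conditional variable awaited?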
    #[inline]
    fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
        let this = self.eval_context_mut();
        !this.machine.sync.condvars[id].waiters.is_empty()
    }

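    /// Wait for the conditional variable: lay the current thread to sleep until
    /// it gets signaled, after releasing the given mutex. On success, `retval_succ`
    /// is written to `dest`; if the timeout hits first, `retval_timeout` is written
    /// instead. The mutex is reacquired in both cases.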
    fn condvar_wait(
        &mut self,
        condvar: CondvarId,
        mutex_ref: MutexRef,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        retval_succ: Scalar,
        retval_timeout: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(old_locked_count) = this.mutex_unlock(&mutex_ref)? {
            if old_locked_count != 1 {
                throw_unsup_format!(
                    "awaiting a condvar on a mutex acquired multiple times is not supported"
                );
            }
        } else {
            throw_ub_format!(
                "awaiting a condvar on a mutex that is unlocked or owned by a different thread"
            );
        }
        let thread = this.active_thread();
        let waiters = &mut this.machine.sync.condvars[condvar].waiters;
        waiters.push_back(thread);
        this.block_thread(
            BlockReason::Condvar(condvar),
            timeout,
            callback!(
                @capture<'tcx> {
                    condvar: CondvarId,
                    mutex_ref: MutexRef,
                    retval_succ: Scalar,
                    retval_timeout: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            // The condvar was signaled. Make sure we get the clock for that.
                            if let Some(data_race) = &this.machine.data_race {
                                data_race.acquire_clock(
                                    &this.machine.sync.condvars[condvar].clock,
                                    &this.machine.threads,
                                );
                            }
                            // Try to acquire the mutex.
                            // The timeout only applies to the wait itself, not to acquiring the mutex.
                            this.condvar_reacquire_mutex(&mutex_ref, retval_succ, dest)
                        }
                        UnblockKind::TimedOut => {
                            // We have to remove the waiter from the queue again.
                            let thread = this.active_thread();
                            let waiters = &mut this.machine.sync.condvars[condvar].waiters;
                            waiters.retain(|waiter| *waiter != thread);
                            // Now get back the lock.
                            this.condvar_reacquire_mutex(&mutex_ref, retval_timeout, dest)
                        }
                    }
                }
            ),
        );
        interp_ok(())
    }

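    /// Wake up some thread (if there is any) sleeping on the conditional
    /// variable. Returns `true` iff any thread was woken up.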
    fn condvar_signal(&mut self, id: CondvarId) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let condvar = &mut this.machine.sync.condvars[id];
        let data_race = &this.machine.data_race;

        if let Some(data_race) = data_race {
            // Each condvar signal happens-before the end of the condvar wake.
            data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock));
        }
        let Some(waiter) = condvar.waiters.pop_front() else {
            return interp_ok(false);
        };
        this.unblock_thread(waiter, BlockReason::Condvar(id))?;
        interp_ok(true)
    }

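    /// Block the current thread, waiting for the futex to be woken (subject to
    /// `bitset`) or for the timeout to expire. `callback` runs once the thread
    /// is unblocked, for both the wake and the timeout case.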
    fn futex_wait(
        &mut self,
        futex_ref: FutexRef,
        bitset: u32,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut futex = futex_ref.0.borrow_mut();
        let waiters = &mut futex.waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push(FutexWaiter { thread, bitset });
        drop(futex);

        this.block_thread(
            BlockReason::Futex,
            timeout,
            callback!(
                @capture<'tcx> {
                    futex_ref: FutexRef,
                    callback: DynUnblockCallback<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            let futex = futex_ref.0.borrow();
                            // Acquire the clock of the futex.
                            if let Some(data_race) = &this.machine.data_race {
                                data_race.acquire_clock(&futex.clock, &this.machine.threads);
                            }
                        },
                        UnblockKind::TimedOut => {
                            // Remove the waiter from the futex.
                            let thread = this.active_thread();
                            let mut futex = futex_ref.0.borrow_mut();
                            futex.waiters.retain(|waiter| waiter.thread != thread);
                        },
                    }

                    callback.call(this, unblock)
                }
            ),
        );
    }

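    /// Wake up `count` of the threads in the queue that match any of the bits
    /// in the `bitset`. Returns how many threads were woken up.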
    fn futex_wake(
        &mut self,
        futex_ref: &FutexRef,
        bitset: u32,
        count: usize,
    ) -> InterpResult<'tcx, usize> {
        let this = self.eval_context_mut();
        let mut futex = futex_ref.0.borrow_mut();
        let data_race = &this.machine.data_race;

        if let Some(data_race) = data_race {
            // Each futex-wake happens-before the end of the futex wait.
            data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock));
        }

        // Remove `count` of the threads in the queue that match any of the bits in the bitset.
        // We collect them all before unblocking because the unblock callback may access the
        // futex state again.
        let waiters: Vec<_> =
            futex.waiters.extract_if(.., |w| w.bitset & bitset != 0).take(count).collect();
        drop(futex);

        let woken = waiters.len();
        for waiter in waiters {
            this.unblock_thread(waiter.thread, BlockReason::Futex)?;
        }

        interp_ok(woken)
    }
}