use std::cell::{Cell, Ref, RefCell, RefMut};
use std::fmt::Debug;
use std::mem;

use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir;
use rustc_middle::ty::Ty;
use rustc_span::Span;

use super::vector_clock::{VClock, VTimestamp, VectorIdx};
use super::weak_memory::EvalContextExt as _;
use crate::diagnostics::RacingOp;
use crate::*;

pub type AllocState = VClockAlloc;

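/// Valid atomic read-write (read-modify-write) orderings; mirrors the orderings of `std::sync::atomic::Ordering`.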
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRwOrd {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

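/// Valid atomic read orderings, a subset of `std::sync::atomic::Ordering`.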
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOrd {
    Relaxed,
    Acquire,
    SeqCst,
}

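/// Valid atomic write orderings, a subset of `std::sync::atomic::Ordering`.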
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOrd {
    Relaxed,
    Release,
    SeqCst,
}

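/// Valid atomic fence orderings, a subset of `std::sync::atomic::Ordering`.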
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOrd {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

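/// The set of vector clocks tracked for one thread: its main happens-before
/// `clock`, the clocks accumulated for acquire/release fences, and the clocks
/// used when handling SeqCst writes and reads.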
#[derive(Clone, Default, Debug)]
pub(super) struct ThreadClockSet {
    pub(super) clock: VClock,

    fence_acquire: VClock,

    fence_release: VClock,

    pub(super) write_seqcst: VClock,

    pub(super) read_seqcst: VClock,
}

impl ThreadClockSet {
    #[inline]
    fn apply_release_fence(&mut self) {
        self.fence_release.clone_from(&self.clock);
    }

    #[inline]
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    #[inline]
    fn increment_clock(&mut self, index: VectorIdx, current_span: Span) {
        self.clock.increment_index(index, current_span);
    }

    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
}

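/// Marker error returned by the race-detection routines below when a data race
/// is found; callers turn it into a full diagnostic via `report_data_race`.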
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct DataRace;

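/// Per-cell clocks for atomic accesses: the vector clocks of atomic reads and
/// writes to this cell, the synchronization clock used for release/acquire
/// semantics, and the access size seen so far (`None` once differently-sized
/// reads have been permitted).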
#[derive(Clone, PartialEq, Eq, Debug)]
struct AtomicMemoryCellClocks {
    read_vector: VClock,

    write_vector: VClock,

    sync_vector: VClock,

    size: Option<Size>,
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AtomicAccessType {
    Load(AtomicReadOrd),
    Store,
    Rmw,
}

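/// The kind of a non-atomic read, distinguished so diagnostics can tell plain
/// reads from reads performed as part of a retag.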
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum NaReadType {
    Read,

    Retag,
}

impl NaReadType {
    fn description(self) -> &'static str {
        match self {
            NaReadType::Read => "non-atomic read",
            NaReadType::Retag => "retag read",
        }
    }
}

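/// The kind of a non-atomic write: allocation, plain write, retag write, or
/// deallocation. Used to make race diagnostics more precise.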
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum NaWriteType {
    Allocate,

    Write,

    Retag,

    Deallocate,
}

impl NaWriteType {
    fn description(self) -> &'static str {
        match self {
            NaWriteType::Allocate => "creating a new allocation",
            NaWriteType::Write => "non-atomic write",
            NaWriteType::Retag => "retag write",
            NaWriteType::Deallocate => "deallocation",
        }
    }
}

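/// The general shape of a memory access (atomic or non-atomic, read or write),
/// as reported in data-race diagnostics.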
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AccessType {
    NaRead(NaReadType),
    NaWrite(NaWriteType),
    AtomicLoad,
    AtomicStore,
    AtomicRmw,
}

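/// The clocks tracked for every memory cell: the last non-atomic write (with
/// its kind), the clock of non-atomic reads, and lazily allocated atomic
/// clocks for cells that are accessed atomically.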
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
    write: (VectorIdx, VTimestamp),

    write_type: NaWriteType,

    read: VClock,

    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}

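/// Extra per-thread metadata: the vector index currently assigned to the
/// thread (if any) and the clock it released on termination, which is acquired
/// by whoever joins it.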
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
    vector_index: Option<VectorIdx>,

    termination_vector_clock: Option<VClock>,
}

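/// Global state of the data-race detector: the per-vector-index thread clocks,
/// the thread <-> vector-index mappings, the set of indices eligible for reuse
/// after their thread terminated, and the clocks needed for SeqCst fences and
/// writes. Race detection is skipped while `multi_threaded` is `false` or an
/// explicitly race-free action is ongoing.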
#[derive(Debug, Clone)]
pub struct GlobalState {
    multi_threaded: Cell<bool>,

    ongoing_action_data_race_free: Cell<bool>,

    vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,

    vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,

    thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,

    reuse_candidates: RefCell<FxHashSet<VectorIdx>>,

    last_sc_fence: RefCell<VClock>,

    last_sc_write_per_thread: RefCell<VClock>,

    pub track_outdated_loads: bool,
}

impl VisitProvenance for GlobalState {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

impl AccessType {
    fn description(self, ty: Option<Ty<'_>>, size: Option<Size>) -> String {
        let mut msg = String::new();

        if let Some(size) = size {
            if size == Size::ZERO {
                assert!(self == AccessType::AtomicLoad);
                assert!(ty.is_none());
                return format!("multiple differently-sized atomic loads, including one load");
            }
            msg.push_str(&format!("{}-byte {}", size.bytes(), msg))
        }

        msg.push_str(match self {
            AccessType::NaRead(w) => w.description(),
            AccessType::NaWrite(w) => w.description(),
            AccessType::AtomicLoad => "atomic load",
            AccessType::AtomicStore => "atomic store",
            AccessType::AtomicRmw => "atomic read-modify-write",
        });

        if let Some(ty) = ty {
            msg.push_str(&format!(" of type `{}`", ty));
        }

        msg
    }

    fn is_atomic(self) -> bool {
        match self {
            AccessType::AtomicLoad | AccessType::AtomicStore | AccessType::AtomicRmw => true,
            AccessType::NaRead(_) | AccessType::NaWrite(_) => false,
        }
    }

    fn is_read(self) -> bool {
        match self {
            AccessType::AtomicLoad | AccessType::NaRead(_) => true,
            AccessType::NaWrite(_) | AccessType::AtomicStore | AccessType::AtomicRmw => false,
        }
    }

    fn is_retag(self) -> bool {
        matches!(
            self,
            AccessType::NaRead(NaReadType::Retag) | AccessType::NaWrite(NaWriteType::Retag)
        )
    }
}

impl AtomicMemoryCellClocks {
    fn new(size: Size) -> Self {
        AtomicMemoryCellClocks {
            read_vector: Default::default(),
            write_vector: Default::default(),
            sync_vector: Default::default(),
            size: Some(size),
        }
    }
}

impl MemoryCellClocks {
    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
        MemoryCellClocks {
            read: VClock::default(),
            write: (alloc_index, alloc),
            write_type: NaWriteType::Allocate,
            atomic_ops: None,
        }
    }

    #[inline]
    fn write_was_before(&self, other: &VClock) -> bool {
        self.write.1 <= other[self.write.0]
    }

    #[inline]
    fn write(&self) -> VClock {
        VClock::new_with_index(self.write.0, self.write.1)
    }

    #[inline]
    fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
        self.atomic_ops.as_deref()
    }

    #[inline]
    fn atomic_mut_unwrap(&mut self) -> &mut AtomicMemoryCellClocks {
        self.atomic_ops.as_deref_mut().unwrap()
    }

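    /// Checks that an atomic access of the given `size` is compatible with the
    /// sizes of earlier atomic accesses to this cell, lazily creating the
    /// atomic clocks on first use. Returns `Err(DataRace)` for a racy
    /// mixed-size access.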
    fn atomic_access(
        &mut self,
        thread_clocks: &ThreadClockSet,
        size: Size,
        write: bool,
    ) -> Result<&mut AtomicMemoryCellClocks, DataRace> {
        match self.atomic_ops {
            Some(ref mut atomic) => {
                if atomic.size == Some(size) {
                    Ok(atomic)
                } else if atomic.read_vector <= thread_clocks.clock
                    && atomic.write_vector <= thread_clocks.clock
                {
                    atomic.size = Some(size);
                    Ok(atomic)
                } else if !write && atomic.write_vector <= thread_clocks.clock {
                    atomic.size = None;
                    Ok(atomic)
                } else {
                    Err(DataRace)
                }
            }
            None => {
                self.atomic_ops = Some(Box::new(AtomicMemoryCellClocks::new(size)));
                Ok(self.atomic_ops.as_mut().unwrap())
            }
        }
    }

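    /// Updates the cell clocks for an atomic load with acquire semantics:
    /// detects races against earlier accesses and joins the cell's
    /// synchronization clock into the thread's clock.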
    fn load_acquire(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(thread_clocks, index, access_size)?;
        if let Some(atomic) = self.atomic() {
            thread_clocks.clock.join(&atomic.sync_vector);
        }
        Ok(())
    }

    fn load_relaxed(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(thread_clocks, index, access_size)?;
        if let Some(atomic) = self.atomic() {
            thread_clocks.fence_acquire.join(&atomic.sync_vector);
        }
        Ok(())
    }

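    /// Updates the cell clocks for an atomic store with release semantics:
    /// detects races and replaces the cell's synchronization clock with the
    /// storing thread's clock.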
    fn store_release(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.clone_from(&thread_clocks.clock);
        Ok(())
    }

    fn store_relaxed(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;

        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.clone_from(&thread_clocks.fence_release);
        Ok(())
    }

    fn rmw_release(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.join(&thread_clocks.clock);
        Ok(())
    }

    fn rmw_relaxed(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.join(&thread_clocks.fence_release);
        Ok(())
    }

    fn atomic_read_detect(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
        let atomic = self.atomic_access(thread_clocks, access_size, false)?;
        atomic.read_vector.set_at_index(&thread_clocks.clock, index);
        if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) }
    }

    fn atomic_write_detect(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
        let atomic = self.atomic_access(thread_clocks, access_size, true)?;
        atomic.write_vector.set_at_index(&thread_clocks.clock, index);
        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
            Ok(())
        } else {
            Err(DataRace)
        }
    }

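    /// Detects races for a non-atomic read by this thread: the read is allowed
    /// only if the last write (and, if present, all atomic writes) to this
    /// cell happen-before the reading thread's clock. On success the read
    /// clock is updated.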
    fn read_race_detect(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        read_type: NaReadType,
        current_span: Span,
    ) -> Result<(), DataRace> {
        trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        thread_clocks.clock.index_mut(index).set_read_type(read_type);
        if self.write_was_before(&thread_clocks.clock) {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= thread_clocks.clock
            } else {
                true
            };
            self.read.set_at_index(&thread_clocks.clock, index);
            if race_free { Ok(()) } else { Err(DataRace) }
        } else {
            Err(DataRace)
        }
    }

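    /// Detects races for a non-atomic write by this thread: the write is
    /// allowed only if all previous reads and writes (atomic and non-atomic)
    /// to this cell happen-before the writing thread's clock. On success the
    /// write clock and write type are updated and the read clock is reset.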
    fn write_race_detect(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        write_type: NaWriteType,
        current_span: Span,
    ) -> Result<(), DataRace> {
        trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= thread_clocks.clock
                    && atomic.read_vector <= thread_clocks.clock
            } else {
                true
            };
            self.write = (index, thread_clocks.clock[index]);
            self.write_type = write_type;
            if race_free {
                self.read.set_zero_vector();
                Ok(())
            } else {
                Err(DataRace)
            }
        } else {
            Err(DataRace)
        }
    }
}

impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
    fn read_scalar_atomic(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_ref();
        this.atomic_access_check(place, AtomicAccessType::Load(atomic))?;
        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
        let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, || {
            this.validate_atomic_load(place, atomic)
        })?;
        interp_ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
    }

    fn write_scalar_atomic(
        &mut self,
        val: Scalar,
        dest: &MPlaceTy<'tcx>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.atomic_access_check(dest, AtomicAccessType::Store)?;

        let old_val = this.run_for_validation(|this| this.read_scalar(dest)).discard_err();
        this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
        this.validate_atomic_store(dest, atomic)?;
        this.buffered_atomic_write(val, dest, atomic, old_val)
    }

    fn atomic_rmw_op_immediate(
        &mut self,
        place: &MPlaceTy<'tcx>,
        rhs: &ImmTy<'tcx>,
        op: mir::BinOp,
        not: bool,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

        let val = this.binary_op(op, &old, rhs)?;
        let val = if not { this.unary_op(mir::UnOp::Not, &val)? } else { val };
        this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(val.to_scalar(), place, atomic, old.to_scalar())?;
        interp_ok(old)
    }

    fn atomic_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new: Scalar,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
        this.allow_data_races_mut(|this| this.write_scalar(new, place))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new, place, atomic, old)?;
        interp_ok(old)
    }

    fn atomic_min_max_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        rhs: ImmTy<'tcx>,
        min: bool,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
        let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;

        #[rustfmt::skip]
        let new_val = if min {
            if lt { &old } else { &rhs }
        } else {
            if lt { &rhs } else { &old }
        };

        this.allow_data_races_mut(|this| this.write_immediate(**new_val, place))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new_val.to_scalar(), place, atomic, old.to_scalar())?;

        interp_ok(old)
    }

    fn atomic_compare_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        expect_old: &ImmTy<'tcx>,
        new: Scalar,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
        can_fail_spuriously: bool,
    ) -> InterpResult<'tcx, Immediate<Provenance>> {
        use rand::Rng as _;
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
        let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
        let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
        let cmpxchg_success = eq.to_scalar().to_bool()?
            && if can_fail_spuriously {
                this.machine.rng.get_mut().random_bool(success_rate)
            } else {
                true
            };
        let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));

        if cmpxchg_success {
            this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
            this.validate_atomic_rmw(place, success)?;
            this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
        } else {
            this.validate_atomic_load(place, fail)?;
            this.perform_read_on_buffered_latest(place, fail)?;
        }

        interp_ok(res)
    }

    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let current_span = this.machine.current_span();
        if let Some(data_race) = &mut this.machine.data_race {
            data_race.maybe_perform_sync_operation(
                &this.machine.threads,
                current_span,
                |index, mut clocks| {
                    trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);

                    if atomic != AtomicFenceOrd::Release {
                        clocks.apply_acquire_fence();
                    }
                    if atomic == AtomicFenceOrd::SeqCst {
                        let mut sc_fence_clock = data_race.last_sc_fence.borrow_mut();
                        sc_fence_clock.join(&clocks.clock);
                        clocks.clock.join(&sc_fence_clock);
                        clocks.write_seqcst.join(&data_race.last_sc_write_per_thread.borrow());
                    }
                    if atomic != AtomicFenceOrd::Acquire {
                        clocks.apply_release_fence();
                    }

                    interp_ok(atomic != AtomicFenceOrd::Acquire)
                },
            )
        } else {
            interp_ok(())
        }
    }

    fn allow_data_races_all_threads_done(&mut self) {
        let this = self.eval_context_ref();
        assert!(this.have_all_terminated());
        if let Some(data_race) = &this.machine.data_race {
            let old = data_race.ongoing_action_data_race_free.replace(true);
            assert!(!old, "cannot nest allow_data_races");
        }
    }

    fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> Option<R> {
        let this = self.eval_context_ref();
        Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads, callback))
    }

    fn acquire_clock(&self, clock: &VClock) {
        let this = self.eval_context_ref();
        if let Some(data_race) = &this.machine.data_race {
            data_race.acquire_clock(clock, &this.machine.threads);
        }
    }
}

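/// Data-race state attached to each allocation: a range map from byte ranges
/// to the `MemoryCellClocks` covering them.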
#[derive(Debug, Clone)]
pub struct VClockAlloc {
    alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
}

impl VisitProvenance for VClockAlloc {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

impl VClockAlloc {
    pub fn new_allocation(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_>,
        len: Size,
        kind: MemoryKind,
        current_span: Span,
    ) -> VClockAlloc {
        let (alloc_timestamp, alloc_index) = match kind {
            MemoryKind::Machine(
                MiriMemoryKind::Rust
                | MiriMemoryKind::Miri
                | MiriMemoryKind::C
                | MiriMemoryKind::WinHeap
                | MiriMemoryKind::WinLocal
                | MiriMemoryKind::Mmap,
            )
            | MemoryKind::Stack => {
                let (alloc_index, clocks) = global.active_thread_state(thread_mgr);
                let mut alloc_timestamp = clocks.clock[alloc_index];
                alloc_timestamp.span = current_span;
                (alloc_timestamp, alloc_index)
            }
            MemoryKind::Machine(
                MiriMemoryKind::Global
                | MiriMemoryKind::Machine
                | MiriMemoryKind::Runtime
                | MiriMemoryKind::ExternStatic
                | MiriMemoryKind::Tls,
            )
            | MemoryKind::CallerLocation =>
                (VTimestamp::ZERO, global.thread_index(ThreadId::MAIN_THREAD)),
        };
        VClockAlloc {
            alloc_ranges: RefCell::new(RangeMap::new(
                len,
                MemoryCellClocks::new(alloc_timestamp, alloc_index),
            )),
        }
    }

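    /// Finds a vector index at which the left clock exceeds the right clock,
    /// i.e. a witness that `l <= r` does not hold, if such an index exists.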
    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
        trace!("Find index where not {:?} <= {:?}", l, r);
        let l_slice = l.as_slice();
        let r_slice = r.as_slice();
        l_slice
            .iter()
            .zip(r_slice.iter())
            .enumerate()
            .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
            .or_else(|| {
                if l_slice.len() > r_slice.len() {
                    let l_remainder_slice = &l_slice[r_slice.len()..];
                    let idx = l_remainder_slice
                        .iter()
                        .enumerate()
                        .find_map(|(idx, &r)| if r == VTimestamp::ZERO { None } else { Some(idx) })
                        .expect("Invalid VClock Invariant");
                    Some(idx + r_slice.len())
                } else {
                    None
                }
            })
            .map(VectorIdx::new)
    }

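    /// Reports a detected data race as a diagnostic: reconstructs which
    /// earlier access the current access races with and terminates execution
    /// with a `TerminationInfo::DataRace` error.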
    #[cold]
    #[inline(never)]
    fn report_data_race<'tcx>(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_>,
        mem_clocks: &MemoryCellClocks,
        access: AccessType,
        access_size: Size,
        ptr_dbg: interpret::Pointer<AllocId>,
        ty: Option<Ty<'_>>,
    ) -> InterpResult<'tcx> {
        let (active_index, active_clocks) = global.active_thread_state(thread_mgr);
        let mut other_size = None;
        let write_clock;
        let (other_access, other_thread, other_clock) =
            if !access.is_atomic() &&
                let Some(atomic) = mem_clocks.atomic() &&
                let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
            {
                (AccessType::AtomicStore, idx, &atomic.write_vector)
            } else if !access.is_atomic() &&
                let Some(atomic) = mem_clocks.atomic() &&
                let Some(idx) = Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
            {
                (AccessType::AtomicLoad, idx, &atomic.read_vector)
            } else if mem_clocks.write.1 > active_clocks.clock[mem_clocks.write.0] {
                write_clock = mem_clocks.write();
                (AccessType::NaWrite(mem_clocks.write_type), mem_clocks.write.0, &write_clock)
            } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) {
                (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read)
            } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && atomic.size != Some(access_size) {
                other_size = Some(atomic.size.unwrap_or(Size::ZERO));
                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
                {
                    (AccessType::AtomicStore, idx, &atomic.write_vector)
                } else if let Some(idx) =
                    Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
                {
                    (AccessType::AtomicLoad, idx, &atomic.read_vector)
                } else {
                    unreachable!(
                        "Failed to report data-race for mixed-size access: no race found"
                    )
                }
            } else {
                unreachable!("Failed to report data-race")
            };

        let active_thread_info = global.print_thread_metadata(thread_mgr, active_index);
        let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
        let involves_non_atomic = !access.is_atomic() || !other_access.is_atomic();

        let extra = if other_size.is_some() {
            assert!(!involves_non_atomic);
            Some("overlapping unsynchronized atomic accesses must use the same access size")
        } else if access.is_read() && other_access.is_read() {
            panic!("there should be no same-size read-read races")
        } else {
            None
        };
        Err(err_machine_stop!(TerminationInfo::DataRace {
            involves_non_atomic,
            extra,
            retag_explain: access.is_retag() || other_access.is_retag(),
            ptr: ptr_dbg,
            op1: RacingOp {
                action: other_access.description(None, other_size),
                thread_info: other_thread_info,
                span: other_clock.as_slice()[other_thread.index()].span_data(),
            },
            op2: RacingOp {
                action: access.description(ty, other_size.map(|_| access_size)),
                thread_info: active_thread_info,
                span: active_clocks.clock.as_slice()[active_index.index()].span_data(),
            },
        }))?
    }

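    /// Checks a non-atomic read of the given range for data races against the
    /// clocks stored for this allocation, reporting a race if one is found.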
    pub fn read<'tcx>(
        &self,
        alloc_id: AllocId,
        access_range: AllocRange,
        read_type: NaReadType,
        ty: Option<Ty<'_>>,
        machine: &MiriMachine<'_>,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_span();
        let global = machine.data_race.as_ref().unwrap();
        if !global.race_detecting() {
            return interp_ok(());
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
        for (mem_clocks_range, mem_clocks) in
            alloc_ranges.iter_mut(access_range.start, access_range.size)
        {
            if let Err(DataRace) =
                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
            {
                drop(thread_clocks);
                return Self::report_data_race(
                    global,
                    &machine.threads,
                    mem_clocks,
                    AccessType::NaRead(read_type),
                    access_range.size,
                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
                    ty,
                );
            }
        }
        interp_ok(())
    }

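    /// Checks a non-atomic write of the given range for data races against the
    /// clocks stored for this allocation, reporting a race if one is found.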
    pub fn write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        access_range: AllocRange,
        write_type: NaWriteType,
        ty: Option<Ty<'_>>,
        machine: &mut MiriMachine<'_>,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_span();
        let global = machine.data_race.as_mut().unwrap();
        if !global.race_detecting() {
            return interp_ok(());
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        for (mem_clocks_range, mem_clocks) in
            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
        {
            if let Err(DataRace) =
                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
            {
                drop(thread_clocks);
                return Self::report_data_race(
                    global,
                    &machine.threads,
                    mem_clocks,
                    AccessType::NaWrite(write_type),
                    access_range.size,
                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
                    ty,
                );
            }
        }
        interp_ok(())
    }
}

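/// Data-race state for a stack frame's MIR locals, tracked separately while
/// they are not backed by a memory allocation.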
#[derive(Debug, Default)]
pub struct FrameState {
    local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
}

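/// Simplified clocks for a local variable: only the timestamps of the last
/// write (with its kind) and the last read are needed, since a local is only
/// accessed by its owning thread until it is moved into memory.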
#[derive(Debug)]
struct LocalClocks {
    write: VTimestamp,
    write_type: NaWriteType,
    read: VTimestamp,
}

impl Default for LocalClocks {
    fn default() -> Self {
        Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
    }
}

impl FrameState {
    pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
        let current_span = machine.current_span();
        let global = machine.data_race.as_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        let mut clocks = self.local_clocks.borrow_mut();
        if storage_live {
            let new_clocks = LocalClocks {
                write: thread_clocks.clock[index],
                write_type: NaWriteType::Allocate,
                read: VTimestamp::ZERO,
            };
            clocks.insert(local, new_clocks);
        } else {
            let clocks = clocks.entry(local).or_default();
            clocks.write = thread_clocks.clock[index];
            clocks.write_type = NaWriteType::Write;
        }
    }

    pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
        let current_span = machine.current_span();
        let global = machine.data_race.as_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
        let mut clocks = self.local_clocks.borrow_mut();
        let clocks = clocks.entry(local).or_default();
        clocks.read = thread_clocks.clock[index];
    }

    pub fn local_moved_to_memory(
        &self,
        local: mir::Local,
        alloc: &mut VClockAlloc,
        machine: &MiriMachine<'_>,
    ) {
        let global = machine.data_race.as_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
            assert_eq!(mem_clocks.write.0, index);
            mem_clocks.write = (index, local_clocks.write);
            mem_clocks.write_type = local_clocks.write_type;
            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
        }
    }
}

impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
    #[inline]
    fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R {
        let this = self.eval_context_ref();
        if let Some(data_race) = &this.machine.data_race {
            let old = data_race.ongoing_action_data_race_free.replace(true);
            assert!(!old, "cannot nest allow_data_races");
        }
        let result = op(this);
        if let Some(data_race) = &this.machine.data_race {
            data_race.ongoing_action_data_race_free.set(false);
        }
        result
    }

    #[inline]
    fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R {
        let this = self.eval_context_mut();
        if let Some(data_race) = &this.machine.data_race {
            let old = data_race.ongoing_action_data_race_free.replace(true);
            assert!(!old, "cannot nest allow_data_races");
        }
        let result = op(this);
        if let Some(data_race) = &this.machine.data_race {
            data_race.ongoing_action_data_race_free.set(false);
        }
        result
    }

    fn atomic_access_check(
        &self,
        place: &MPlaceTy<'tcx>,
        access_type: AtomicAccessType,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
        this.check_ptr_align(place.ptr(), align)?;
        let (alloc_id, _offset, _prov) = this
            .ptr_try_get_alloc_id(place.ptr(), 0)
            .expect("there are no zero-sized atomic accesses");
        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
            match access_type {
                AtomicAccessType::Rmw | AtomicAccessType::Store => {
                    throw_ub_format!(
                        "atomic store and read-modify-write operations cannot be performed on read-only memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                AtomicAccessType::Load(_)
                    if place.layout.size > this.tcx.data_layout().pointer_size() =>
                {
                    throw_ub_format!(
                        "large atomic load operations cannot be performed on read-only memory\n\
                        these operations often have to be implemented using read-modify-write operations, which require writeable memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                AtomicAccessType::Load(o) if o != AtomicReadOrd::Relaxed => {
                    throw_ub_format!(
                        "non-relaxed atomic load operations cannot be performed on read-only memory\n\
                        these operations sometimes have to be implemented using read-modify-write operations, which require writeable memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                _ => {}
            }
        }
        interp_ok(())
    }

    fn validate_atomic_load(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicLoad,
            move |memory, clocks, index, atomic| {
                if atomic == AtomicReadOrd::Relaxed {
                    memory.load_relaxed(&mut *clocks, index, place.layout.size)
                } else {
                    memory.load_acquire(&mut *clocks, index, place.layout.size)
                }
            },
        )
    }

    fn validate_atomic_store(
        &mut self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicStore,
            move |memory, clocks, index, atomic| {
                if atomic == AtomicWriteOrd::Relaxed {
                    memory.store_relaxed(clocks, index, place.layout.size)
                } else {
                    memory.store_release(clocks, index, place.layout.size)
                }
            },
        )
    }

    fn validate_atomic_rmw(
        &mut self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx> {
        use AtomicRwOrd::*;
        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
        let release = matches!(atomic, Release | AcqRel | SeqCst);
        let this = self.eval_context_mut();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicRmw,
            move |memory, clocks, index, _| {
                if acquire {
                    memory.load_acquire(clocks, index, place.layout.size)?;
                } else {
                    memory.load_relaxed(clocks, index, place.layout.size)?;
                }
                if release {
                    memory.rmw_release(clocks, index, place.layout.size)
                } else {
                    memory.rmw_relaxed(clocks, index, place.layout.size)
                }
            },
        )
    }

    fn validate_atomic_op<A: Debug + Copy>(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: A,
        access: AccessType,
        mut op: impl FnMut(
            &mut MemoryCellClocks,
            &mut ThreadClockSet,
            VectorIdx,
            A,
        ) -> Result<(), DataRace>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        assert!(access.is_atomic());
        let Some(data_race) = &this.machine.data_race else { return interp_ok(()) };
        if !data_race.race_detecting() {
            return interp_ok(());
        }
        let size = place.layout.size;
        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
        trace!(
            "Atomic op({}) with ordering {:?} on {:?} (size={})",
            access.description(None, None),
            &atomic,
            place.ptr(),
            size.bytes()
        );

        let current_span = this.machine.current_span();
        data_race.maybe_perform_sync_operation(
            &this.machine.threads,
            current_span,
            |index, mut thread_clocks| {
                for (mem_clocks_range, mem_clocks) in
                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                {
                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
                        mem::drop(thread_clocks);
                        return VClockAlloc::report_data_race(
                            data_race,
                            &this.machine.threads,
                            mem_clocks,
                            access,
                            place.layout.size,
                            interpret::Pointer::new(
                                alloc_id,
                                Size::from_bytes(mem_clocks_range.start),
                            ),
                            None,
                        )
                        .map(|_| true);
                    }
                }

                interp_ok(true)
            },
        )?;

        if tracing::enabled!(tracing::Level::TRACE) {
            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
                trace!(
                    "Updated atomic memory({:?}, size={}) to {:#?}",
                    place.ptr(),
                    size.bytes(),
                    mem_clocks.atomic_ops
                );
            }
        }

        interp_ok(())
    }
}

impl GlobalState {
    pub fn new(config: &MiriConfig) -> Self {
        let mut global_state = GlobalState {
            multi_threaded: Cell::new(false),
            ongoing_action_data_race_free: Cell::new(false),
            vector_clocks: RefCell::new(IndexVec::new()),
            vector_info: RefCell::new(IndexVec::new()),
            thread_info: RefCell::new(IndexVec::new()),
            reuse_candidates: RefCell::new(FxHashSet::default()),
            last_sc_fence: RefCell::new(VClock::default()),
            last_sc_write_per_thread: RefCell::new(VClock::default()),
            track_outdated_loads: config.track_outdated_loads,
        };

        let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
        global_state.vector_info.get_mut().push(ThreadId::MAIN_THREAD);
        global_state
            .thread_info
            .get_mut()
            .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });

        global_state
    }

    fn race_detecting(&self) -> bool {
        self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
    }

    pub fn ongoing_action_data_race_free(&self) -> bool {
        self.ongoing_action_data_race_free.get()
    }

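    /// Tries to find a terminated thread's vector index that can safely be
    /// reused for a new thread: reuse is only allowed once every other clock
    /// has caught up with the candidate's final timestamp (or itself belongs
    /// to a terminated candidate), so reusing the index cannot mask a race.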
    fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
        let mut reuse = self.reuse_candidates.borrow_mut();
        let vector_clocks = self.vector_clocks.borrow();
        for &candidate in reuse.iter() {
            let target_timestamp = vector_clocks[candidate].clock[candidate];
            if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
                let no_data_race = clock.clock[candidate] >= target_timestamp;

                let vector_terminated = reuse.contains(&clock_idx);

                no_data_race || vector_terminated
            }) {
                assert!(reuse.remove(&candidate));
                return Some(candidate);
            }
        }
        None
    }

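    /// Registers a newly created thread: switches the detector into
    /// multi-threaded mode, assigns the thread a vector index (reusing one
    /// from a terminated thread when possible), and establishes the
    /// happens-before edge from the creating thread to the new thread.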
    #[inline]
    pub fn thread_created(
        &mut self,
        thread_mgr: &ThreadManager<'_>,
        thread: ThreadId,
        current_span: Span,
    ) {
        let current_index = self.active_thread_index(thread_mgr);

        self.multi_threaded.set(true);

        let mut thread_info = self.thread_info.borrow_mut();
        thread_info.ensure_contains_elem(thread, Default::default);

        let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
            let vector_clocks = self.vector_clocks.get_mut();
            vector_clocks[reuse_index].increment_clock(reuse_index, current_span);

            let vector_info = self.vector_info.get_mut();
            let old_thread = vector_info[reuse_index];
            vector_info[reuse_index] = thread;

            thread_info[old_thread].vector_index = None;

            reuse_index
        } else {
            let vector_info = self.vector_info.get_mut();
            vector_info.push(thread)
        };

        trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);

        thread_info[thread].vector_index = Some(created_index);

        let vector_clocks = self.vector_clocks.get_mut();
        if created_index == vector_clocks.next_index() {
            vector_clocks.push(ThreadClockSet::default());
        }

        let (current, created) = vector_clocks.pick2_mut(current_index, created_index);

        created.join_with(current);

        current.increment_clock(current_index, current_span);
        created.increment_clock(created_index, current_span);
    }

    #[inline]
    pub fn thread_joined(&mut self, threads: &ThreadManager<'_>, joinee: ThreadId) {
        let thread_info = self.thread_info.borrow();
        let thread_info = &thread_info[joinee];

        let join_clock = thread_info
            .termination_vector_clock
            .as_ref()
            .expect("joined with thread but thread has not terminated");
        self.acquire_clock(join_clock, threads);

        if let Some(current_index) = thread_info.vector_index {
            if threads.get_live_thread_count() == 1 {
                let vector_clocks = self.vector_clocks.get_mut();
                let current_clock = &vector_clocks[current_index];
                if vector_clocks
                    .iter_enumerated()
                    .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
                {
                    self.multi_threaded.set(false);
                }
            }
        }
    }

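    /// Registers that the active thread has terminated: stores its final
    /// (released) clock so that joining threads can acquire it, and marks its
    /// vector index as a reuse candidate for future threads.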
    #[inline]
    pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_>) {
        let current_thread = thread_mgr.active_thread();
        let current_index = self.active_thread_index(thread_mgr);

        let termination_clock = self.release_clock(thread_mgr, |clock| clock.clone());
        self.thread_info.get_mut()[current_thread].termination_vector_clock =
            Some(termination_clock);

        let reuse = self.reuse_candidates.get_mut();
        reuse.insert(current_index);
    }

    fn maybe_perform_sync_operation<'tcx>(
        &self,
        thread_mgr: &ThreadManager<'_>,
        current_span: Span,
        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
    ) -> InterpResult<'tcx> {
        if self.multi_threaded.get() {
            let (index, clocks) = self.active_thread_state_mut(thread_mgr);
            if op(index, clocks)? {
                let (_, mut clocks) = self.active_thread_state_mut(thread_mgr);
                clocks.increment_clock(index, current_span);
            }
        }
        interp_ok(())
    }

    fn print_thread_metadata(&self, thread_mgr: &ThreadManager<'_>, vector: VectorIdx) -> String {
        let thread = self.vector_info.borrow()[vector];
        let thread_name = thread_mgr.get_thread_display_name(thread);
        format!("thread `{thread_name}`")
    }

    pub fn acquire_clock<'tcx>(&self, clock: &VClock, threads: &ThreadManager<'tcx>) {
        let thread = threads.active_thread();
        let (_, mut clocks) = self.thread_state_mut(thread);
        clocks.clock.join(clock);
    }

    pub fn release_clock<'tcx, R>(
        &self,
        threads: &ThreadManager<'tcx>,
        callback: impl FnOnce(&VClock) -> R,
    ) -> R {
        let thread = threads.active_thread();
        let span = threads.active_thread_ref().current_span();
        let (index, mut clocks) = self.thread_state_mut(thread);
        let r = callback(&clocks.clock);
        clocks.increment_clock(index, span);

        r
    }

    fn thread_index(&self, thread: ThreadId) -> VectorIdx {
        self.thread_info.borrow()[thread].vector_index.expect("thread has no assigned vector")
    }

    #[inline]
    fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.thread_index(thread);
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }

    #[inline]
    fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        let index = self.thread_index(thread);
        let ref_vector = self.vector_clocks.borrow();
        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
        (index, clocks)
    }

    #[inline]
    pub(super) fn active_thread_state(
        &self,
        thread_mgr: &ThreadManager<'_>,
    ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        self.thread_state(thread_mgr.active_thread())
    }

    #[inline]
    pub(super) fn active_thread_state_mut(
        &self,
        thread_mgr: &ThreadManager<'_>,
    ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        self.thread_state_mut(thread_mgr.active_thread())
    }

    #[inline]
    fn active_thread_index(&self, thread_mgr: &ThreadManager<'_>) -> VectorIdx {
        let active_thread_id = thread_mgr.active_thread();
        self.thread_index(active_thread_id)
    }

    pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_>) {
        let (index, clocks) = self.active_thread_state(thread_mgr);
        self.last_sc_write_per_thread.borrow_mut().set_at_index(&clocks.clock, index);
    }

    pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_>) {
        let (.., mut clocks) = self.active_thread_state_mut(thread_mgr);
        clocks.read_seqcst.join(&self.last_sc_fence.borrow());
    }
}
1876}