1use std::cell::{Cell, Ref, RefCell, RefMut};
44use std::fmt::Debug;
45use std::mem;
46
47use rustc_abi::{Align, HasDataLayout, Size};
48use rustc_ast::Mutability;
49use rustc_data_structures::fx::{FxHashMap, FxHashSet};
50use rustc_index::{Idx, IndexVec};
51use rustc_middle::mir;
52use rustc_middle::ty::Ty;
53use rustc_span::Span;
54
55use super::vector_clock::{VClock, VTimestamp, VectorIdx};
56use super::weak_memory::EvalContextExt as _;
57use crate::concurrency::GlobalDataRaceHandler;
58use crate::diagnostics::RacingOp;
59use crate::intrinsics::AtomicRmwOp;
60use crate::*;
61
/// Alias: the per-allocation state of the data-race detector is the vector-clock state.
pub type AllocState = VClockAlloc;
63
/// Valid atomic read-modify-write orderings, a subset of `atomic::Ordering`
/// (all orderings are permitted for RMW operations).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRwOrd {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}
73
/// Valid atomic read orderings, a subset of `atomic::Ordering`
/// (`Release` and `AcqRel` are not permitted for loads).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOrd {
    Relaxed,
    Acquire,
    SeqCst,
}
81
/// Valid atomic write orderings, a subset of `atomic::Ordering`
/// (`Acquire` and `AcqRel` are not permitted for stores).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOrd {
    Relaxed,
    Release,
    SeqCst,
}
89
/// Valid atomic fence orderings, a subset of `atomic::Ordering`
/// (`Relaxed` fences are not permitted).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOrd {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}
98
/// The full set of vector clocks tracked for each thread
/// by the data-race detector.
#[derive(Clone, Default, Debug)]
pub(super) struct ThreadClockSet {
    /// The thread's view of the happens-before order: joined with other
    /// clocks on synchronization, incremented on thread-local steps.
    pub(super) clock: VClock,

    /// Clock accumulated by relaxed loads of release stores; it is joined
    /// into `clock` when an acquire fence executes (see `apply_acquire_fence`).
    fence_acquire: VClock,

    /// Snapshot of `clock` taken at the last release fence (see
    /// `apply_release_fence`); relaxed stores/RMWs synchronize using this.
    fence_release: VClock,

    /// SeqCst-write bookkeeping clock. `pub(super)` — maintained by code
    /// outside this view (presumably SC fence handling); not written here.
    pub(super) write_seqcst: VClock,

    /// SeqCst-read bookkeeping clock; likewise maintained elsewhere.
    pub(super) read_seqcst: VClock,
}
128
impl ThreadClockSet {
    /// Apply the effects of a release fence: snapshot the current thread
    /// clock into `fence_release` for later relaxed stores to publish.
    #[inline]
    fn apply_release_fence(&mut self) {
        self.fence_release.clone_from(&self.clock);
    }

    /// Apply the effects of an acquire fence: join the clocks accumulated
    /// by earlier relaxed loads into the thread's main clock.
    #[inline]
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    /// Increment this thread's own component of its vector clock,
    /// recording `current_span` for race diagnostics.
    #[inline]
    fn increment_clock(&mut self, index: VectorIdx, current_span: Span) {
        self.clock.increment_index(index, current_span);
    }

    /// Join another thread's clock into ours (a happens-before edge from
    /// `other` to `self`); fence and SeqCst clocks are not propagated.
    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
}
158
/// Internal marker error: a data race was detected. Carries no payload;
/// the diagnostic details are reconstructed by `VClockAlloc::report_data_race`.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct DataRace;
163
/// Clocks for the atomic accesses to a location; allocated lazily (boxed in
/// `MemoryCellClocks`) only for locations actually used atomically.
#[derive(Clone, PartialEq, Eq, Debug)]
struct AtomicMemoryCellClocks {
    /// Per-thread timestamps of atomic reads of this location.
    read_vector: VClock,

    /// Per-thread timestamps of atomic writes to this location.
    write_vector: VClock,

    /// The clock that acquire loads synchronize with: release stores
    /// overwrite it, release RMWs join into it (release sequences).
    sync_vector: VClock,

    /// Size of the atomic accesses seen so far; `None` once differently
    /// sized reads have been accepted (see `MemoryCellClocks::atomic_access`).
    size: Option<Size>,
}
197
/// The kind of atomic access being performed, used by `atomic_access_check`
/// to decide which accesses are legal on read-only memory.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AtomicAccessType {
    Load(AtomicReadOrd),
    Store,
    Rmw,
}
204
/// The kind of a non-atomic read, distinguished in race diagnostics.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum NaReadType {
    /// A plain non-atomic read.
    Read,

    /// A read caused by retagging (borrow-tracking machinery).
    Retag,
}
214
215impl NaReadType {
216 fn description(self) -> &'static str {
217 match self {
218 NaReadType::Read => "non-atomic read",
219 NaReadType::Retag => "retag read",
220 }
221 }
222}
223
/// The kind of a non-atomic write, distinguished in race diagnostics.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum NaWriteType {
    /// The initial write when an allocation is created.
    Allocate,

    /// A plain non-atomic write.
    Write,

    /// A write caused by retagging (borrow-tracking machinery).
    Retag,

    /// Deallocation is treated as a write for race-detection purposes.
    Deallocate,
}
243
244impl NaWriteType {
245 fn description(self) -> &'static str {
246 match self {
247 NaWriteType::Allocate => "creating a new allocation",
248 NaWriteType::Write => "non-atomic write",
249 NaWriteType::Retag => "retag write",
250 NaWriteType::Deallocate => "deallocation",
251 }
252 }
253}
254
/// Any kind of access (atomic or not) whose participation in a race
/// can be reported.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AccessType {
    NaRead(NaReadType),
    NaWrite(NaWriteType),
    AtomicLoad,
    AtomicStore,
    AtomicRmw,
}
263
/// Race-detection state for a single location (byte range) of memory.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
    /// The last non-atomic write as (thread vector index, timestamp).
    /// Only the most recent write is kept; a single component suffices
    /// because any earlier write must already happen-before it.
    write: (VectorIdx, VTimestamp),

    /// What kind of non-atomic write `write` was (allocation, plain
    /// write, retag, deallocation) — used in diagnostics.
    write_type: NaWriteType,

    /// Per-thread timestamps of non-atomic reads since the last write.
    read: VClock,

    /// Atomic access clocks; boxed and lazily allocated because most
    /// locations are never accessed atomically.
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}
287
/// Extra per-thread metadata kept by the global race-detector state.
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
    /// The vector-clock index currently assigned to this thread, if any
    /// (indices can apparently be reused — see `reuse_candidates`).
    vector_index: Option<VectorIdx>,

    /// Clock captured when the thread terminated — presumably acquired by
    /// joining threads; set by code outside this view. TODO confirm.
    termination_vector_clock: Option<VClock>,
}
304
/// Global state of the vector-clock data-race detector, shared across
/// all allocations and threads.
#[derive(Debug, Clone)]
pub struct GlobalState {
    /// Whether more than one thread has ever been active; presumably used
    /// to skip race checks in single-threaded programs (`race_detecting()`
    /// is defined outside this view).
    multi_threaded: Cell<bool>,

    /// Set while an `allow_data_races_*` closure runs so that the accesses
    /// it performs are exempt from race detection (see
    /// `GlobalDataRaceHandler::set_ongoing_action_data_race_free`).
    ongoing_action_data_race_free: Cell<bool>,

    /// The clock set for every active vector index.
    vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,

    /// Maps a vector index back to the thread currently holding it.
    vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,

    /// Per-thread metadata (assigned vector index, termination clock).
    thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,

    /// Vector indices whose threads have terminated and whose slots may be
    /// reassigned to new threads — TODO confirm, reuse logic not visible here.
    reuse_candidates: RefCell<FxHashSet<VectorIdx>>,

    /// Clock of the last SeqCst fence per thread — maintained by fence
    /// handling outside this view.
    last_sc_fence: RefCell<VClock>,

    /// Timestamp of the last SeqCst write per thread — maintained outside
    /// this view.
    last_sc_write_per_thread: RefCell<VClock>,

    /// Whether to report loads of outdated values (weak-memory diagnostics).
    pub track_outdated_loads: bool,

    /// Whether weak-memory (store-buffer) emulation is enabled.
    pub weak_memory: bool,
}
360
impl VisitProvenance for GlobalState {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // Vector clocks and thread metadata contain no pointers,
        // so there is no provenance to visit.
    }
}
366
367impl AccessType {
368 fn description(self, ty: Option<Ty<'_>>, size: Option<Size>) -> String {
369 let mut msg = String::new();
370
371 if let Some(size) = size {
372 if size == Size::ZERO {
373 assert!(self == AccessType::AtomicLoad);
377 assert!(ty.is_none());
378 return format!("multiple differently-sized atomic loads, including one load");
379 }
380 msg.push_str(&format!("{}-byte {}", size.bytes(), msg))
381 }
382
383 msg.push_str(match self {
384 AccessType::NaRead(w) => w.description(),
385 AccessType::NaWrite(w) => w.description(),
386 AccessType::AtomicLoad => "atomic load",
387 AccessType::AtomicStore => "atomic store",
388 AccessType::AtomicRmw => "atomic read-modify-write",
389 });
390
391 if let Some(ty) = ty {
392 msg.push_str(&format!(" of type `{ty}`"));
393 }
394
395 msg
396 }
397
398 fn is_atomic(self) -> bool {
399 match self {
400 AccessType::AtomicLoad | AccessType::AtomicStore | AccessType::AtomicRmw => true,
401 AccessType::NaRead(_) | AccessType::NaWrite(_) => false,
402 }
403 }
404
405 fn is_read(self) -> bool {
406 match self {
407 AccessType::AtomicLoad | AccessType::NaRead(_) => true,
408 AccessType::NaWrite(_) | AccessType::AtomicStore | AccessType::AtomicRmw => false,
409 }
410 }
411
412 fn is_retag(self) -> bool {
413 matches!(
414 self,
415 AccessType::NaRead(NaReadType::Retag) | AccessType::NaWrite(NaWriteType::Retag)
416 )
417 }
418}
419
420impl AtomicMemoryCellClocks {
421 fn new(size: Size) -> Self {
422 AtomicMemoryCellClocks {
423 read_vector: Default::default(),
424 write_vector: Default::default(),
425 sync_vector: Default::default(),
426 size: Some(size),
427 }
428 }
429}
430
impl MemoryCellClocks {
    /// Create a new set of clocks representing memory allocated at timestamp
    /// `alloc` by the thread with vector index `alloc_index`. The allocation
    /// itself counts as the initial (non-atomic) write.
    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
        MemoryCellClocks {
            read: VClock::default(),
            write: (alloc_index, alloc),
            write_type: NaWriteType::Allocate,
            atomic_ops: None,
        }
    }

    /// Whether the last write to this location happens-before `other`.
    /// Comparing the single stored component suffices since only the most
    /// recent write is tracked.
    #[inline]
    fn write_was_before(&self, other: &VClock) -> bool {
        self.write.1 <= other[self.write.0]
    }

    /// The last write expanded into a full vector clock
    /// (exactly one non-zero component).
    #[inline]
    fn write(&self) -> VClock {
        VClock::new_with_index(self.write.0, self.write.1)
    }

    /// The atomic clocks, if this location was ever accessed atomically.
    #[inline]
    fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
        self.atomic_ops.as_deref()
    }

    /// Mutable atomic clocks; panics if no atomic access happened yet
    /// (callers run `atomic_access` first, which creates them).
    #[inline]
    fn atomic_mut_unwrap(&mut self) -> &mut AtomicMemoryCellClocks {
        self.atomic_ops.as_deref_mut().unwrap()
    }

    /// Obtain the atomic clocks for an access of the given `size`, creating
    /// them on first use and enforcing the mixed-size access rules:
    /// - same size as before: always fine;
    /// - if all previous atomic reads *and* writes happen-before us, the
    ///   recorded size may simply be changed;
    /// - a read that only races with previous *reads* of another size is
    ///   tolerated, recording `size = None` ("mixed-size reads");
    /// - anything else is a (mixed-size) data race.
    fn atomic_access(
        &mut self,
        thread_clocks: &ThreadClockSet,
        size: Size,
        write: bool,
    ) -> Result<&mut AtomicMemoryCellClocks, DataRace> {
        match self.atomic_ops {
            Some(ref mut atomic) => {
                if atomic.size == Some(size) {
                    Ok(atomic)
                } else if atomic.read_vector <= thread_clocks.clock
                    && atomic.write_vector <= thread_clocks.clock
                {
                    // All previous accesses are ordered before us, so we may
                    // switch to the new size.
                    atomic.size = Some(size);
                    Ok(atomic)
                } else if !write && atomic.write_vector <= thread_clocks.clock {
                    // Only reads are concurrent with us; differently-sized
                    // concurrent reads are allowed but leave no single size.
                    atomic.size = None;
                    Ok(atomic)
                } else {
                    Err(DataRace)
                }
            }
            None => {
                self.atomic_ops = Some(Box::new(AtomicMemoryCellClocks::new(size)));
                Ok(self.atomic_ops.as_mut().unwrap())
            }
        }
    }

    /// Atomic acquire load: detect races, then join the synchronization
    /// clock into the thread's main clock. `sync_clock` overrides the
    /// per-location `sync_vector` when provided (weak-memory buffered
    /// reads pass the clock of the specific store that was read).
    fn load_acquire(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
        sync_clock: Option<&VClock>,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(thread_clocks, index, access_size)?;
        if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
            thread_clocks.clock.join(sync_clock);
        }
        Ok(())
    }

    /// Atomic relaxed load: like `load_acquire`, but the synchronization
    /// clock is only stashed in `fence_acquire`, taking effect at the next
    /// acquire fence.
    fn load_relaxed(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
        sync_clock: Option<&VClock>,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(thread_clocks, index, access_size)?;
        if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
            thread_clocks.fence_acquire.join(sync_clock);
        }
        Ok(())
    }

    /// Atomic release store: detect races, then publish the thread's full
    /// clock by *overwriting* the sync vector (a plain store ends any
    /// previous release sequence).
    fn store_release(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.clone_from(&thread_clocks.clock);
        Ok(())
    }

    /// Atomic relaxed store: publishes only the clock of the last release
    /// fence (overwriting the sync vector).
    fn store_relaxed(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;

        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.clone_from(&thread_clocks.fence_release);
        Ok(())
    }

    /// Release-ordered RMW: *joins* the thread clock into the sync vector,
    /// preserving earlier releases (release-sequence behavior).
    fn rmw_release(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.join(&thread_clocks.clock);
        Ok(())
    }

    /// Relaxed RMW: joins only the last release-fence clock into the sync
    /// vector, likewise preserving earlier releases.
    fn rmw_relaxed(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.join(&thread_clocks.fence_release);
        Ok(())
    }

    /// Detect races for an atomic read: record the read in `read_vector`
    /// and check that the last non-atomic write happens-before us.
    fn atomic_read_detect(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
        let atomic = self.atomic_access(thread_clocks, access_size, false)?;
        atomic.read_vector.set_at_index(&thread_clocks.clock, index);
        // Atomic reads only race with non-atomic writes; atomic-vs-atomic
        // size conflicts were already handled by `atomic_access`.
        if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) }
    }

    /// Detect races for an atomic write: record the write in `write_vector`
    /// and check that both the last non-atomic write and all non-atomic
    /// reads happen-before us.
    fn atomic_write_detect(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
        let atomic = self.atomic_access(thread_clocks, access_size, true)?;
        atomic.write_vector.set_at_index(&thread_clocks.clock, index);
        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
            Ok(())
        } else {
            Err(DataRace)
        }
    }

    /// Detect races for a non-atomic read: races with any write (atomic or
    /// not) that does not happen-before us. The read is recorded even when
    /// it races with an atomic write (the race is reported by the caller).
    fn read_race_detect(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        read_type: NaReadType,
        current_span: Span,
    ) -> Result<(), DataRace> {
        trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
        if !current_span.is_dummy() {
            // Keep the thread clock's span up to date for diagnostics.
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        thread_clocks.clock.index_mut(index).set_read_type(read_type);
        if self.write_was_before(&thread_clocks.clock) {
            // Also check for races with atomic writes.
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= thread_clocks.clock
            } else {
                true
            };
            self.read.set_at_index(&thread_clocks.clock, index);
            if race_free { Ok(()) } else { Err(DataRace) }
        } else {
            Err(DataRace)
        }
    }

    /// Detect races for a non-atomic write: races with any read or write
    /// (atomic or not) that does not happen-before us. On a race-free
    /// write, the read clock is reset — the new write now dominates it.
    fn write_race_detect(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        write_type: NaWriteType,
        current_span: Span,
    ) -> Result<(), DataRace> {
        trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
            // Also check for races with atomic accesses.
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= thread_clocks.clock
                    && atomic.read_vector <= thread_clocks.clock
            } else {
                true
            };
            self.write = (index, thread_clocks.clock[index]);
            self.write_type = write_type;
            if race_free {
                self.read.set_zero_vector();
                Ok(())
            } else {
                Err(DataRace)
            }
        } else {
            Err(DataRace)
        }
    }
}
697
698impl GlobalDataRaceHandler {
699 fn set_ongoing_action_data_race_free(&self, enable: bool) {
702 match self {
703 GlobalDataRaceHandler::None => {}
704 GlobalDataRaceHandler::Vclocks(data_race) => {
705 let old = data_race.ongoing_action_data_race_free.replace(enable);
706 assert_ne!(old, enable, "cannot nest allow_data_races");
707 }
708 GlobalDataRaceHandler::Genmc(genmc_ctx) => {
709 genmc_ctx.set_ongoing_action_data_race_free(enable);
710 }
711 }
712 }
713}
714
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
/// Public interpreter operations for atomic memory accesses and fences.
pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
    /// Perform an atomic read of `place` with the given ordering.
    fn read_scalar_atomic(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_ref();
        this.atomic_access_check(place, AtomicAccessType::Load(atomic))?;
        // GenMC mode: the model checker decides which value is read; we pass
        // it the value currently in memory (may be uninit, hence discard_err).
        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let old_val = this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err();
            return genmc_ctx.atomic_load(
                this,
                place.ptr().addr(),
                place.layout.size,
                atomic,
                old_val,
            );
        }

        // Vector-clock mode: read race-free, then let the weak-memory buffer
        // possibly replace the value with an older buffered store; the
        // closure validates the load against that store's sync clock.
        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
        let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, |sync_clock| {
            this.validate_atomic_load(place, atomic, sync_clock)
        })?;
        // A `None` buffered value means the location is uninitialized.
        interp_ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
    }

    /// Perform an atomic write of `val` to `dest` with the given ordering.
    fn write_scalar_atomic(
        &mut self,
        val: Scalar,
        dest: &MPlaceTy<'tcx>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.atomic_access_check(dest, AtomicAccessType::Store)?;

        // The current in-memory value, needed by both backends (may be uninit).
        let old_val = this.run_for_validation_ref(|this| this.read_scalar(dest)).discard_err();

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            // GenMC tells us whether the store should actually be performed.
            if genmc_ctx.atomic_store(
                this,
                dest.ptr().addr(),
                dest.layout.size,
                val,
                old_val,
                atomic,
            )? {
                this.allow_data_races_mut(|this| this.write_scalar(val, dest))?;
            }
            return interp_ok(());
        }
        this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
        this.validate_atomic_store(dest, atomic)?;
        this.buffered_atomic_write(val, dest, atomic, old_val)
    }

    /// Perform an atomic RMW `*place = op(*place, rhs)`, returning the
    /// previous value.
    fn atomic_rmw_op_immediate(
        &mut self,
        place: &MPlaceTy<'tcx>,
        rhs: &ImmTy<'tcx>,
        atomic_op: AtomicRmwOp,
        ord: AtomicRwOrd,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            // GenMC computes both the value read and the value to store (the
            // latter may be absent if the store is not to be performed).
            let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
                this,
                place.ptr().addr(),
                place.layout.size,
                atomic_op,
                place.layout.backend_repr.is_signed(),
                ord,
                rhs.to_scalar(),
                old.to_scalar(),
            )?;
            if let Some(new_val) = new_val {
                this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
            }
            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
        }

        // Compute the new value according to the requested operation.
        let val = match atomic_op {
            AtomicRmwOp::MirOp { op, neg } => {
                // `neg` turns e.g. `and` into `nand`.
                let val = this.binary_op(op, &old, rhs)?;
                if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val }
            }
            AtomicRmwOp::Max => {
                let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
                if lt { rhs } else { &old }.clone()
            }
            AtomicRmwOp::Min => {
                let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
                if lt { &old } else { rhs }.clone()
            }
        };

        this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;

        this.validate_atomic_rmw(place, ord)?;

        this.buffered_atomic_rmw(val.to_scalar(), place, ord, old.to_scalar())?;
        interp_ok(old)
    }

    /// Atomically swap in `new`, returning the previous value.
    fn atomic_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new: Scalar,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
        this.allow_data_races_mut(|this| this.write_scalar(new, place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old_val, new_val) = genmc_ctx.atomic_exchange(
                this,
                place.ptr().addr(),
                place.layout.size,
                new,
                atomic,
                old,
            )?;
            // GenMC may override the value we already wrote above.
            if let Some(new_val) = new_val {
                this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
            }
            return interp_ok(old_val);
        }

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new, place, atomic, old)?;
        interp_ok(old)
    }

    /// Atomic compare-and-exchange: store `new` iff the current value equals
    /// `expect_old` (and, for the weak variant, no spurious failure occurs).
    /// Returns the old value paired with the success flag. On success the
    /// `success` ordering applies; on failure only a load with `fail`
    /// ordering is performed.
    fn atomic_compare_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        expect_old: &ImmTy<'tcx>,
        new: Scalar,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
        can_fail_spuriously: bool,
    ) -> InterpResult<'tcx, Immediate<Provenance>> {
        use rand::Rng as _;
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old_value, new_value, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
                this,
                place.ptr().addr(),
                place.layout.size,
                this.read_scalar(expect_old)?,
                new,
                success,
                fail,
                can_fail_spuriously,
                old.to_scalar(),
            )?;
            if let Some(new_value) = new_value {
                this.allow_data_races_mut(|this| this.write_scalar(new_value, place))?;
            }
            return interp_ok(Immediate::ScalarPair(old_value, Scalar::from_bool(cmpxchg_success)));
        }

        let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
        // `compare_exchange_weak` may fail spuriously; model that with the
        // machine-configured failure rate.
        let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
        let cmpxchg_success = eq.to_scalar().to_bool()?
            && if can_fail_spuriously {
                this.machine.rng.get_mut().random_bool(success_rate)
            } else {
                true
            };
        let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));

        if cmpxchg_success {
            this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
            this.validate_atomic_rmw(place, success)?;
            this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
        } else {
            // Failed exchanges only count as a load (with the `fail` ordering);
            // in weak-memory mode, re-reading the latest store keeps the
            // buffer state consistent.
            this.validate_atomic_load(place, fail, None)?;
            this.perform_read_on_buffered_latest(place, fail)?;
        }

        interp_ok(res)
    }

    /// Execute an atomic fence with the given ordering on the active backend.
    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let machine = &this.machine;
        match &this.machine.data_race {
            GlobalDataRaceHandler::None => interp_ok(()),
            GlobalDataRaceHandler::Vclocks(data_race) => data_race.atomic_fence(machine, atomic),
            GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.atomic_fence(machine, atomic),
        }
    }

    /// Invoke `callback` with the active thread's release clock (used when
    /// this thread releases a synchronization object). Returns `None` when
    /// data-race detection is disabled; errors in GenMC mode.
    fn release_clock<R>(
        &self,
        callback: impl FnOnce(&VClock) -> R,
    ) -> InterpResult<'tcx, Option<R>> {
        let this = self.eval_context_ref();
        interp_ok(match &this.machine.data_race {
            GlobalDataRaceHandler::None => None,
            GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
                throw_unsup_format!(
                    "this operation performs synchronization that is not supported in GenMC mode"
                ),
            GlobalDataRaceHandler::Vclocks(data_race) =>
                Some(data_race.release_clock(&this.machine.threads, callback)),
        })
    }

    /// Acquire the given clock into the active thread (used when this thread
    /// acquires a synchronization object). Errors in GenMC mode.
    fn acquire_clock(&self, clock: &VClock) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        match &this.machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
                throw_unsup_format!(
                    "this operation performs synchronization that is not supported in GenMC mode"
                ),
            GlobalDataRaceHandler::Vclocks(data_race) =>
                data_race.acquire_clock(clock, &this.machine.threads),
        }
        interp_ok(())
    }
}
1003
/// Vector-clock race-detection state for a single allocation: a dedup'd
/// range map from byte ranges to their memory-cell clocks.
#[derive(Debug, Clone)]
pub struct VClockAlloc {
    alloc_ranges: RefCell<DedupRangeMap<MemoryCellClocks>>,
}
1010
impl VisitProvenance for VClockAlloc {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // Memory-cell clocks contain no pointers, so no provenance to visit.
    }
}
1016
impl VClockAlloc {
    /// Create race-detection state for a fresh allocation of `len` bytes.
    /// Heap/stack allocations are timestamped with the allocating thread's
    /// current clock; global-ish allocations (statics, TLS templates, …) get
    /// the zero timestamp attributed to the main thread, since they
    /// conceptually exist "since forever".
    pub fn new_allocation(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_>,
        len: Size,
        kind: MemoryKind,
        current_span: Span,
    ) -> VClockAlloc {
        let (alloc_timestamp, alloc_index) = match kind {
            MemoryKind::Machine(
                MiriMemoryKind::Rust
                | MiriMemoryKind::Miri
                | MiriMemoryKind::C
                | MiriMemoryKind::WinHeap
                | MiriMemoryKind::WinLocal
                | MiriMemoryKind::Mmap,
            )
            | MemoryKind::Stack => {
                let (alloc_index, clocks) = global.active_thread_state(thread_mgr);
                let mut alloc_timestamp = clocks.clock[alloc_index];
                // Attach the allocation site span for diagnostics.
                alloc_timestamp.span = current_span;
                (alloc_timestamp, alloc_index)
            }
            MemoryKind::Machine(
                MiriMemoryKind::Global
                | MiriMemoryKind::Machine
                | MiriMemoryKind::Runtime
                | MiriMemoryKind::ExternStatic
                | MiriMemoryKind::Tls,
            )
            | MemoryKind::CallerLocation =>
                (VTimestamp::ZERO, global.thread_index(ThreadId::MAIN_THREAD)),
        };
        VClockAlloc {
            alloc_ranges: RefCell::new(DedupRangeMap::new(
                len,
                MemoryCellClocks::new(alloc_timestamp, alloc_index),
            )),
        }
    }

    /// Find the first vector index where `l` exceeds `r`, i.e. a witness
    /// that `l <= r` does NOT hold. Returns `None` when `l <= r`.
    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
        trace!("Find index where not {:?} <= {:?}", l, r);
        let l_slice = l.as_slice();
        let r_slice = r.as_slice();
        l_slice
            .iter()
            .zip(r_slice.iter())
            .enumerate()
            .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
            .or_else(|| {
                if l_slice.len() > r_slice.len() {
                    // Components of `r` beyond its slice are implicitly zero,
                    // so any non-zero trailing component of `l` is a witness.
                    // The clock invariant guarantees the last component is
                    // non-zero, so a witness must exist here.
                    let l_remainder_slice = &l_slice[r_slice.len()..];
                    let idx = l_remainder_slice
                        .iter()
                        .enumerate()
                        .find_map(|(idx, &r)| if r == VTimestamp::ZERO { None } else { Some(idx) })
                        .expect("Invalid VClock Invariant");
                    Some(idx + r_slice.len())
                } else {
                    None
                }
            })
            .map(VectorIdx::new)
    }

    /// Build and raise the diagnostic for a detected data race: reconstruct
    /// which earlier access (kind, thread, span) conflicts with the current
    /// `access`. Cold and never inlined — only reached on the error path.
    #[cold]
    #[inline(never)]
    fn report_data_race<'tcx>(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_>,
        mem_clocks: &MemoryCellClocks,
        access: AccessType,
        access_size: Size,
        ptr_dbg: interpret::Pointer<AllocId>,
        ty: Option<Ty<'_>>,
    ) -> InterpResult<'tcx> {
        let (active_index, active_clocks) = global.active_thread_state(thread_mgr);
        let mut other_size = None;
        // Declared early so the `NaWrite` arm can borrow a clock it builds.
        let write_clock;
        let (other_access, other_thread, other_clock) =
            // Check races in a fixed priority order: for a non-atomic access,
            // first atomic writes, then atomic reads; then the last
            // non-atomic write; then non-atomic reads; finally the
            // mixed-size atomic case.
            if !access.is_atomic()
                && let Some(atomic) = mem_clocks.atomic()
                && let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
            {
                (AccessType::AtomicStore, idx, &atomic.write_vector)
            } else if !access.is_atomic()
                && let Some(atomic) = mem_clocks.atomic()
                && let Some(idx) = Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
            {
                (AccessType::AtomicLoad, idx, &atomic.read_vector)
            } else if mem_clocks.write.1 > active_clocks.clock[mem_clocks.write.0] {
                write_clock = mem_clocks.write();
                (AccessType::NaWrite(mem_clocks.write_type), mem_clocks.write.0, &write_clock)
            } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) {
                (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read)
            } else if access.is_atomic()
                && let Some(atomic) = mem_clocks.atomic()
                && atomic.size != Some(access_size)
            {
                // Mixed-size atomic accesses; `size == None` means multiple
                // differently-sized reads were already seen (Size::ZERO is
                // the marker `AccessType::description` expects for that).
                other_size = Some(atomic.size.unwrap_or(Size::ZERO));
                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
                {
                    (AccessType::AtomicStore, idx, &atomic.write_vector)
                } else if let Some(idx) =
                    Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
                {
                    (AccessType::AtomicLoad, idx, &atomic.read_vector)
                } else {
                    unreachable!(
                        "Failed to report data-race for mixed-size access: no race found"
                    )
                }
            } else {
                unreachable!("Failed to report data-race")
            };

        let active_thread_info = global.print_thread_metadata(thread_mgr, active_index);
        let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
        let involves_non_atomic = !access.is_atomic() || !other_access.is_atomic();

        let extra = if other_size.is_some() {
            // Mixed-size races can only occur between atomic accesses.
            assert!(!involves_non_atomic);
            Some("overlapping unsynchronized atomic accesses must use the same access size")
        } else if access.is_read() && other_access.is_read() {
            panic!("there should be no same-size read-read races")
        } else {
            None
        };
        Err(err_machine_stop!(TerminationInfo::DataRace {
            involves_non_atomic,
            extra,
            retag_explain: access.is_retag() || other_access.is_retag(),
            ptr: ptr_dbg,
            op1: RacingOp {
                action: other_access.description(None, other_size),
                thread_info: other_thread_info,
                span: other_clock.as_slice()[other_thread.index()].span_data(),
            },
            op2: RacingOp {
                action: access.description(ty, other_size.map(|_| access_size)),
                thread_info: active_thread_info,
                span: active_clocks.clock.as_slice()[active_index.index()].span_data(),
            },
        }))?
    }

    /// The join of the sync clocks of all atomic locations in `access_range`
    /// (locations never accessed atomically contribute nothing).
    pub(super) fn sync_clock(&self, access_range: AllocRange) -> VClock {
        let alloc_ranges = self.alloc_ranges.borrow();
        let mut clock = VClock::default();
        for (_, mem_clocks) in alloc_ranges.iter(access_range.start, access_range.size) {
            if let Some(atomic) = mem_clocks.atomic() {
                clock.join(&atomic.sync_vector);
            }
        }
        clock
    }

    /// Detect races for a non-atomic read of `access_range`; reports the
    /// race (fatal) if one is found. No-op unless race detection is active.
    pub fn read<'tcx>(
        &self,
        alloc_id: AllocId,
        access_range: AllocRange,
        read_type: NaReadType,
        ty: Option<Ty<'_>>,
        machine: &MiriMachine<'_>,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return interp_ok(());
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
        for (mem_clocks_range, mem_clocks) in
            alloc_ranges.iter_mut(access_range.start, access_range.size)
        {
            if let Err(DataRace) =
                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
            {
                // Release the thread-clock borrow: reporting re-borrows state.
                drop(thread_clocks);
                return Self::report_data_race(
                    global,
                    &machine.threads,
                    mem_clocks,
                    AccessType::NaRead(read_type),
                    access_range.size,
                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
                    ty,
                );
            }
        }
        interp_ok(())
    }

    /// Detect races for a non-atomic write (incl. allocation/deallocation)
    /// of `access_range`; reports the race (fatal) if one is found.
    /// No-op unless race detection is active.
    pub fn write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        access_range: AllocRange,
        write_type: NaWriteType,
        ty: Option<Ty<'_>>,
        machine: &mut MiriMachine<'_>,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_span();
        let global = machine.data_race.as_vclocks_mut().unwrap();
        if !global.race_detecting() {
            return interp_ok(());
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        for (mem_clocks_range, mem_clocks) in
            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
        {
            if let Err(DataRace) =
                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
            {
                // Release the thread-clock borrow: reporting re-borrows state.
                drop(thread_clocks);
                return Self::report_data_race(
                    global,
                    &machine.threads,
                    mem_clocks,
                    AccessType::NaWrite(write_type),
                    access_range.size,
                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
                    ty,
                );
            }
        }
        interp_ok(())
    }
}
1281
/// Per-stack-frame data-race state: lightweight clocks for locals that have
/// not escaped to memory (full `VClockAlloc` tracking starts only when a
/// local is moved to memory, see `local_moved_to_memory`).
#[derive(Debug, Default)]
pub struct FrameState {
    local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
}
1288
/// Scalar (single-thread) clocks for a machine-local variable. Only
/// timestamps of the owning thread are needed since a local cannot be
/// accessed by other threads before it escapes to memory.
#[derive(Debug)]
struct LocalClocks {
    // Timestamp of the last write to this local.
    write: VTimestamp,
    // Kind of that last write (allocate vs. plain write), for diagnostics.
    write_type: NaWriteType,
    // Timestamp of the last read of this local.
    read: VTimestamp,
}
1298
1299impl Default for LocalClocks {
1300 fn default() -> Self {
1301 Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
1302 }
1303}
1304
impl FrameState {
    /// Record a write to local `local` at the current thread time.
    /// `storage_live` marks (re-)initialization via `StorageLive`, which
    /// resets the local's clocks entirely.
    pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
        let current_span = machine.current_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        if !current_span.is_dummy() {
            // Keep the thread clock's span current for diagnostics.
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        let mut clocks = self.local_clocks.borrow_mut();
        if storage_live {
            let new_clocks = LocalClocks {
                write: thread_clocks.clock[index],
                write_type: NaWriteType::Allocate,
                read: VTimestamp::ZERO,
            };
            // StorageLive replaces any previous state of this local.
            clocks.insert(local, new_clocks);
        } else {
            // Regular write: keep the read clock, update the write.
            let clocks = clocks.entry(local).or_default();
            clocks.write = thread_clocks.clock[index];
            clocks.write_type = NaWriteType::Write;
        }
    }

    /// Record a read of local `local` at the current thread time.
    pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
        let current_span = machine.current_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
        let mut clocks = self.local_clocks.borrow_mut();
        let clocks = clocks.entry(local).or_default();
        clocks.read = thread_clocks.clock[index];
    }

    /// Transfer the lightweight clocks of `local` into the full per-byte
    /// clocks of the allocation it was moved into, so that subsequent
    /// cross-thread accesses are checked against the local's history.
    pub fn local_moved_to_memory(
        &self,
        local: mir::Local,
        alloc: &mut VClockAlloc,
        machine: &MiriMachine<'_>,
    ) {
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
        // The local may never have been written/read; default clocks then.
        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
            // The allocation was just created by this thread, so its write
            // must be attributed to our own vector index.
            assert_eq!(mem_clocks.write.0, index);
            mem_clocks.write = (index, local_clocks.write);
            mem_clocks.write_type = local_clocks.write_type;
            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
        }
    }
}
1381
1382impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
/// Private extension trait with the data-race–checking helpers used by the
/// atomic-operation entry points on the interpreter context.
trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
    /// Runs `op` with data-race detection temporarily suppressed
    /// (shared-reference variant); the suppression flag is restored afterwards.
    #[inline]
    fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R {
        let this = self.eval_context_ref();
        this.machine.data_race.set_ongoing_action_data_race_free(true);
        let result = op(this);
        this.machine.data_race.set_ongoing_action_data_race_free(false);
        result
    }

    /// Same as `allow_data_races_ref`, but for operations that need `&mut`
    /// access to the interpreter context.
    #[inline]
    fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R {
        let this = self.eval_context_mut();
        this.machine.data_race.set_ongoing_action_data_race_free(true);
        let result = op(this);
        this.machine.data_race.set_ongoing_action_data_race_free(false);
        result
    }

    /// Checks that an atomic access to `place` is valid: it must be aligned to
    /// its own size, and accesses to read-only memory are only permitted for
    /// relaxed, at-most-pointer-sized loads.
    fn atomic_access_check(
        &self,
        place: &MPlaceTy<'tcx>,
        access_type: AtomicAccessType,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        // Atomics must be "naturally" aligned, i.e. aligned to their own size.
        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
        this.check_ptr_align(place.ptr(), align)?;
        let (alloc_id, _offset, _prov) = this
            .ptr_try_get_alloc_id(place.ptr(), 0)
            .expect("there are no zero-sized atomic accesses");
        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
            match access_type {
                // Any write-like atomic on read-only memory is UB.
                AtomicAccessType::Rmw | AtomicAccessType::Store => {
                    throw_ub_format!(
                        "atomic store and read-modify-write operations cannot be performed on read-only memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                // Loads wider than a pointer are often lowered to RMWs, which
                // would write to the (read-only) memory.
                AtomicAccessType::Load(_)
                    if place.layout.size > this.tcx.data_layout().pointer_size() =>
                {
                    throw_ub_format!(
                        "large atomic load operations cannot be performed on read-only memory\n\
                        these operations often have to be implemented using read-modify-write operations, which require writeable memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                // Non-relaxed loads may likewise be lowered to RMWs on some targets.
                AtomicAccessType::Load(o) if o != AtomicReadOrd::Relaxed => {
                    throw_ub_format!(
                        "non-relaxed atomic load operations cannot be performed on read-only memory\n\
                        these operations sometimes have to be implemented using read-modify-write operations, which require writeable memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                _ => {
                    // A relaxed, at-most-pointer-sized load of read-only memory is allowed.
                }
            }
        }
        interp_ok(())
    }

    /// Updates the vector clocks for an atomic load of `place`. Relaxed loads
    /// use `load_relaxed`; all stronger orderings use `load_acquire`.
    /// `sync_clock` optionally carries an additional clock for the load to
    /// synchronize with.
    fn validate_atomic_load(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
        sync_clock: Option<&VClock>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicLoad,
            move |memory, clocks, index, atomic| {
                if atomic == AtomicReadOrd::Relaxed {
                    memory.load_relaxed(&mut *clocks, index, place.layout.size, sync_clock)
                } else {
                    memory.load_acquire(&mut *clocks, index, place.layout.size, sync_clock)
                }
            },
        )
    }

    /// Updates the vector clocks for an atomic store to `place`. Relaxed
    /// stores use `store_relaxed`; all stronger orderings use `store_release`.
    fn validate_atomic_store(
        &mut self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicStore,
            move |memory, clocks, index, atomic| {
                if atomic == AtomicWriteOrd::Relaxed {
                    memory.store_relaxed(clocks, index, place.layout.size)
                } else {
                    memory.store_release(clocks, index, place.layout.size)
                }
            },
        )
    }

    /// Updates the vector clocks for an atomic read-modify-write on `place`,
    /// decomposed into a load part (acquire for Acquire/AcqRel/SeqCst, else
    /// relaxed) and an RMW part (release for Release/AcqRel/SeqCst, else relaxed).
    fn validate_atomic_rmw(
        &mut self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx> {
        use AtomicRwOrd::*;
        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
        let release = matches!(atomic, Release | AcqRel | SeqCst);
        let this = self.eval_context_mut();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicRmw,
            move |memory, clocks, index, _| {
                if acquire {
                    memory.load_acquire(clocks, index, place.layout.size, None)?;
                } else {
                    memory.load_relaxed(clocks, index, place.layout.size, None)?;
                }
                if release {
                    memory.rmw_release(clocks, index, place.layout.size)
                } else {
                    memory.rmw_relaxed(clocks, index, place.layout.size)
                }
            },
        )
    }

    /// Shared driver for the atomic validation routines above: locates the
    /// per-allocation clocks for `place` and applies `op` to every memory-cell
    /// clock range the access overlaps. If `op` reports a `DataRace`, the race
    /// is reported via `VClockAlloc::report_data_race`. Early-returns when no
    /// vector-clock detector is installed or race detection is suppressed.
    fn validate_atomic_op<A: Debug + Copy>(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: A,
        access: AccessType,
        mut op: impl FnMut(
            &mut MemoryCellClocks,
            &mut ThreadClockSet,
            VectorIdx,
            A,
        ) -> Result<(), DataRace>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        assert!(access.is_atomic());
        // Without a vector-clock race detector there is nothing to validate.
        let Some(data_race) = this.machine.data_race.as_vclocks_ref() else {
            return interp_ok(());
        };
        if !data_race.race_detecting() {
            return interp_ok(());
        }
        let size = place.layout.size;
        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_vclocks_ref().unwrap();
        trace!(
            "Atomic op({}) with ordering {:?} on {:?} (size={})",
            access.description(None, None),
            &atomic,
            place.ptr(),
            size.bytes()
        );

        let current_span = this.machine.current_span();
        data_race.maybe_perform_sync_operation(
            &this.machine.threads,
            current_span,
            |index, mut thread_clocks| {
                for (mem_clocks_range, mem_clocks) in
                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                {
                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
                        // Release the thread-clock `RefMut` before building the
                        // race report.
                        mem::drop(thread_clocks);
                        return VClockAlloc::report_data_race(
                            data_race,
                            &this.machine.threads,
                            mem_clocks,
                            access,
                            place.layout.size,
                            interpret::Pointer::new(
                                alloc_id,
                                Size::from_bytes(mem_clocks_range.start),
                            ),
                            None,
                        )
                        .map(|_| true);
                    }
                }

                // `op` succeeded on all overlapping ranges; returning `true`
                // makes the caller increment the active thread's clock.
                interp_ok(true)
            },
        )?;

        // Log the resulting clocks, but only pay for the iteration when the
        // TRACE level is actually enabled.
        if tracing::enabled!(tracing::Level::TRACE) {
            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
                trace!(
                    "Updated atomic memory({:?}, size={}) to {:#?}",
                    place.ptr(),
                    size.bytes(),
                    mem_clocks.atomic_ops
                );
            }
        }

        interp_ok(())
    }
}
1623
1624impl GlobalState {
1625 pub fn new(config: &MiriConfig) -> Self {
1628 let mut global_state = GlobalState {
1629 multi_threaded: Cell::new(false),
1630 ongoing_action_data_race_free: Cell::new(false),
1631 vector_clocks: RefCell::new(IndexVec::new()),
1632 vector_info: RefCell::new(IndexVec::new()),
1633 thread_info: RefCell::new(IndexVec::new()),
1634 reuse_candidates: RefCell::new(FxHashSet::default()),
1635 last_sc_fence: RefCell::new(VClock::default()),
1636 last_sc_write_per_thread: RefCell::new(VClock::default()),
1637 track_outdated_loads: config.track_outdated_loads,
1638 weak_memory: config.weak_memory_emulation,
1639 };
1640
1641 let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
1644 global_state.vector_info.get_mut().push(ThreadId::MAIN_THREAD);
1645 global_state
1646 .thread_info
1647 .get_mut()
1648 .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });
1649
1650 global_state
1651 }
1652
1653 fn race_detecting(&self) -> bool {
1657 self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
1658 }
1659
1660 pub fn ongoing_action_data_race_free(&self) -> bool {
1661 self.ongoing_action_data_race_free.get()
1662 }
1663
1664 fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
1667 let mut reuse = self.reuse_candidates.borrow_mut();
1668 let vector_clocks = self.vector_clocks.borrow();
1669 for &candidate in reuse.iter() {
1670 let target_timestamp = vector_clocks[candidate].clock[candidate];
1671 if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
1672 let no_data_race = clock.clock[candidate] >= target_timestamp;
1675
1676 let vector_terminated = reuse.contains(&clock_idx);
1679
1680 no_data_race || vector_terminated
1683 }) {
1684 assert!(reuse.remove(&candidate));
1689 return Some(candidate);
1690 }
1691 }
1692 None
1693 }
1694
1695 #[inline]
1698 pub fn thread_created(
1699 &mut self,
1700 thread_mgr: &ThreadManager<'_>,
1701 thread: ThreadId,
1702 current_span: Span,
1703 ) {
1704 let current_index = self.active_thread_index(thread_mgr);
1705
1706 self.multi_threaded.set(true);
1709
1710 let mut thread_info = self.thread_info.borrow_mut();
1712 thread_info.ensure_contains_elem(thread, Default::default);
1713
1714 let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
1717 let vector_clocks = self.vector_clocks.get_mut();
1720 vector_clocks[reuse_index].increment_clock(reuse_index, current_span);
1721
1722 let vector_info = self.vector_info.get_mut();
1725 let old_thread = vector_info[reuse_index];
1726 vector_info[reuse_index] = thread;
1727
1728 thread_info[old_thread].vector_index = None;
1731
1732 reuse_index
1733 } else {
1734 let vector_info = self.vector_info.get_mut();
1737 vector_info.push(thread)
1738 };
1739
1740 trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
1741
1742 thread_info[thread].vector_index = Some(created_index);
1744
1745 let vector_clocks = self.vector_clocks.get_mut();
1747 if created_index == vector_clocks.next_index() {
1748 vector_clocks.push(ThreadClockSet::default());
1749 }
1750
1751 let (current, created) = vector_clocks.pick2_mut(current_index, created_index);
1753
1754 created.join_with(current);
1757
1758 current.increment_clock(current_index, current_span);
1761 created.increment_clock(created_index, current_span);
1762 }
1763
1764 #[inline]
1768 pub fn thread_joined(&mut self, threads: &ThreadManager<'_>, joinee: ThreadId) {
1769 let thread_info = self.thread_info.borrow();
1770 let thread_info = &thread_info[joinee];
1771
1772 let join_clock = thread_info
1774 .termination_vector_clock
1775 .as_ref()
1776 .expect("joined with thread but thread has not terminated");
1777 self.acquire_clock(join_clock, threads);
1779
1780 if let Some(current_index) = thread_info.vector_index {
1785 if threads.get_live_thread_count() == 1 {
1786 let vector_clocks = self.vector_clocks.get_mut();
1787 let current_clock = &vector_clocks[current_index];
1789 if vector_clocks
1790 .iter_enumerated()
1791 .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
1792 {
1793 self.multi_threaded.set(false);
1797 }
1798 }
1799 }
1800 }
1801
1802 #[inline]
1810 pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_>) {
1811 let current_thread = thread_mgr.active_thread();
1812 let current_index = self.active_thread_index(thread_mgr);
1813
1814 let terminaion_clock = self.release_clock(thread_mgr, |clock| clock.clone());
1816 self.thread_info.get_mut()[current_thread].termination_vector_clock =
1817 Some(terminaion_clock);
1818
1819 let reuse = self.reuse_candidates.get_mut();
1821 reuse.insert(current_index);
1822 }
1823
1824 fn atomic_fence<'tcx>(
1826 &self,
1827 machine: &MiriMachine<'tcx>,
1828 atomic: AtomicFenceOrd,
1829 ) -> InterpResult<'tcx> {
1830 let current_span = machine.current_span();
1831 self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| {
1832 trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
1833
1834 if atomic != AtomicFenceOrd::Release {
1838 clocks.apply_acquire_fence();
1840 }
1841 if atomic == AtomicFenceOrd::SeqCst {
1842 let mut sc_fence_clock = self.last_sc_fence.borrow_mut();
1850 sc_fence_clock.join(&clocks.clock);
1851 clocks.clock.join(&sc_fence_clock);
1852 clocks.write_seqcst.join(&self.last_sc_write_per_thread.borrow());
1855 }
1856 if atomic != AtomicFenceOrd::Acquire {
1859 clocks.apply_release_fence();
1861 }
1862
1863 interp_ok(atomic != AtomicFenceOrd::Acquire)
1865 })
1866 }
1867
1868 fn maybe_perform_sync_operation<'tcx>(
1876 &self,
1877 thread_mgr: &ThreadManager<'_>,
1878 current_span: Span,
1879 op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
1880 ) -> InterpResult<'tcx> {
1881 if self.multi_threaded.get() {
1882 let (index, clocks) = self.active_thread_state_mut(thread_mgr);
1883 if op(index, clocks)? {
1884 let (_, mut clocks) = self.active_thread_state_mut(thread_mgr);
1885 clocks.increment_clock(index, current_span);
1886 }
1887 }
1888 interp_ok(())
1889 }
1890
1891 fn print_thread_metadata(&self, thread_mgr: &ThreadManager<'_>, vector: VectorIdx) -> String {
1894 let thread = self.vector_info.borrow()[vector];
1895 let thread_name = thread_mgr.get_thread_display_name(thread);
1896 format!("thread `{thread_name}`")
1897 }
1898
1899 pub fn acquire_clock<'tcx>(&self, clock: &VClock, threads: &ThreadManager<'tcx>) {
1904 let thread = threads.active_thread();
1905 let (_, mut clocks) = self.thread_state_mut(thread);
1906 clocks.clock.join(clock);
1907 }
1908
1909 pub fn release_clock<'tcx, R>(
1913 &self,
1914 threads: &ThreadManager<'tcx>,
1915 callback: impl FnOnce(&VClock) -> R,
1916 ) -> R {
1917 let thread = threads.active_thread();
1918 let span = threads.active_thread_ref().current_span();
1919 let (index, mut clocks) = self.thread_state_mut(thread);
1920 let r = callback(&clocks.clock);
1921 clocks.increment_clock(index, span);
1924
1925 r
1926 }
1927
1928 fn thread_index(&self, thread: ThreadId) -> VectorIdx {
1929 self.thread_info.borrow()[thread].vector_index.expect("thread has no assigned vector")
1930 }
1931
1932 #[inline]
1935 fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1936 let index = self.thread_index(thread);
1937 let ref_vector = self.vector_clocks.borrow_mut();
1938 let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
1939 (index, clocks)
1940 }
1941
1942 #[inline]
1945 fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1946 let index = self.thread_index(thread);
1947 let ref_vector = self.vector_clocks.borrow();
1948 let clocks = Ref::map(ref_vector, |vec| &vec[index]);
1949 (index, clocks)
1950 }
1951
1952 #[inline]
1955 pub(super) fn active_thread_state(
1956 &self,
1957 thread_mgr: &ThreadManager<'_>,
1958 ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1959 self.thread_state(thread_mgr.active_thread())
1960 }
1961
1962 #[inline]
1965 pub(super) fn active_thread_state_mut(
1966 &self,
1967 thread_mgr: &ThreadManager<'_>,
1968 ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1969 self.thread_state_mut(thread_mgr.active_thread())
1970 }
1971
1972 #[inline]
1975 fn active_thread_index(&self, thread_mgr: &ThreadManager<'_>) -> VectorIdx {
1976 let active_thread_id = thread_mgr.active_thread();
1977 self.thread_index(active_thread_id)
1978 }
1979
1980 pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_>) {
1982 let (index, clocks) = self.active_thread_state(thread_mgr);
1983 self.last_sc_write_per_thread.borrow_mut().set_at_index(&clocks.clock, index);
1984 }
1985
1986 pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_>) {
1988 let (.., mut clocks) = self.active_thread_state_mut(thread_mgr);
1989 clocks.read_seqcst.join(&self.last_sc_fence.borrow());
1990 }
1991}