1use std::cell::{Cell, Ref, RefCell, RefMut};
44use std::fmt::Debug;
45use std::mem;
46
47use rustc_abi::{Align, HasDataLayout, Size};
48use rustc_ast::Mutability;
49use rustc_data_structures::fx::{FxHashMap, FxHashSet};
50use rustc_index::{Idx, IndexVec};
51use rustc_log::tracing;
52use rustc_middle::mir;
53use rustc_middle::ty::Ty;
54use rustc_span::Span;
55
56use super::vector_clock::{VClock, VTimestamp, VectorIdx};
57use super::weak_memory::EvalContextExt as _;
58use crate::concurrency::GlobalDataRaceHandler;
59use crate::diagnostics::RacingOp;
60use crate::intrinsics::AtomicRmwOp;
61use crate::*;
62
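/// Alias for the per-allocation state of the vector-clock data-race detector.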
63pub type AllocState = VClockAlloc;
64
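/// Valid orderings for atomic read-modify-write operations.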
65#[derive(Copy, Clone, PartialEq, Eq, Debug)]
67pub enum AtomicRwOrd {
68 Relaxed,
69 Acquire,
70 Release,
71 AcqRel,
72 SeqCst,
73}
74
75#[derive(Copy, Clone, PartialEq, Eq, Debug)]
77pub enum AtomicReadOrd {
78 Relaxed,
79 Acquire,
80 SeqCst,
81}
82
83#[derive(Copy, Clone, PartialEq, Eq, Debug)]
85pub enum AtomicWriteOrd {
86 Relaxed,
87 Release,
88 SeqCst,
89}
90
91#[derive(Copy, Clone, PartialEq, Eq, Debug)]
93pub enum AtomicFenceOrd {
94 Acquire,
95 Release,
96 AcqRel,
97 SeqCst,
98}
99
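/// The vector-clock state of a single thread: its happens-before clock plus the
/// auxiliary clocks used to model release/acquire fences and SeqCst accesses.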
100#[derive(Clone, Default, Debug)]
104pub(super) struct ThreadClockSet {
105 pub(super) clock: VClock,
108
109 fence_acquire: VClock,
112
113 fence_release: VClock,
116
117 pub(super) write_seqcst: VClock,
122
123 pub(super) read_seqcst: VClock,
128}
129
130impl ThreadClockSet {
131 #[inline]
134 fn apply_release_fence(&mut self) {
135 self.fence_release.clone_from(&self.clock);
136 }
137
138 #[inline]
141 fn apply_acquire_fence(&mut self) {
142 self.clock.join(&self.fence_acquire);
143 }
144
145 #[inline]
148 fn increment_clock(&mut self, index: VectorIdx, current_span: Span) {
149 self.clock.increment_index(index, current_span);
150 }
151
152 fn join_with(&mut self, other: &ThreadClockSet) {
156 self.clock.join(&other.clock);
157 }
158}
159
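/// Marker error returned by the clock operations below when a data race is
/// detected; the caller is responsible for turning it into a diagnostic.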
160#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
163pub struct DataRace;
164
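/// Per-location clocks for atomic accesses: the last atomic reads and writes of
/// each thread, the synchronization ("release") clock, and the access size the
/// location is currently being used with.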
165#[derive(Clone, PartialEq, Eq, Debug)]
170struct AtomicMemoryCellClocks {
171 read_vector: VClock,
176
177 write_vector: VClock,
182
183 sync_vector: VClock,
191
192 size: Option<Size>,
197}
198
199#[derive(Copy, Clone, PartialEq, Eq, Debug)]
200enum AtomicAccessType {
201 Load(AtomicReadOrd),
202 Store,
203 Rmw,
204}
205
206#[derive(Copy, Clone, PartialEq, Eq, Debug)]
208pub enum NaReadType {
209 Read,
211
212 Retag,
214}
215
216impl NaReadType {
217 fn description(self) -> &'static str {
218 match self {
219 NaReadType::Read => "non-atomic read",
220 NaReadType::Retag => "retag read",
221 }
222 }
223}
224
225#[derive(Copy, Clone, PartialEq, Eq, Debug)]
228pub enum NaWriteType {
229 Allocate,
231
232 Write,
234
235 Retag,
237
238 Deallocate,
243}
244
245impl NaWriteType {
246 fn description(self) -> &'static str {
247 match self {
248 NaWriteType::Allocate => "creating a new allocation",
249 NaWriteType::Write => "non-atomic write",
250 NaWriteType::Retag => "retag write",
251 NaWriteType::Deallocate => "deallocation",
252 }
253 }
254}
255
256#[derive(Copy, Clone, PartialEq, Eq, Debug)]
257enum AccessType {
258 NaRead(NaReadType),
259 NaWrite(NaWriteType),
260 AtomicLoad,
261 AtomicStore,
262 AtomicRmw,
263}
264
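/// The race-detection clocks of a single memory cell: the last non-atomic write
/// (and what kind of write it was), the non-atomic reads of each thread, and the
/// atomic clocks, if the cell has been accessed atomically.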
265#[derive(Clone, PartialEq, Eq, Debug)]
267struct MemoryCellClocks {
268 write: (VectorIdx, VTimestamp),
272
273 write_type: NaWriteType,
277
278 read: VClock,
282
283 atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
287}
288
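/// Extra per-thread metadata: the vector index currently assigned to the thread
/// (if any) and the clock it released when it terminated, used when joining it.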
289#[derive(Debug, Clone, Default)]
291struct ThreadExtraState {
292 vector_index: Option<VectorIdx>,
298
299 termination_vector_clock: Option<VClock>,
304}
305
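/// Global state of the vector-clock data-race detector: one `ThreadClockSet` per
/// vector index, the mappings between threads and vector indices, the indices
/// that may be reused for new threads, and the bookkeeping for SeqCst fences and
/// SeqCst writes.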
306#[derive(Debug, Clone)]
311pub struct GlobalState {
312 multi_threaded: Cell<bool>,
319
320 ongoing_action_data_race_free: Cell<bool>,
324
325 vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,
329
330 vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,
334
335 thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,
337
338 reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
346
347 last_sc_fence: RefCell<VClock>,
350
351 last_sc_write_per_thread: RefCell<VClock>,
354
355 pub track_outdated_loads: bool,
357
358 pub weak_memory: bool,
360}
361
362impl VisitProvenance for GlobalState {
363 fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
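        // The global data-race state stores no pointers, so there is nothing to visit.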
364 }
366}
367
368impl AccessType {
369 fn description(self, ty: Option<Ty<'_>>, size: Option<Size>) -> String {
370 let mut msg = String::new();
371
372 if let Some(size) = size {
373 if size == Size::ZERO {
374 assert!(self == AccessType::AtomicLoad);
378 assert!(ty.is_none());
379 return format!("multiple differently-sized atomic loads, including one load");
380 }
            msg.push_str(&format!("{}-byte ", size.bytes()));
382 }
383
384 msg.push_str(match self {
385 AccessType::NaRead(w) => w.description(),
386 AccessType::NaWrite(w) => w.description(),
387 AccessType::AtomicLoad => "atomic load",
388 AccessType::AtomicStore => "atomic store",
389 AccessType::AtomicRmw => "atomic read-modify-write",
390 });
391
392 if let Some(ty) = ty {
393 msg.push_str(&format!(" of type `{ty}`"));
394 }
395
396 msg
397 }
398
399 fn is_atomic(self) -> bool {
400 match self {
401 AccessType::AtomicLoad | AccessType::AtomicStore | AccessType::AtomicRmw => true,
402 AccessType::NaRead(_) | AccessType::NaWrite(_) => false,
403 }
404 }
405
406 fn is_read(self) -> bool {
407 match self {
408 AccessType::AtomicLoad | AccessType::NaRead(_) => true,
409 AccessType::NaWrite(_) | AccessType::AtomicStore | AccessType::AtomicRmw => false,
410 }
411 }
412
413 fn is_retag(self) -> bool {
414 matches!(
415 self,
416 AccessType::NaRead(NaReadType::Retag) | AccessType::NaWrite(NaWriteType::Retag)
417 )
418 }
419}
420
421impl AtomicMemoryCellClocks {
422 fn new(size: Size) -> Self {
423 AtomicMemoryCellClocks {
424 read_vector: Default::default(),
425 write_vector: Default::default(),
426 sync_vector: Default::default(),
427 size: Some(size),
428 }
429 }
430}
431
432impl MemoryCellClocks {
433 fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
436 MemoryCellClocks {
437 read: VClock::default(),
438 write: (alloc_index, alloc),
439 write_type: NaWriteType::Allocate,
440 atomic_ops: None,
441 }
442 }
443
444 #[inline]
445 fn write_was_before(&self, other: &VClock) -> bool {
446 self.write.1 <= other[self.write.0]
449 }
450
451 #[inline]
452 fn write(&self) -> VClock {
453 VClock::new_with_index(self.write.0, self.write.1)
454 }
455
456 #[inline]
458 fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
459 self.atomic_ops.as_deref()
460 }
461
462 #[inline]
464 fn atomic_mut_unwrap(&mut self) -> &mut AtomicMemoryCellClocks {
465 self.atomic_ops.as_deref_mut().unwrap()
466 }
467
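    /// Get the atomic clocks for an access of `size` bytes, creating them on first
    /// use. Changing the tracked access size is only allowed if the current thread
    /// is already synchronized with all previous conflicting accesses (for reads,
    /// with all previous writes); otherwise this is a mixed-size conflict and
    /// `Err(DataRace)` is returned.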
468 fn atomic_access(
471 &mut self,
472 thread_clocks: &ThreadClockSet,
473 size: Size,
474 write: bool,
475 ) -> Result<&mut AtomicMemoryCellClocks, DataRace> {
476 match self.atomic_ops {
477 Some(ref mut atomic) => {
478 if atomic.size == Some(size) {
480 Ok(atomic)
481 } else if atomic.read_vector <= thread_clocks.clock
482 && atomic.write_vector <= thread_clocks.clock
483 {
484 atomic.size = Some(size);
486 Ok(atomic)
487 } else if !write && atomic.write_vector <= thread_clocks.clock {
488 atomic.size = None;
491 Ok(atomic)
492 } else {
493 Err(DataRace)
494 }
495 }
496 None => {
497 self.atomic_ops = Some(Box::new(AtomicMemoryCellClocks::new(size)));
498 Ok(self.atomic_ops.as_mut().unwrap())
499 }
500 }
501 }
502
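    /// Detect races for an atomic load with acquire semantics and join the
    /// location's synchronization clock (or the `sync_clock` provided by the
    /// weak-memory buffer) into the thread's clock.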
503 fn load_acquire(
507 &mut self,
508 thread_clocks: &mut ThreadClockSet,
509 index: VectorIdx,
510 access_size: Size,
511 sync_clock: Option<&VClock>,
512 ) -> Result<(), DataRace> {
513 self.atomic_read_detect(thread_clocks, index, access_size)?;
514 if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
515 thread_clocks.clock.join(sync_clock);
516 }
517 Ok(())
518 }
519
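    /// Detect races for a relaxed atomic load. The synchronization clock is only
    /// joined into the thread's fence-acquire clock, so it takes effect at the
    /// next acquire fence rather than immediately.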
520 fn load_relaxed(
524 &mut self,
525 thread_clocks: &mut ThreadClockSet,
526 index: VectorIdx,
527 access_size: Size,
528 sync_clock: Option<&VClock>,
529 ) -> Result<(), DataRace> {
530 self.atomic_read_detect(thread_clocks, index, access_size)?;
531 if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
532 thread_clocks.fence_acquire.join(sync_clock);
533 }
534 Ok(())
535 }
536
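    /// Detect races for an atomic store with release semantics and replace the
    /// location's synchronization clock with the current thread clock.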
537 fn store_release(
540 &mut self,
541 thread_clocks: &ThreadClockSet,
542 index: VectorIdx,
543 access_size: Size,
544 ) -> Result<(), DataRace> {
545 self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.clone_from(&thread_clocks.clock);
548 Ok(())
549 }
550
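    /// Detect races for a relaxed atomic store; the synchronization clock is
    /// replaced by the thread's fence-release clock.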
551 fn store_relaxed(
554 &mut self,
555 thread_clocks: &ThreadClockSet,
556 index: VectorIdx,
557 access_size: Size,
558 ) -> Result<(), DataRace> {
559 self.atomic_write_detect(thread_clocks, index, access_size)?;
560
561 let atomic = self.atomic_mut_unwrap();
567 atomic.sync_vector.clone_from(&thread_clocks.fence_release);
568 Ok(())
569 }
570
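    /// Detect races for the write half of a release RMW; the current thread clock
    /// is joined into the synchronization clock, preserving earlier releases.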
571 fn rmw_release(
574 &mut self,
575 thread_clocks: &ThreadClockSet,
576 index: VectorIdx,
577 access_size: Size,
578 ) -> Result<(), DataRace> {
579 self.atomic_write_detect(thread_clocks, index, access_size)?;
580 let atomic = self.atomic_mut_unwrap();
581 atomic.sync_vector.join(&thread_clocks.clock);
584 Ok(())
585 }
586
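    /// Detect races for the write half of a relaxed RMW; the thread's
    /// fence-release clock is joined into the synchronization clock.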
587 fn rmw_relaxed(
590 &mut self,
591 thread_clocks: &ThreadClockSet,
592 index: VectorIdx,
593 access_size: Size,
594 ) -> Result<(), DataRace> {
595 self.atomic_write_detect(thread_clocks, index, access_size)?;
596 let atomic = self.atomic_mut_unwrap();
597 atomic.sync_vector.join(&thread_clocks.fence_release);
600 Ok(())
601 }
602
603 fn atomic_read_detect(
606 &mut self,
607 thread_clocks: &ThreadClockSet,
608 index: VectorIdx,
609 access_size: Size,
610 ) -> Result<(), DataRace> {
611 trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
612 let atomic = self.atomic_access(thread_clocks, access_size, false)?;
613 atomic.read_vector.set_at_index(&thread_clocks.clock, index);
614 if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) }
616 }
617
618 fn atomic_write_detect(
621 &mut self,
622 thread_clocks: &ThreadClockSet,
623 index: VectorIdx,
624 access_size: Size,
625 ) -> Result<(), DataRace> {
626 trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
627 let atomic = self.atomic_access(thread_clocks, access_size, true)?;
628 atomic.write_vector.set_at_index(&thread_clocks.clock, index);
629 if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
631 Ok(())
632 } else {
633 Err(DataRace)
634 }
635 }
636
637 fn read_race_detect(
640 &mut self,
641 thread_clocks: &mut ThreadClockSet,
642 index: VectorIdx,
643 read_type: NaReadType,
644 current_span: Span,
645 ) -> Result<(), DataRace> {
646 trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
647 if !current_span.is_dummy() {
648 thread_clocks.clock.index_mut(index).span = current_span;
649 }
650 thread_clocks.clock.index_mut(index).set_read_type(read_type);
651 if !self.write_was_before(&thread_clocks.clock) {
653 return Err(DataRace);
654 }
655 if !self.atomic().is_none_or(|atomic| atomic.write_vector <= thread_clocks.clock) {
657 return Err(DataRace);
658 }
659 self.read.set_at_index(&thread_clocks.clock, index);
661 Ok(())
662 }
663
664 fn write_race_detect(
667 &mut self,
668 thread_clocks: &mut ThreadClockSet,
669 index: VectorIdx,
670 write_type: NaWriteType,
671 current_span: Span,
672 ) -> Result<(), DataRace> {
673 trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
674 if !current_span.is_dummy() {
675 thread_clocks.clock.index_mut(index).span = current_span;
676 }
677 if !(self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock) {
679 return Err(DataRace);
680 }
681 if !self.atomic().is_none_or(|atomic| {
683 atomic.write_vector <= thread_clocks.clock && atomic.read_vector <= thread_clocks.clock
684 }) {
685 return Err(DataRace);
686 }
687 self.write = (index, thread_clocks.clock[index]);
689 self.write_type = write_type;
690 self.read.set_zero_vector();
691 self.atomic_ops = None;
693 Ok(())
694 }
695}
696
697impl GlobalDataRaceHandler {
698 fn set_ongoing_action_data_race_free(&self, enable: bool) {
701 match self {
702 GlobalDataRaceHandler::None => {}
703 GlobalDataRaceHandler::Vclocks(data_race) => {
704 let old = data_race.ongoing_action_data_race_free.replace(enable);
705 assert_ne!(old, enable, "cannot nest allow_data_races");
706 }
707 GlobalDataRaceHandler::Genmc(genmc_ctx) => {
708 genmc_ctx.set_ongoing_action_data_race_free(enable);
709 }
710 }
711 }
712}
713
714impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
716pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
717 fn read_scalar_atomic(
719 &self,
720 place: &MPlaceTy<'tcx>,
721 atomic: AtomicReadOrd,
722 ) -> InterpResult<'tcx, Scalar> {
723 let this = self.eval_context_ref();
724 this.atomic_access_check(place, AtomicAccessType::Load(atomic))?;
725 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
732 let old_val = this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err();
733 return genmc_ctx.atomic_load(
734 this,
735 place.ptr().addr(),
736 place.layout.size,
737 atomic,
738 old_val,
739 );
740 }
741
742 let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
743 let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, |sync_clock| {
744 this.validate_atomic_load(place, atomic, sync_clock)
745 })?;
746 interp_ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
747 }
748
749 fn write_scalar_atomic(
751 &mut self,
752 val: Scalar,
753 dest: &MPlaceTy<'tcx>,
754 atomic: AtomicWriteOrd,
755 ) -> InterpResult<'tcx> {
756 let this = self.eval_context_mut();
757 this.atomic_access_check(dest, AtomicAccessType::Store)?;
758
759 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
761 let old_val = this.run_for_validation_ref(|this| this.read_scalar(dest)).discard_err();
762 if genmc_ctx.atomic_store(
763 this,
764 dest.ptr().addr(),
765 dest.layout.size,
766 val,
767 old_val,
768 atomic,
769 )? {
770 this.allow_data_races_mut(|this| this.write_scalar(val, dest))?;
773 }
774 return interp_ok(());
775 }
776
777 let old_val = this.get_latest_nonatomic_val(dest);
779 this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
780 this.validate_atomic_store(dest, atomic)?;
781 this.buffered_atomic_write(val, dest, atomic, old_val)
782 }
783
784 fn atomic_rmw_op_immediate(
786 &mut self,
787 place: &MPlaceTy<'tcx>,
788 rhs: &ImmTy<'tcx>,
789 atomic_op: AtomicRmwOp,
790 ord: AtomicRwOrd,
791 ) -> InterpResult<'tcx, ImmTy<'tcx>> {
792 let this = self.eval_context_mut();
793 this.atomic_access_check(place, AtomicAccessType::Rmw)?;
794
795 let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
796
797 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
799 let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
800 this,
801 place.ptr().addr(),
802 place.layout.size,
803 atomic_op,
804 place.layout.backend_repr.is_signed(),
805 ord,
806 rhs.to_scalar(),
807 old.to_scalar(),
808 )?;
809 if let Some(new_val) = new_val {
810 this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
811 }
812 return interp_ok(ImmTy::from_scalar(old_val, old.layout));
813 }
814
815 let val = match atomic_op {
816 AtomicRmwOp::MirOp { op, neg } => {
817 let val = this.binary_op(op, &old, rhs)?;
818 if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val }
819 }
820 AtomicRmwOp::Max => {
821 let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
822 if lt { rhs } else { &old }.clone()
823 }
824 AtomicRmwOp::Min => {
825 let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
826 if lt { &old } else { rhs }.clone()
827 }
828 };
829
830 this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;
831
832 this.validate_atomic_rmw(place, ord)?;
833
834 this.buffered_atomic_rmw(val.to_scalar(), place, ord, old.to_scalar())?;
835 interp_ok(old)
836 }
837
838 fn atomic_exchange_scalar(
841 &mut self,
842 place: &MPlaceTy<'tcx>,
843 new: Scalar,
844 atomic: AtomicRwOrd,
845 ) -> InterpResult<'tcx, Scalar> {
846 let this = self.eval_context_mut();
847 this.atomic_access_check(place, AtomicAccessType::Rmw)?;
848
849 let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
850 this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
851
852 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
854 let (old_val, new_val) = genmc_ctx.atomic_exchange(
855 this,
856 place.ptr().addr(),
857 place.layout.size,
858 new,
859 atomic,
860 old,
861 )?;
862 if let Some(new_val) = new_val {
865 this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
866 }
867 return interp_ok(old_val);
868 }
869
870 this.validate_atomic_rmw(place, atomic)?;
871
872 this.buffered_atomic_rmw(new, place, atomic, old)?;
873 interp_ok(old)
874 }
875
876 fn atomic_compare_exchange_scalar(
883 &mut self,
884 place: &MPlaceTy<'tcx>,
885 expect_old: &ImmTy<'tcx>,
886 new: Scalar,
887 success: AtomicRwOrd,
888 fail: AtomicReadOrd,
889 can_fail_spuriously: bool,
890 ) -> InterpResult<'tcx, Immediate<Provenance>> {
891 use rand::Rng as _;
892 let this = self.eval_context_mut();
893 this.atomic_access_check(place, AtomicAccessType::Rmw)?;
894
895 let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
897
898 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
900 let (old_value, new_value, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
901 this,
902 place.ptr().addr(),
903 place.layout.size,
904 this.read_scalar(expect_old)?,
905 new,
906 success,
907 fail,
908 can_fail_spuriously,
909 old.to_scalar(),
910 )?;
911 if let Some(new_value) = new_value {
914 this.allow_data_races_mut(|this| this.write_scalar(new_value, place))?;
915 }
916 return interp_ok(Immediate::ScalarPair(old_value, Scalar::from_bool(cmpxchg_success)));
917 }
918
919 let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
921 let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
924 let cmpxchg_success = eq.to_scalar().to_bool()?
925 && if can_fail_spuriously {
926 this.machine.rng.get_mut().random_bool(success_rate)
927 } else {
928 true
929 };
930 let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));
931
932 if cmpxchg_success {
936 this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
937 this.validate_atomic_rmw(place, success)?;
938 this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
939 } else {
940 this.validate_atomic_load(place, fail, None)?;
941 this.perform_read_on_buffered_latest(place, fail)?;
946 }
947
948 interp_ok(res)
950 }
951
952 fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
954 let this = self.eval_context_mut();
955 let machine = &this.machine;
956 match &this.machine.data_race {
957 GlobalDataRaceHandler::None => interp_ok(()),
958 GlobalDataRaceHandler::Vclocks(data_race) => data_race.atomic_fence(machine, atomic),
959 GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.atomic_fence(machine, atomic),
960 }
961 }
962
963 fn release_clock<R>(
969 &self,
970 callback: impl FnOnce(&VClock) -> R,
971 ) -> InterpResult<'tcx, Option<R>> {
972 let this = self.eval_context_ref();
973 interp_ok(match &this.machine.data_race {
974 GlobalDataRaceHandler::None => None,
975 GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
976 throw_unsup_format!(
977 "this operation performs synchronization that is not supported in GenMC mode"
978 ),
979 GlobalDataRaceHandler::Vclocks(data_race) =>
980 Some(data_race.release_clock(&this.machine.threads, callback)),
981 })
982 }
983
984 fn acquire_clock(&self, clock: &VClock) -> InterpResult<'tcx> {
987 let this = self.eval_context_ref();
988 match &this.machine.data_race {
989 GlobalDataRaceHandler::None => {}
990 GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
991 throw_unsup_format!(
992 "this operation performs synchronization that is not supported in GenMC mode"
993 ),
994 GlobalDataRaceHandler::Vclocks(data_race) =>
995 data_race.acquire_clock(clock, &this.machine.threads),
996 }
997 interp_ok(())
998 }
999}
1000
1001#[derive(Debug, Clone)]
1003pub struct VClockAlloc {
1004 alloc_ranges: RefCell<DedupRangeMap<MemoryCellClocks>>,
1006}
1007
1008impl VisitProvenance for VClockAlloc {
1009 fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
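        // The per-cell clocks store no pointers, so there is nothing to visit.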
1010 }
1012}
1013
1014impl VClockAlloc {
1015 pub fn new_allocation(
1017 global: &GlobalState,
1018 thread_mgr: &ThreadManager<'_>,
1019 len: Size,
1020 kind: MemoryKind,
1021 current_span: Span,
1022 ) -> VClockAlloc {
1023 let (alloc_timestamp, alloc_index) = match kind {
1025 MemoryKind::Machine(
1027 MiriMemoryKind::Rust
1028 | MiriMemoryKind::Miri
1029 | MiriMemoryKind::C
1030 | MiriMemoryKind::WinHeap
1031 | MiriMemoryKind::WinLocal
1032 | MiriMemoryKind::Mmap,
1033 )
1034 | MemoryKind::Stack => {
1035 let (alloc_index, clocks) = global.active_thread_state(thread_mgr);
1036 let mut alloc_timestamp = clocks.clock[alloc_index];
1037 alloc_timestamp.span = current_span;
1038 (alloc_timestamp, alloc_index)
1039 }
1040 MemoryKind::Machine(
1043 MiriMemoryKind::Global
1044 | MiriMemoryKind::Machine
1045 | MiriMemoryKind::Runtime
1046 | MiriMemoryKind::ExternStatic
1047 | MiriMemoryKind::Tls,
1048 )
1049 | MemoryKind::CallerLocation =>
1050 (VTimestamp::ZERO, global.thread_index(ThreadId::MAIN_THREAD)),
1051 };
1052 VClockAlloc {
1053 alloc_ranges: RefCell::new(DedupRangeMap::new(
1054 len,
1055 MemoryCellClocks::new(alloc_timestamp, alloc_index),
1056 )),
1057 }
1058 }
1059
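    /// Return the first vector index at which `l` exceeds `r`, i.e. a witness that
    /// `l <= r` does not hold; returns `None` if `l <= r`.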
1060 fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
1063 trace!("Find index where not {:?} <= {:?}", l, r);
1064 let l_slice = l.as_slice();
1065 let r_slice = r.as_slice();
1066 l_slice
1067 .iter()
1068 .zip(r_slice.iter())
1069 .enumerate()
1070 .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
1071 .or_else(|| {
1072 if l_slice.len() > r_slice.len() {
1073 let l_remainder_slice = &l_slice[r_slice.len()..];
1078 let idx = l_remainder_slice
1079 .iter()
1080 .enumerate()
1081 .find_map(|(idx, &r)| if r == VTimestamp::ZERO { None } else { Some(idx) })
1082 .expect("Invalid VClock Invariant");
1083 Some(idx + r_slice.len())
1084 } else {
1085 None
1086 }
1087 })
1088 .map(VectorIdx::new)
1089 }
1090
1091 #[cold]
1098 #[inline(never)]
1099 fn report_data_race<'tcx>(
1100 global: &GlobalState,
1101 thread_mgr: &ThreadManager<'_>,
1102 mem_clocks: &MemoryCellClocks,
1103 access: AccessType,
1104 access_size: Size,
1105 ptr_dbg: interpret::Pointer<AllocId>,
1106 ty: Option<Ty<'_>>,
1107 ) -> InterpResult<'tcx> {
1108 let (active_index, active_clocks) = global.active_thread_state(thread_mgr);
        let mut other_size = None;
        let write_clock;
1111 let (other_access, other_thread, other_clock) =
1112 if !access.is_atomic() &&
1114 let Some(atomic) = mem_clocks.atomic() &&
1115 let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
1116 {
1117 (AccessType::AtomicStore, idx, &atomic.write_vector)
1118 } else if !access.is_atomic() &&
1119 !access.is_read() &&
1120 let Some(atomic) = mem_clocks.atomic() &&
1121 let Some(idx) = Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
1122 {
1123 (AccessType::AtomicLoad, idx, &atomic.read_vector)
1124 } else if mem_clocks.write.1 > active_clocks.clock[mem_clocks.write.0] {
1126 write_clock = mem_clocks.write();
1127 (AccessType::NaWrite(mem_clocks.write_type), mem_clocks.write.0, &write_clock)
            } else if !access.is_read()
                && let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock)
            {
                (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read)
            } else if access.is_atomic()
                && let Some(atomic) = mem_clocks.atomic()
                && atomic.size != Some(access_size)
            {
                other_size = Some(atomic.size.unwrap_or(Size::ZERO));
1135 if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
1136 {
1137 (AccessType::AtomicStore, idx, &atomic.write_vector)
1138 } else if let Some(idx) =
1139 Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
1140 {
1141 (AccessType::AtomicLoad, idx, &atomic.read_vector)
1142 } else {
1143 unreachable!(
1144 "Failed to report data-race for mixed-size access: no race found"
1145 )
1146 }
1147 } else {
1148 unreachable!("Failed to report data-race")
1149 };
1150
1151 let active_thread_info = global.print_thread_metadata(thread_mgr, active_index);
1153 let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
1154 let involves_non_atomic = !access.is_atomic() || !other_access.is_atomic();
1155
1156 let extra = if other_size.is_some() {
1158 assert!(!involves_non_atomic);
1159 Some("overlapping unsynchronized atomic accesses must use the same access size")
1160 } else if access.is_read() && other_access.is_read() {
1161 panic!(
1162 "there should be no same-size read-read races\naccess: {access:?}\nother_access: {other_access:?}"
1163 )
1164 } else {
1165 None
1166 };
1167 Err(err_machine_stop!(TerminationInfo::DataRace {
1168 involves_non_atomic,
1169 extra,
1170 retag_explain: access.is_retag() || other_access.is_retag(),
1171 ptr: ptr_dbg,
1172 op1: RacingOp {
1173 action: other_access.description(None, other_size),
1174 thread_info: other_thread_info,
1175 span: other_clock.as_slice()[other_thread.index()].span_data(),
1176 },
1177 op2: RacingOp {
1178 action: access.description(ty, other_size.map(|_| access_size)),
1179 thread_info: active_thread_info,
1180 span: active_clocks.clock.as_slice()[active_index.index()].span_data(),
1181 },
1182 }))?
1183 }
1184
1185 pub(super) fn sync_clock(&self, access_range: AllocRange) -> VClock {
1187 let alloc_ranges = self.alloc_ranges.borrow();
1188 let mut clock = VClock::default();
1189 for (_, mem_clocks) in alloc_ranges.iter(access_range.start, access_range.size) {
1190 if let Some(atomic) = mem_clocks.atomic() {
1191 clock.join(&atomic.sync_vector);
1192 }
1193 }
1194 clock
1195 }
1196
1197 pub fn read_non_atomic<'tcx>(
1204 &self,
1205 alloc_id: AllocId,
1206 access_range: AllocRange,
1207 read_type: NaReadType,
1208 ty: Option<Ty<'_>>,
1209 machine: &MiriMachine<'_>,
1210 ) -> InterpResult<'tcx> {
1211 let current_span = machine.current_user_relevant_span();
1212 let global = machine.data_race.as_vclocks_ref().unwrap();
1213 if !global.race_detecting() {
1214 return interp_ok(());
1215 }
1216 let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1217 let mut alloc_ranges = self.alloc_ranges.borrow_mut();
1218 for (mem_clocks_range, mem_clocks) in
1219 alloc_ranges.iter_mut(access_range.start, access_range.size)
1220 {
1221 if let Err(DataRace) =
1222 mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
1223 {
1224 drop(thread_clocks);
1225 return Self::report_data_race(
1227 global,
1228 &machine.threads,
1229 mem_clocks,
1230 AccessType::NaRead(read_type),
1231 access_range.size,
1232 interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
1233 ty,
1234 );
1235 }
1236 }
1237 interp_ok(())
1238 }
1239
1240 pub fn write_non_atomic<'tcx>(
1246 &mut self,
1247 alloc_id: AllocId,
1248 access_range: AllocRange,
1249 write_type: NaWriteType,
1250 ty: Option<Ty<'_>>,
1251 machine: &mut MiriMachine<'_>,
1252 ) -> InterpResult<'tcx> {
1253 let current_span = machine.current_user_relevant_span();
1254 let global = machine.data_race.as_vclocks_mut().unwrap();
1255 if !global.race_detecting() {
1256 return interp_ok(());
1257 }
1258 let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1259 for (mem_clocks_range, mem_clocks) in
1260 self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
1261 {
1262 if let Err(DataRace) =
1263 mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
1264 {
1265 drop(thread_clocks);
1266 return Self::report_data_race(
1268 global,
1269 &machine.threads,
1270 mem_clocks,
1271 AccessType::NaWrite(write_type),
1272 access_range.size,
1273 interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
1274 ty,
1275 );
1276 }
1277 }
1278 interp_ok(())
1279 }
1280}
1281
1282#[derive(Debug, Default)]
1285pub struct FrameState {
1286 local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
1287}
1288
1289#[derive(Debug)]
1293struct LocalClocks {
1294 write: VTimestamp,
1295 write_type: NaWriteType,
1296 read: VTimestamp,
1297}
1298
1299impl Default for LocalClocks {
1300 fn default() -> Self {
1301 Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
1302 }
1303}
1304
1305impl FrameState {
1306 pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
1307 let current_span = machine.current_user_relevant_span();
1308 let global = machine.data_race.as_vclocks_ref().unwrap();
1309 if !global.race_detecting() {
1310 return;
1311 }
1312 let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1313 if !current_span.is_dummy() {
1315 thread_clocks.clock.index_mut(index).span = current_span;
1316 }
1317 let mut clocks = self.local_clocks.borrow_mut();
1318 if storage_live {
1319 let new_clocks = LocalClocks {
1320 write: thread_clocks.clock[index],
1321 write_type: NaWriteType::Allocate,
1322 read: VTimestamp::ZERO,
1323 };
1324 clocks.insert(local, new_clocks);
1327 } else {
1328 let clocks = clocks.entry(local).or_default();
1331 clocks.write = thread_clocks.clock[index];
1332 clocks.write_type = NaWriteType::Write;
1333 }
1334 }
1335
1336 pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
1337 let current_span = machine.current_user_relevant_span();
1338 let global = machine.data_race.as_vclocks_ref().unwrap();
1339 if !global.race_detecting() {
1340 return;
1341 }
1342 let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1343 if !current_span.is_dummy() {
1345 thread_clocks.clock.index_mut(index).span = current_span;
1346 }
1347 thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
1348 let mut clocks = self.local_clocks.borrow_mut();
1351 let clocks = clocks.entry(local).or_default();
1352 clocks.read = thread_clocks.clock[index];
1353 }
1354
1355 pub fn local_moved_to_memory(
1356 &self,
1357 local: mir::Local,
1358 alloc: &mut VClockAlloc,
1359 machine: &MiriMachine<'_>,
1360 ) {
1361 let global = machine.data_race.as_vclocks_ref().unwrap();
1362 if !global.race_detecting() {
1363 return;
1364 }
1365 let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
1366 let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
1370 for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
1371 assert_eq!(mem_clocks.write.0, index);
1374 mem_clocks.write = (index, local_clocks.write);
1376 mem_clocks.write_type = local_clocks.write_type;
1377 mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
1378 }
1379 }
1380}
1381
1382impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
1383trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
1384 #[inline]
1392 fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R {
1393 let this = self.eval_context_ref();
1394 this.machine.data_race.set_ongoing_action_data_race_free(true);
1395 let result = op(this);
1396 this.machine.data_race.set_ongoing_action_data_race_free(false);
1397 result
1398 }
1399
1400 #[inline]
1404 fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R {
1405 let this = self.eval_context_mut();
1406 this.machine.data_race.set_ongoing_action_data_race_free(true);
1407 let result = op(this);
1408 this.machine.data_race.set_ongoing_action_data_race_free(false);
1409 result
1410 }
1411
1412 fn atomic_access_check(
1414 &self,
1415 place: &MPlaceTy<'tcx>,
1416 access_type: AtomicAccessType,
1417 ) -> InterpResult<'tcx> {
1418 let this = self.eval_context_ref();
1419 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1423 this.check_ptr_align(place.ptr(), align)?;
1424 let (alloc_id, _offset, _prov) = this
1432 .ptr_try_get_alloc_id(place.ptr(), 0)
1433 .expect("there are no zero-sized atomic accesses");
1434 if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
1435 match access_type {
1437 AtomicAccessType::Rmw | AtomicAccessType::Store => {
1438 throw_ub_format!(
1439 "atomic store and read-modify-write operations cannot be performed on read-only memory\n\
1440 see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1441 );
1442 }
1443 AtomicAccessType::Load(_)
1444 if place.layout.size > this.tcx.data_layout().pointer_size() =>
1445 {
1446 throw_ub_format!(
1447 "large atomic load operations cannot be performed on read-only memory\n\
1448 these operations often have to be implemented using read-modify-write operations, which require writeable memory\n\
1449 see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1450 );
1451 }
1452 AtomicAccessType::Load(o) if o != AtomicReadOrd::Relaxed => {
1453 throw_ub_format!(
1454 "non-relaxed atomic load operations cannot be performed on read-only memory\n\
1455 these operations sometimes have to be implemented using read-modify-write operations, which require writeable memory\n\
1456 see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1457 );
1458 }
1459 _ => {
1460 }
1462 }
1463 }
1464 interp_ok(())
1465 }
1466
1467 fn validate_atomic_load(
1470 &self,
1471 place: &MPlaceTy<'tcx>,
1472 atomic: AtomicReadOrd,
1473 sync_clock: Option<&VClock>,
1474 ) -> InterpResult<'tcx> {
1475 let this = self.eval_context_ref();
1476 this.validate_atomic_op(
1477 place,
1478 atomic,
1479 AccessType::AtomicLoad,
1480 move |memory, clocks, index, atomic| {
1481 if atomic == AtomicReadOrd::Relaxed {
1482 memory.load_relaxed(&mut *clocks, index, place.layout.size, sync_clock)
1483 } else {
1484 memory.load_acquire(&mut *clocks, index, place.layout.size, sync_clock)
1485 }
1486 },
1487 )
1488 }
1489
1490 fn validate_atomic_store(
1493 &mut self,
1494 place: &MPlaceTy<'tcx>,
1495 atomic: AtomicWriteOrd,
1496 ) -> InterpResult<'tcx> {
1497 let this = self.eval_context_mut();
1498 this.validate_atomic_op(
1499 place,
1500 atomic,
1501 AccessType::AtomicStore,
1502 move |memory, clocks, index, atomic| {
1503 if atomic == AtomicWriteOrd::Relaxed {
1504 memory.store_relaxed(clocks, index, place.layout.size)
1505 } else {
1506 memory.store_release(clocks, index, place.layout.size)
1507 }
1508 },
1509 )
1510 }
1511
1512 fn validate_atomic_rmw(
1515 &mut self,
1516 place: &MPlaceTy<'tcx>,
1517 atomic: AtomicRwOrd,
1518 ) -> InterpResult<'tcx> {
1519 use AtomicRwOrd::*;
1520 let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
1521 let release = matches!(atomic, Release | AcqRel | SeqCst);
1522 let this = self.eval_context_mut();
1523 this.validate_atomic_op(
1524 place,
1525 atomic,
1526 AccessType::AtomicRmw,
1527 move |memory, clocks, index, _| {
1528 if acquire {
1529 memory.load_acquire(clocks, index, place.layout.size, None)?;
1530 } else {
1531 memory.load_relaxed(clocks, index, place.layout.size, None)?;
1532 }
1533 if release {
1534 memory.rmw_release(clocks, index, place.layout.size)
1535 } else {
1536 memory.rmw_relaxed(clocks, index, place.layout.size)
1537 }
1538 },
1539 )
1540 }
1541
1542 fn get_latest_nonatomic_val(&self, place: &MPlaceTy<'tcx>) -> Result<Option<Scalar>, ()> {
1546 let this = self.eval_context_ref();
1547 let (alloc_id, offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0).unwrap();
1549 let alloc_meta = &this.get_alloc_extra(alloc_id).unwrap().data_race;
1550 if alloc_meta.as_weak_memory_ref().is_none() {
1551 return Err(());
1553 }
1554 let data_race = alloc_meta.as_vclocks_ref().unwrap();
1555 for (_range, clocks) in data_race.alloc_ranges.borrow_mut().iter(offset, place.layout.size)
1557 {
1558 if clocks.atomic().is_some_and(|atomic| !(atomic.write_vector <= clocks.write())) {
1562 return Err(());
1563 }
1564 }
1565 Ok(this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err())
1569 }
1570
1571 fn validate_atomic_op<A: Debug + Copy>(
1573 &self,
1574 place: &MPlaceTy<'tcx>,
1575 atomic: A,
1576 access: AccessType,
1577 mut op: impl FnMut(
1578 &mut MemoryCellClocks,
1579 &mut ThreadClockSet,
1580 VectorIdx,
1581 A,
1582 ) -> Result<(), DataRace>,
1583 ) -> InterpResult<'tcx> {
1584 let this = self.eval_context_ref();
1585 assert!(access.is_atomic());
1586 let Some(data_race) = this.machine.data_race.as_vclocks_ref() else {
1587 return interp_ok(());
1588 };
1589 if !data_race.race_detecting() {
1590 return interp_ok(());
1591 }
1592 let size = place.layout.size;
1593 let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
1594 let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_vclocks_ref().unwrap();
1597 trace!(
1598 "Atomic op({}) with ordering {:?} on {:?} (size={})",
1599 access.description(None, None),
1600 &atomic,
1601 place.ptr(),
1602 size.bytes()
1603 );
1604
1605 let current_span = this.machine.current_user_relevant_span();
1606 data_race.maybe_perform_sync_operation(
1608 &this.machine.threads,
1609 current_span,
1610 |index, mut thread_clocks| {
1611 for (mem_clocks_range, mem_clocks) in
1612 alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
1613 {
1614 if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
1615 mem::drop(thread_clocks);
1616 return VClockAlloc::report_data_race(
1617 data_race,
1618 &this.machine.threads,
1619 mem_clocks,
1620 access,
1621 place.layout.size,
1622 interpret::Pointer::new(
1623 alloc_id,
1624 Size::from_bytes(mem_clocks_range.start),
1625 ),
1626 None,
1627 )
1628 .map(|_| true);
1629 }
1630 }
1631
1632 interp_ok(true)
1634 },
1635 )?;
1636
1637 if tracing::enabled!(tracing::Level::TRACE) {
1639 for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
1640 trace!(
1641 "Updated atomic memory({:?}, size={}) to {:#?}",
1642 place.ptr(),
1643 size.bytes(),
1644 mem_clocks.atomic_ops
1645 );
1646 }
1647 }
1648
1649 interp_ok(())
1650 }
1651}
1652
1653impl GlobalState {
1654 pub fn new(config: &MiriConfig) -> Self {
1657 let mut global_state = GlobalState {
1658 multi_threaded: Cell::new(false),
1659 ongoing_action_data_race_free: Cell::new(false),
1660 vector_clocks: RefCell::new(IndexVec::new()),
1661 vector_info: RefCell::new(IndexVec::new()),
1662 thread_info: RefCell::new(IndexVec::new()),
1663 reuse_candidates: RefCell::new(FxHashSet::default()),
1664 last_sc_fence: RefCell::new(VClock::default()),
1665 last_sc_write_per_thread: RefCell::new(VClock::default()),
1666 track_outdated_loads: config.track_outdated_loads,
1667 weak_memory: config.weak_memory_emulation,
1668 };
1669
1670 let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
1673 global_state.vector_info.get_mut().push(ThreadId::MAIN_THREAD);
1674 global_state
1675 .thread_info
1676 .get_mut()
1677 .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });
1678
1679 global_state
1680 }
1681
1682 fn race_detecting(&self) -> bool {
1686 self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
1687 }
1688
1689 pub fn ongoing_action_data_race_free(&self) -> bool {
1690 self.ongoing_action_data_race_free.get()
1691 }
1692
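    /// Find a vector index that can be reused for a new thread: its previous owner
    /// has terminated, and every other vector clock has either already observed
    /// that thread's final timestamp or belongs to a terminated thread itself.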
1693 fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
1696 let mut reuse = self.reuse_candidates.borrow_mut();
1697 let vector_clocks = self.vector_clocks.borrow();
1698 for &candidate in reuse.iter() {
1699 let target_timestamp = vector_clocks[candidate].clock[candidate];
1700 if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
1701 let no_data_race = clock.clock[candidate] >= target_timestamp;
1704
1705 let vector_terminated = reuse.contains(&clock_idx);
1708
1709 no_data_race || vector_terminated
1712 }) {
1713 assert!(reuse.remove(&candidate));
1718 return Some(candidate);
1719 }
1720 }
1721 None
1722 }
1723
1724 #[inline]
1727 pub fn thread_created(
1728 &mut self,
1729 thread_mgr: &ThreadManager<'_>,
1730 thread: ThreadId,
1731 current_span: Span,
1732 ) {
1733 let current_index = self.active_thread_index(thread_mgr);
1734
1735 self.multi_threaded.set(true);
1738
1739 let mut thread_info = self.thread_info.borrow_mut();
1741 thread_info.ensure_contains_elem(thread, Default::default);
1742
1743 let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
1746 let vector_clocks = self.vector_clocks.get_mut();
1749 vector_clocks[reuse_index].increment_clock(reuse_index, current_span);
1750
1751 let vector_info = self.vector_info.get_mut();
1754 let old_thread = vector_info[reuse_index];
1755 vector_info[reuse_index] = thread;
1756
1757 thread_info[old_thread].vector_index = None;
1760
1761 reuse_index
1762 } else {
1763 let vector_info = self.vector_info.get_mut();
1766 vector_info.push(thread)
1767 };
1768
1769 trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
1770
1771 thread_info[thread].vector_index = Some(created_index);
1773
1774 let vector_clocks = self.vector_clocks.get_mut();
1776 if created_index == vector_clocks.next_index() {
1777 vector_clocks.push(ThreadClockSet::default());
1778 }
1779
1780 let (current, created) = vector_clocks.pick2_mut(current_index, created_index);
1782
1783 created.join_with(current);
1786
1787 current.increment_clock(current_index, current_span);
1790 created.increment_clock(created_index, current_span);
1791 }
1792
1793 #[inline]
1797 pub fn thread_joined(&mut self, threads: &ThreadManager<'_>, joinee: ThreadId) {
1798 let thread_info = self.thread_info.borrow();
1799 let thread_info = &thread_info[joinee];
1800
1801 let join_clock = thread_info
1803 .termination_vector_clock
1804 .as_ref()
1805 .expect("joined with thread but thread has not terminated");
1806 self.acquire_clock(join_clock, threads);
1808
1809 if let Some(current_index) = thread_info.vector_index {
1814 if threads.get_live_thread_count() == 1 {
1815 let vector_clocks = self.vector_clocks.get_mut();
1816 let current_clock = &vector_clocks[current_index];
1818 if vector_clocks
1819 .iter_enumerated()
1820 .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
1821 {
1822 self.multi_threaded.set(false);
1826 }
1827 }
1828 }
1829 }
1830
1831 #[inline]
1839 pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_>) {
1840 let current_thread = thread_mgr.active_thread();
1841 let current_index = self.active_thread_index(thread_mgr);
1842
        let termination_clock = self.release_clock(thread_mgr, |clock| clock.clone());
        self.thread_info.get_mut()[current_thread].termination_vector_clock =
            Some(termination_clock);
1847
1848 let reuse = self.reuse_candidates.get_mut();
1850 reuse.insert(current_index);
1851 }
1852
1853 fn atomic_fence<'tcx>(
1855 &self,
1856 machine: &MiriMachine<'tcx>,
1857 atomic: AtomicFenceOrd,
1858 ) -> InterpResult<'tcx> {
1859 let current_span = machine.current_user_relevant_span();
1860 self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| {
1861 trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
1862
1863 if atomic != AtomicFenceOrd::Release {
1867 clocks.apply_acquire_fence();
1869 }
1870 if atomic == AtomicFenceOrd::SeqCst {
1871 let mut sc_fence_clock = self.last_sc_fence.borrow_mut();
1879 sc_fence_clock.join(&clocks.clock);
1880 clocks.clock.join(&sc_fence_clock);
1881 clocks.write_seqcst.join(&self.last_sc_write_per_thread.borrow());
1884 }
1885 if atomic != AtomicFenceOrd::Acquire {
1888 clocks.apply_release_fence();
1890 }
1891
1892 interp_ok(atomic != AtomicFenceOrd::Acquire)
1894 })
1895 }
1896
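    /// Run `op` on the active thread's clocks, but only if the program has ever
    /// been multi-threaded. If `op` reports that it performed synchronization,
    /// increment the thread's clock afterwards so the synchronization event gets
    /// a unique timestamp.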
1897 fn maybe_perform_sync_operation<'tcx>(
1905 &self,
1906 thread_mgr: &ThreadManager<'_>,
1907 current_span: Span,
1908 op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
1909 ) -> InterpResult<'tcx> {
1910 if self.multi_threaded.get() {
1911 let (index, clocks) = self.active_thread_state_mut(thread_mgr);
1912 if op(index, clocks)? {
1913 let (_, mut clocks) = self.active_thread_state_mut(thread_mgr);
1914 clocks.increment_clock(index, current_span);
1915 }
1916 }
1917 interp_ok(())
1918 }
1919
1920 fn print_thread_metadata(&self, thread_mgr: &ThreadManager<'_>, vector: VectorIdx) -> String {
1923 let thread = self.vector_info.borrow()[vector];
1924 let thread_name = thread_mgr.get_thread_display_name(thread);
1925 format!("thread `{thread_name}`")
1926 }
1927
1928 pub fn acquire_clock<'tcx>(&self, clock: &VClock, threads: &ThreadManager<'tcx>) {
1933 let thread = threads.active_thread();
1934 let (_, mut clocks) = self.thread_state_mut(thread);
1935 clocks.clock.join(clock);
1936 }
1937
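    /// Let the active thread release its current clock: pass it to `callback`,
    /// then increment the thread's clock so later events are distinguishable from
    /// this release.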
1938 pub fn release_clock<'tcx, R>(
1942 &self,
1943 threads: &ThreadManager<'tcx>,
1944 callback: impl FnOnce(&VClock) -> R,
1945 ) -> R {
1946 let thread = threads.active_thread();
1947 let span = threads.active_thread_ref().current_user_relevant_span();
1948 let (index, mut clocks) = self.thread_state_mut(thread);
1949 let r = callback(&clocks.clock);
1950 clocks.increment_clock(index, span);
1953
1954 r
1955 }
1956
1957 fn thread_index(&self, thread: ThreadId) -> VectorIdx {
1958 self.thread_info.borrow()[thread].vector_index.expect("thread has no assigned vector")
1959 }
1960
1961 #[inline]
1964 fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1965 let index = self.thread_index(thread);
1966 let ref_vector = self.vector_clocks.borrow_mut();
1967 let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
1968 (index, clocks)
1969 }
1970
1971 #[inline]
1974 fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1975 let index = self.thread_index(thread);
1976 let ref_vector = self.vector_clocks.borrow();
1977 let clocks = Ref::map(ref_vector, |vec| &vec[index]);
1978 (index, clocks)
1979 }
1980
1981 #[inline]
1984 pub(super) fn active_thread_state(
1985 &self,
1986 thread_mgr: &ThreadManager<'_>,
1987 ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1988 self.thread_state(thread_mgr.active_thread())
1989 }
1990
1991 #[inline]
1994 pub(super) fn active_thread_state_mut(
1995 &self,
1996 thread_mgr: &ThreadManager<'_>,
1997 ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1998 self.thread_state_mut(thread_mgr.active_thread())
1999 }
2000
2001 #[inline]
2004 fn active_thread_index(&self, thread_mgr: &ThreadManager<'_>) -> VectorIdx {
2005 let active_thread_id = thread_mgr.active_thread();
2006 self.thread_index(active_thread_id)
2007 }
2008
2009 pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_>) {
2011 let (index, clocks) = self.active_thread_state(thread_mgr);
2012 self.last_sc_write_per_thread.borrow_mut().set_at_index(&clocks.clock, index);
2013 }
2014
2015 pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_>) {
2017 let (.., mut clocks) = self.active_thread_state_mut(thread_mgr);
2018 clocks.read_seqcst.join(&self.last_sc_fence.borrow());
2019 }
2020}