use std::cell::{Cell, Ref, RefCell, RefMut};
use std::fmt::Debug;
use std::mem;

use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::{Idx, IndexVec};
use rustc_log::tracing;
use rustc_middle::mir;
use rustc_middle::ty::Ty;
use rustc_span::Span;

use super::vector_clock::{VClock, VTimestamp, VectorIdx};
use super::weak_memory::EvalContextExt as _;
use crate::concurrency::GlobalDataRaceHandler;
use crate::diagnostics::RacingOp;
use crate::intrinsics::AtomicRmwOp;
use crate::*;

pub type AllocState = VClockAlloc;

/// Orderings permitted for atomic read-modify-write operations.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRwOrd {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

/// Orderings permitted for atomic loads.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOrd {
    Relaxed,
    Acquire,
    SeqCst,
}

/// Orderings permitted for atomic stores.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOrd {
    Relaxed,
    Release,
    SeqCst,
}

/// Orderings permitted for atomic fences.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOrd {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

#[derive(Clone, Default, Debug)]
pub(super) struct ThreadClockSet {
    /// The happens-before clock of this thread.
    pub(super) clock: VClock,

    /// Timestamps accumulated by relaxed atomic loads; merged into `clock`
    /// when this thread performs an acquire fence.
    fence_acquire: VClock,

    /// Snapshot of `clock` taken at this thread's last release fence; this is
    /// what relaxed atomic stores publish to other threads.
    fence_release: VClock,

    /// Timestamps of the last SC write observed per thread; only used for
    /// weak-memory emulation.
    pub(super) write_seqcst: VClock,

    /// Timestamps of the last SC fence observed per thread; only used for
    /// weak-memory emulation.
    pub(super) read_seqcst: VClock,
}

impl ThreadClockSet {
    #[inline]
    fn apply_release_fence(&mut self) {
        self.fence_release.clone_from(&self.clock);
    }

    #[inline]
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    #[inline]
    fn increment_clock(&mut self, index: VectorIdx, current_span: Span) {
        self.clock.increment_index(index, current_span);
    }

    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
}
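
// A quick orientation for the vector-clock operations used throughout this
// module (illustrative values only, not tied to any particular execution):
// `VClock::join` is an element-wise maximum, and `increment_clock` bumps the
// current thread's own component. With three threads, for example:
//
//     clock of T0 before an acquire:   [3, 1, 0]
//     clock being acquired (from T1):  [2, 4, 0]
//     clock of T0 after the join:      [3, 4, 0]
//
// An access A happens-before an access B exactly when A's clock is
// component-wise `<=` B's clock, which is what the `<=` comparisons on
// `VClock` in the detection code below check.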

/// Sentinel error returned by the clock checks below when a data race is
/// detected; the caller turns it into a full diagnostic.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct DataRace;

#[derive(Clone, PartialEq, Eq, Debug)]
struct AtomicMemoryCellClocks {
    /// Per-thread timestamps of atomic reads of this memory cell.
    read_vector: VClock,

    /// Per-thread timestamps of atomic writes to this memory cell.
    write_vector: VClock,

    /// Synchronization clock released by atomic stores to this cell and
    /// acquired by acquire loads (or by acquire fences after relaxed loads).
    sync_vector: VClock,

    /// Access size of the atomic operations seen so far; `None` after
    /// overlapping atomic reads with different sizes have been observed.
    size: Option<Size>,
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AtomicAccessType {
    Load(AtomicReadOrd),
    Store,
    Rmw,
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum NaReadType {
    Read,
    Retag,
}

impl NaReadType {
    fn description(self) -> &'static str {
        match self {
            NaReadType::Read => "non-atomic read",
            NaReadType::Retag => "retag read",
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum NaWriteType {
    Allocate,
    Write,
    Retag,
    Deallocate,
}

impl NaWriteType {
    fn description(self) -> &'static str {
        match self {
            NaWriteType::Allocate => "creating a new allocation",
            NaWriteType::Write => "non-atomic write",
            NaWriteType::Retag => "retag write",
            NaWriteType::Deallocate => "deallocation",
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AccessType {
    NaRead(NaReadType),
    NaWrite(NaWriteType),
    AtomicLoad,
    AtomicStore,
    AtomicRmw,
}

#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
    /// Thread (vector index) and timestamp of the last non-atomic write to
    /// this memory cell.
    write: (VectorIdx, VTimestamp),

    /// Kind of the last non-atomic write, used only for diagnostics.
    write_type: NaWriteType,

    /// Per-thread timestamps of non-atomic reads of this memory cell.
    read: VClock,

    /// Atomic access state of this cell; `None` until the first atomic access.
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}

#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
    /// The vector index currently assigned to this thread, if any.
    vector_index: Option<VectorIdx>,

    /// The thread's clock at termination; acquired by threads that join it.
    termination_vector_clock: Option<VClock>,
}

#[derive(Debug, Clone)]
pub struct GlobalState {
    /// Set once a second thread has been spawned; race detection is skipped
    /// while the program is still single-threaded.
    multi_threaded: Cell<bool>,

    /// True while Miri performs an internal access that is exempt from race
    /// detection (see `allow_data_races_ref`/`allow_data_races_mut`).
    ongoing_action_data_race_free: Cell<bool>,

    /// The clock set of each vector index (i.e. of each tracked thread).
    vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,

    /// The thread currently represented by each vector index.
    vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,

    /// Per-thread state: the assigned vector index and the termination clock.
    thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,

    /// Vector indices of terminated threads that may be reused for new threads.
    reuse_candidates: RefCell<FxHashSet<VectorIdx>>,

    /// The clock of the last SeqCst fence performed by each thread.
    last_sc_fence: RefCell<VClock>,

    /// The clock of the last SeqCst write performed by each thread.
    last_sc_write_per_thread: RefCell<VClock>,

    /// Whether to report loads that observe outdated values under weak-memory
    /// emulation.
    pub track_outdated_loads: bool,

    /// Whether weak-memory emulation is enabled.
    pub weak_memory: bool,
}

impl VisitProvenance for GlobalState {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}
367
368impl AccessType {
369 fn description(self, ty: Option<Ty<'_>>, size: Option<Size>) -> String {
370 let mut msg = String::new();
371
372 if let Some(size) = size {
373 if size == Size::ZERO {
374 assert!(self == AccessType::AtomicLoad);
378 assert!(ty.is_none());
379 return format!("multiple differently-sized atomic loads, including one load");
380 }
381 msg.push_str(&format!("{}-byte {}", size.bytes(), msg))
382 }
383
384 msg.push_str(match self {
385 AccessType::NaRead(w) => w.description(),
386 AccessType::NaWrite(w) => w.description(),
387 AccessType::AtomicLoad => "atomic load",
388 AccessType::AtomicStore => "atomic store",
389 AccessType::AtomicRmw => "atomic read-modify-write",
390 });
391
392 if let Some(ty) = ty {
393 msg.push_str(&format!(" of type `{ty}`"));
394 }
395
396 msg
397 }
398
399 fn is_atomic(self) -> bool {
400 match self {
401 AccessType::AtomicLoad | AccessType::AtomicStore | AccessType::AtomicRmw => true,
402 AccessType::NaRead(_) | AccessType::NaWrite(_) => false,
403 }
404 }
405
406 fn is_read(self) -> bool {
407 match self {
408 AccessType::AtomicLoad | AccessType::NaRead(_) => true,
409 AccessType::NaWrite(_) | AccessType::AtomicStore | AccessType::AtomicRmw => false,
410 }
411 }
412
413 fn is_retag(self) -> bool {
414 matches!(
415 self,
416 AccessType::NaRead(NaReadType::Retag) | AccessType::NaWrite(NaWriteType::Retag)
417 )
418 }
419}
420
421impl AtomicMemoryCellClocks {
422 fn new(size: Size) -> Self {
423 AtomicMemoryCellClocks {
424 read_vector: Default::default(),
425 write_vector: Default::default(),
426 sync_vector: Default::default(),
427 size: Some(size),
428 }
429 }
430}
431
432impl MemoryCellClocks {
433 fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
436 MemoryCellClocks {
437 read: VClock::default(),
438 write: (alloc_index, alloc),
439 write_type: NaWriteType::Allocate,
440 atomic_ops: None,
441 }
442 }
443
444 #[inline]
445 fn write_was_before(&self, other: &VClock) -> bool {
446 self.write.1 <= other[self.write.0]
449 }
450
451 #[inline]
452 fn write(&self) -> VClock {
453 VClock::new_with_index(self.write.0, self.write.1)
454 }
455
456 #[inline]
458 fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
459 self.atomic_ops.as_deref()
460 }
461
462 #[inline]
464 fn atomic_mut_unwrap(&mut self) -> &mut AtomicMemoryCellClocks {
465 self.atomic_ops.as_deref_mut().unwrap()
466 }
467
468 fn atomic_access(
471 &mut self,
472 thread_clocks: &ThreadClockSet,
473 size: Size,
474 write: bool,
475 ) -> Result<&mut AtomicMemoryCellClocks, DataRace> {
476 match self.atomic_ops {
477 Some(ref mut atomic) => {
478 if atomic.size == Some(size) {
480 Ok(atomic)
481 } else if atomic.read_vector <= thread_clocks.clock
482 && atomic.write_vector <= thread_clocks.clock
483 {
484 atomic.size = Some(size);
486 Ok(atomic)
487 } else if !write && atomic.write_vector <= thread_clocks.clock {
488 atomic.size = None;
491 Ok(atomic)
492 } else {
493 Err(DataRace)
494 }
495 }
496 None => {
497 self.atomic_ops = Some(Box::new(AtomicMemoryCellClocks::new(size)));
498 Ok(self.atomic_ops.as_mut().unwrap())
499 }
500 }
501 }
502
503 fn load_acquire(
507 &mut self,
508 thread_clocks: &mut ThreadClockSet,
509 index: VectorIdx,
510 access_size: Size,
511 sync_clock: Option<&VClock>,
512 ) -> Result<(), DataRace> {
513 self.atomic_read_detect(thread_clocks, index, access_size)?;
514 if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
515 thread_clocks.clock.join(sync_clock);
516 }
517 Ok(())
518 }
519
520 fn load_relaxed(
524 &mut self,
525 thread_clocks: &mut ThreadClockSet,
526 index: VectorIdx,
527 access_size: Size,
528 sync_clock: Option<&VClock>,
529 ) -> Result<(), DataRace> {
530 self.atomic_read_detect(thread_clocks, index, access_size)?;
531 if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
532 thread_clocks.fence_acquire.join(sync_clock);
533 }
534 Ok(())
535 }
536
537 fn store_release(
540 &mut self,
541 thread_clocks: &ThreadClockSet,
542 index: VectorIdx,
543 access_size: Size,
544 ) -> Result<(), DataRace> {
545 self.atomic_write_detect(thread_clocks, index, access_size)?;
546 let atomic = self.atomic_mut_unwrap(); atomic.sync_vector.clone_from(&thread_clocks.clock);
548 Ok(())
549 }
550
551 fn store_relaxed(
554 &mut self,
555 thread_clocks: &ThreadClockSet,
556 index: VectorIdx,
557 access_size: Size,
558 ) -> Result<(), DataRace> {
559 self.atomic_write_detect(thread_clocks, index, access_size)?;
560
561 let atomic = self.atomic_mut_unwrap();
567 atomic.sync_vector.clone_from(&thread_clocks.fence_release);
568 Ok(())
569 }

    fn rmw_release(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.join(&thread_clocks.clock);
        Ok(())
    }

    fn rmw_relaxed(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.join(&thread_clocks.fence_release);
        Ok(())
    }

    fn atomic_read_detect(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
        let atomic = self.atomic_access(thread_clocks, access_size, false)?;
        atomic.read_vector.set_at_index(&thread_clocks.clock, index);
        if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) }
    }

    fn atomic_write_detect(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
        let atomic = self.atomic_access(thread_clocks, access_size, true)?;
        atomic.write_vector.set_at_index(&thread_clocks.clock, index);
        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
            Ok(())
        } else {
            Err(DataRace)
        }
    }

    fn read_race_detect(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        read_type: NaReadType,
        current_span: Span,
    ) -> Result<(), DataRace> {
        trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        thread_clocks.clock.index_mut(index).set_read_type(read_type);
        if self.write_was_before(&thread_clocks.clock) {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= thread_clocks.clock
            } else {
                true
            };
            self.read.set_at_index(&thread_clocks.clock, index);
            if race_free { Ok(()) } else { Err(DataRace) }
        } else {
            Err(DataRace)
        }
    }

    fn write_race_detect(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        write_type: NaWriteType,
        current_span: Span,
    ) -> Result<(), DataRace> {
        trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= thread_clocks.clock
                    && atomic.read_vector <= thread_clocks.clock
            } else {
                true
            };
            self.write = (index, thread_clocks.clock[index]);
            self.write_type = write_type;
            if race_free {
                self.read.set_zero_vector();
                Ok(())
            } else {
                Err(DataRace)
            }
        } else {
            Err(DataRace)
        }
    }
}
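
// For orientation: the checks above flag programs like the following sketch
// (illustrative only, and assumed to be an ordinary program run under Miri;
// it is not part of this module). Both threads perform unsynchronized
// non-atomic writes to the same location, so one of the two writes is
// reported as a data race:
//
//     use std::thread;
//
//     static mut COUNTER: u64 = 0;
//
//     fn main() {
//         let t = thread::spawn(|| unsafe { COUNTER += 1 });
//         unsafe { COUNTER += 1 }; // races with the write in `t`
//         t.join().unwrap();
//     }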

impl GlobalDataRaceHandler {
    fn set_ongoing_action_data_race_free(&self, enable: bool) {
        match self {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Vclocks(data_race) => {
                let old = data_race.ongoing_action_data_race_free.replace(enable);
                assert_ne!(old, enable, "cannot nest allow_data_races");
            }
            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
                genmc_ctx.set_ongoing_action_data_race_free(enable);
            }
        }
    }
}

impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
    fn read_scalar_atomic(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_ref();
        this.atomic_access_check(place, AtomicAccessType::Load(atomic))?;
        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let old_val = this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err();
            return genmc_ctx.atomic_load(
                this,
                place.ptr().addr(),
                place.layout.size,
                atomic,
                old_val,
            );
        }

        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
        let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, |sync_clock| {
            this.validate_atomic_load(place, atomic, sync_clock)
        })?;
        interp_ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
    }

    fn write_scalar_atomic(
        &mut self,
        val: Scalar,
        dest: &MPlaceTy<'tcx>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.atomic_access_check(dest, AtomicAccessType::Store)?;

        let old_val = this.run_for_validation_ref(|this| this.read_scalar(dest)).discard_err();

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            if genmc_ctx.atomic_store(
                this,
                dest.ptr().addr(),
                dest.layout.size,
                val,
                old_val,
                atomic,
            )? {
                this.allow_data_races_mut(|this| this.write_scalar(val, dest))?;
            }
            return interp_ok(());
        }
        this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
        this.validate_atomic_store(dest, atomic)?;
        this.buffered_atomic_write(val, dest, atomic, old_val)
    }

    fn atomic_rmw_op_immediate(
        &mut self,
        place: &MPlaceTy<'tcx>,
        rhs: &ImmTy<'tcx>,
        atomic_op: AtomicRmwOp,
        ord: AtomicRwOrd,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
                this,
                place.ptr().addr(),
                place.layout.size,
                atomic_op,
                place.layout.backend_repr.is_signed(),
                ord,
                rhs.to_scalar(),
                old.to_scalar(),
            )?;
            if let Some(new_val) = new_val {
                this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
            }
            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
        }

        let val = match atomic_op {
            AtomicRmwOp::MirOp { op, neg } => {
                let val = this.binary_op(op, &old, rhs)?;
                if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val }
            }
            AtomicRmwOp::Max => {
                let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
                if lt { rhs } else { &old }.clone()
            }
            AtomicRmwOp::Min => {
                let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
                if lt { &old } else { rhs }.clone()
            }
        };

        this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;

        this.validate_atomic_rmw(place, ord)?;

        this.buffered_atomic_rmw(val.to_scalar(), place, ord, old.to_scalar())?;
        interp_ok(old)
    }

    fn atomic_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new: Scalar,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
        this.allow_data_races_mut(|this| this.write_scalar(new, place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old_val, new_val) = genmc_ctx.atomic_exchange(
                this,
                place.ptr().addr(),
                place.layout.size,
                new,
                atomic,
                old,
            )?;
            if let Some(new_val) = new_val {
                this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
            }
            return interp_ok(old_val);
        }

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new, place, atomic, old)?;
        interp_ok(old)
    }

    fn atomic_compare_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        expect_old: &ImmTy<'tcx>,
        new: Scalar,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
        can_fail_spuriously: bool,
    ) -> InterpResult<'tcx, Immediate<Provenance>> {
        use rand::Rng as _;
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old_value, new_value, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
                this,
                place.ptr().addr(),
                place.layout.size,
                this.read_scalar(expect_old)?,
                new,
                success,
                fail,
                can_fail_spuriously,
                old.to_scalar(),
            )?;
            if let Some(new_value) = new_value {
                this.allow_data_races_mut(|this| this.write_scalar(new_value, place))?;
            }
            return interp_ok(Immediate::ScalarPair(old_value, Scalar::from_bool(cmpxchg_success)));
        }

        let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
        let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
        let cmpxchg_success = eq.to_scalar().to_bool()?
            && if can_fail_spuriously {
                this.machine.rng.get_mut().random_bool(success_rate)
            } else {
                true
            };
        let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));

        if cmpxchg_success {
            this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
            this.validate_atomic_rmw(place, success)?;
            this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
        } else {
            this.validate_atomic_load(place, fail, None)?;
            this.perform_read_on_buffered_latest(place, fail)?;
        }

        interp_ok(res)
    }

    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let machine = &this.machine;
        match &this.machine.data_race {
            GlobalDataRaceHandler::None => interp_ok(()),
            GlobalDataRaceHandler::Vclocks(data_race) => data_race.atomic_fence(machine, atomic),
            GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.atomic_fence(machine, atomic),
        }
    }

    fn release_clock<R>(
        &self,
        callback: impl FnOnce(&VClock) -> R,
    ) -> InterpResult<'tcx, Option<R>> {
        let this = self.eval_context_ref();
        interp_ok(match &this.machine.data_race {
            GlobalDataRaceHandler::None => None,
            GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
                throw_unsup_format!(
                    "this operation performs synchronization that is not supported in GenMC mode"
                ),
            GlobalDataRaceHandler::Vclocks(data_race) =>
                Some(data_race.release_clock(&this.machine.threads, callback)),
        })
    }

    fn acquire_clock(&self, clock: &VClock) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        match &this.machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
                throw_unsup_format!(
                    "this operation performs synchronization that is not supported in GenMC mode"
                ),
            GlobalDataRaceHandler::Vclocks(data_race) =>
                data_race.acquire_clock(clock, &this.machine.threads),
        }
        interp_ok(())
    }
}
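
// A sketch of the synchronization pattern the clock machinery above models
// (illustrative only and assumed to be an ordinary program run under Miri,
// not code in this module). The `Release` store publishes the writer's clock
// via the cell's `sync_vector`, and the `Acquire` load that observes it joins
// that clock, so the later non-atomic read of `DATA` does not race with the
// earlier non-atomic write:
//
//     use std::sync::atomic::{AtomicBool, Ordering};
//     use std::thread;
//
//     static READY: AtomicBool = AtomicBool::new(false);
//     static mut DATA: u64 = 0;
//
//     fn main() {
//         let writer = thread::spawn(|| {
//             unsafe { DATA = 42 };                  // non-atomic write
//             READY.store(true, Ordering::Release);  // release store
//         });
//         while !READY.load(Ordering::Acquire) {}    // acquire load
//         assert_eq!(unsafe { DATA }, 42);           // ordered after the write
//         writer.join().unwrap();
//     }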

#[derive(Debug, Clone)]
pub struct VClockAlloc {
    alloc_ranges: RefCell<DedupRangeMap<MemoryCellClocks>>,
}

impl VisitProvenance for VClockAlloc {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

impl VClockAlloc {
    pub fn new_allocation(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_>,
        len: Size,
        kind: MemoryKind,
        current_span: Span,
    ) -> VClockAlloc {
        let (alloc_timestamp, alloc_index) = match kind {
            MemoryKind::Machine(
                MiriMemoryKind::Rust
                | MiriMemoryKind::Miri
                | MiriMemoryKind::C
                | MiriMemoryKind::WinHeap
                | MiriMemoryKind::WinLocal
                | MiriMemoryKind::Mmap,
            )
            | MemoryKind::Stack => {
                let (alloc_index, clocks) = global.active_thread_state(thread_mgr);
                let mut alloc_timestamp = clocks.clock[alloc_index];
                alloc_timestamp.span = current_span;
                (alloc_timestamp, alloc_index)
            }
            MemoryKind::Machine(
                MiriMemoryKind::Global
                | MiriMemoryKind::Machine
                | MiriMemoryKind::Runtime
                | MiriMemoryKind::ExternStatic
                | MiriMemoryKind::Tls,
            )
            | MemoryKind::CallerLocation =>
                (VTimestamp::ZERO, global.thread_index(ThreadId::MAIN_THREAD)),
        };
        VClockAlloc {
            alloc_ranges: RefCell::new(DedupRangeMap::new(
                len,
                MemoryCellClocks::new(alloc_timestamp, alloc_index),
            )),
        }
    }

    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
        trace!("Find index where not {:?} <= {:?}", l, r);
        let l_slice = l.as_slice();
        let r_slice = r.as_slice();
        l_slice
            .iter()
            .zip(r_slice.iter())
            .enumerate()
            .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
            .or_else(|| {
                if l_slice.len() > r_slice.len() {
                    let l_remainder_slice = &l_slice[r_slice.len()..];
                    let idx = l_remainder_slice
                        .iter()
                        .enumerate()
                        .find_map(|(idx, &r)| if r == VTimestamp::ZERO { None } else { Some(idx) })
                        .expect("Invalid VClock Invariant");
                    Some(idx + r_slice.len())
                } else {
                    None
                }
            })
            .map(VectorIdx::new)
    }

    #[cold]
    #[inline(never)]
    fn report_data_race<'tcx>(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_>,
        mem_clocks: &MemoryCellClocks,
        access: AccessType,
        access_size: Size,
        ptr_dbg: interpret::Pointer<AllocId>,
        ty: Option<Ty<'_>>,
    ) -> InterpResult<'tcx> {
        let (active_index, active_clocks) = global.active_thread_state(thread_mgr);
        let mut other_size = None;
        let write_clock;
        let (other_access, other_thread, other_clock) =
            if !access.is_atomic()
                && let Some(atomic) = mem_clocks.atomic()
                && let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
            {
                (AccessType::AtomicStore, idx, &atomic.write_vector)
            } else if !access.is_atomic()
                && let Some(atomic) = mem_clocks.atomic()
                && let Some(idx) = Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
            {
                (AccessType::AtomicLoad, idx, &atomic.read_vector)
            } else if mem_clocks.write.1 > active_clocks.clock[mem_clocks.write.0] {
                write_clock = mem_clocks.write();
                (AccessType::NaWrite(mem_clocks.write_type), mem_clocks.write.0, &write_clock)
            } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) {
                (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read)
            } else if access.is_atomic()
                && let Some(atomic) = mem_clocks.atomic()
                && atomic.size != Some(access_size)
            {
                other_size = Some(atomic.size.unwrap_or(Size::ZERO));
                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
                {
                    (AccessType::AtomicStore, idx, &atomic.write_vector)
                } else if let Some(idx) =
                    Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
                {
                    (AccessType::AtomicLoad, idx, &atomic.read_vector)
                } else {
                    unreachable!(
                        "Failed to report data-race for mixed-size access: no race found"
                    )
                }
            } else {
                unreachable!("Failed to report data-race")
            };

        let active_thread_info = global.print_thread_metadata(thread_mgr, active_index);
        let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
        let involves_non_atomic = !access.is_atomic() || !other_access.is_atomic();

        let extra = if other_size.is_some() {
            assert!(!involves_non_atomic);
            Some("overlapping unsynchronized atomic accesses must use the same access size")
        } else if access.is_read() && other_access.is_read() {
            panic!("there should be no same-size read-read races")
        } else {
            None
        };
        Err(err_machine_stop!(TerminationInfo::DataRace {
            involves_non_atomic,
            extra,
            retag_explain: access.is_retag() || other_access.is_retag(),
            ptr: ptr_dbg,
            op1: RacingOp {
                action: other_access.description(None, other_size),
                thread_info: other_thread_info,
                span: other_clock.as_slice()[other_thread.index()].span_data(),
            },
            op2: RacingOp {
                action: access.description(ty, other_size.map(|_| access_size)),
                thread_info: active_thread_info,
                span: active_clocks.clock.as_slice()[active_index.index()].span_data(),
            },
        }))?
    }

    pub(super) fn sync_clock(&self, access_range: AllocRange) -> VClock {
        let alloc_ranges = self.alloc_ranges.borrow();
        let mut clock = VClock::default();
        for (_, mem_clocks) in alloc_ranges.iter(access_range.start, access_range.size) {
            if let Some(atomic) = mem_clocks.atomic() {
                clock.join(&atomic.sync_vector);
            }
        }
        clock
    }

    pub fn read<'tcx>(
        &self,
        alloc_id: AllocId,
        access_range: AllocRange,
        read_type: NaReadType,
        ty: Option<Ty<'_>>,
        machine: &MiriMachine<'_>,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_user_relevant_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return interp_ok(());
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
        for (mem_clocks_range, mem_clocks) in
            alloc_ranges.iter_mut(access_range.start, access_range.size)
        {
            if let Err(DataRace) =
                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
            {
                drop(thread_clocks);
                return Self::report_data_race(
                    global,
                    &machine.threads,
                    mem_clocks,
                    AccessType::NaRead(read_type),
                    access_range.size,
                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
                    ty,
                );
            }
        }
        interp_ok(())
    }

    pub fn write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        access_range: AllocRange,
        write_type: NaWriteType,
        ty: Option<Ty<'_>>,
        machine: &mut MiriMachine<'_>,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_user_relevant_span();
        let global = machine.data_race.as_vclocks_mut().unwrap();
        if !global.race_detecting() {
            return interp_ok(());
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        for (mem_clocks_range, mem_clocks) in
            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
        {
            if let Err(DataRace) =
                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
            {
                drop(thread_clocks);
                return Self::report_data_race(
                    global,
                    &machine.threads,
                    mem_clocks,
                    AccessType::NaWrite(write_type),
                    access_range.size,
                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
                    ty,
                );
            }
        }
        interp_ok(())
    }
}

#[derive(Debug, Default)]
pub struct FrameState {
    local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
}

#[derive(Debug)]
struct LocalClocks {
    write: VTimestamp,
    write_type: NaWriteType,
    read: VTimestamp,
}

impl Default for LocalClocks {
    fn default() -> Self {
        Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
    }
}

impl FrameState {
    pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
        let current_span = machine.current_user_relevant_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        let mut clocks = self.local_clocks.borrow_mut();
        if storage_live {
            let new_clocks = LocalClocks {
                write: thread_clocks.clock[index],
                write_type: NaWriteType::Allocate,
                read: VTimestamp::ZERO,
            };
            clocks.insert(local, new_clocks);
        } else {
            let clocks = clocks.entry(local).or_default();
            clocks.write = thread_clocks.clock[index];
            clocks.write_type = NaWriteType::Write;
        }
    }

    pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
        let current_span = machine.current_user_relevant_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
        let mut clocks = self.local_clocks.borrow_mut();
        let clocks = clocks.entry(local).or_default();
        clocks.read = thread_clocks.clock[index];
    }

    pub fn local_moved_to_memory(
        &self,
        local: mir::Local,
        alloc: &mut VClockAlloc,
        machine: &MiriMachine<'_>,
    ) {
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
            assert_eq!(mem_clocks.write.0, index);
            mem_clocks.write = (index, local_clocks.write);
            mem_clocks.write_type = local_clocks.write_type;
            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
        }
    }
}

impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
    #[inline]
    fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R {
        let this = self.eval_context_ref();
        this.machine.data_race.set_ongoing_action_data_race_free(true);
        let result = op(this);
        this.machine.data_race.set_ongoing_action_data_race_free(false);
        result
    }

    #[inline]
    fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R {
        let this = self.eval_context_mut();
        this.machine.data_race.set_ongoing_action_data_race_free(true);
        let result = op(this);
        this.machine.data_race.set_ongoing_action_data_race_free(false);
        result
    }

    fn atomic_access_check(
        &self,
        place: &MPlaceTy<'tcx>,
        access_type: AtomicAccessType,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
        this.check_ptr_align(place.ptr(), align)?;
        let (alloc_id, _offset, _prov) = this
            .ptr_try_get_alloc_id(place.ptr(), 0)
            .expect("there are no zero-sized atomic accesses");
        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
            match access_type {
                AtomicAccessType::Rmw | AtomicAccessType::Store => {
                    throw_ub_format!(
                        "atomic store and read-modify-write operations cannot be performed on read-only memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                AtomicAccessType::Load(_)
                    if place.layout.size > this.tcx.data_layout().pointer_size() =>
                {
                    throw_ub_format!(
                        "large atomic load operations cannot be performed on read-only memory\n\
                        these operations often have to be implemented using read-modify-write operations, which require writeable memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                AtomicAccessType::Load(o) if o != AtomicReadOrd::Relaxed => {
                    throw_ub_format!(
                        "non-relaxed atomic load operations cannot be performed on read-only memory\n\
                        these operations sometimes have to be implemented using read-modify-write operations, which require writeable memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                _ => {}
            }
        }
        interp_ok(())
    }

    fn validate_atomic_load(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
        sync_clock: Option<&VClock>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicLoad,
            move |memory, clocks, index, atomic| {
                if atomic == AtomicReadOrd::Relaxed {
                    memory.load_relaxed(&mut *clocks, index, place.layout.size, sync_clock)
                } else {
                    memory.load_acquire(&mut *clocks, index, place.layout.size, sync_clock)
                }
            },
        )
    }

    fn validate_atomic_store(
        &mut self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicStore,
            move |memory, clocks, index, atomic| {
                if atomic == AtomicWriteOrd::Relaxed {
                    memory.store_relaxed(clocks, index, place.layout.size)
                } else {
                    memory.store_release(clocks, index, place.layout.size)
                }
            },
        )
    }

    fn validate_atomic_rmw(
        &mut self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx> {
        use AtomicRwOrd::*;
        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
        let release = matches!(atomic, Release | AcqRel | SeqCst);
        let this = self.eval_context_mut();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicRmw,
            move |memory, clocks, index, _| {
                if acquire {
                    memory.load_acquire(clocks, index, place.layout.size, None)?;
                } else {
                    memory.load_relaxed(clocks, index, place.layout.size, None)?;
                }
                if release {
                    memory.rmw_release(clocks, index, place.layout.size)
                } else {
                    memory.rmw_relaxed(clocks, index, place.layout.size)
                }
            },
        )
    }

    fn validate_atomic_op<A: Debug + Copy>(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: A,
        access: AccessType,
        mut op: impl FnMut(
            &mut MemoryCellClocks,
            &mut ThreadClockSet,
            VectorIdx,
            A,
        ) -> Result<(), DataRace>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        assert!(access.is_atomic());
        let Some(data_race) = this.machine.data_race.as_vclocks_ref() else {
            return interp_ok(());
        };
        if !data_race.race_detecting() {
            return interp_ok(());
        }
        let size = place.layout.size;
        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_vclocks_ref().unwrap();
        trace!(
            "Atomic op({}) with ordering {:?} on {:?} (size={})",
            access.description(None, None),
            &atomic,
            place.ptr(),
            size.bytes()
        );

        let current_span = this.machine.current_user_relevant_span();
        data_race.maybe_perform_sync_operation(
            &this.machine.threads,
            current_span,
            |index, mut thread_clocks| {
                for (mem_clocks_range, mem_clocks) in
                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                {
                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
                        mem::drop(thread_clocks);
                        return VClockAlloc::report_data_race(
                            data_race,
                            &this.machine.threads,
                            mem_clocks,
                            access,
                            place.layout.size,
                            interpret::Pointer::new(
                                alloc_id,
                                Size::from_bytes(mem_clocks_range.start),
                            ),
                            None,
                        )
                        .map(|_| true);
                    }
                }

                interp_ok(true)
            },
        )?;

        if tracing::enabled!(tracing::Level::TRACE) {
            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
                trace!(
                    "Updated atomic memory({:?}, size={}) to {:#?}",
                    place.ptr(),
                    size.bytes(),
                    mem_clocks.atomic_ops
                );
            }
        }

        interp_ok(())
    }
}

impl GlobalState {
    pub fn new(config: &MiriConfig) -> Self {
        let mut global_state = GlobalState {
            multi_threaded: Cell::new(false),
            ongoing_action_data_race_free: Cell::new(false),
            vector_clocks: RefCell::new(IndexVec::new()),
            vector_info: RefCell::new(IndexVec::new()),
            thread_info: RefCell::new(IndexVec::new()),
            reuse_candidates: RefCell::new(FxHashSet::default()),
            last_sc_fence: RefCell::new(VClock::default()),
            last_sc_write_per_thread: RefCell::new(VClock::default()),
            track_outdated_loads: config.track_outdated_loads,
            weak_memory: config.weak_memory_emulation,
        };

        let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
        global_state.vector_info.get_mut().push(ThreadId::MAIN_THREAD);
        global_state
            .thread_info
            .get_mut()
            .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });

        global_state
    }

    fn race_detecting(&self) -> bool {
        self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
    }

    pub fn ongoing_action_data_race_free(&self) -> bool {
        self.ongoing_action_data_race_free.get()
    }

    fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
        let mut reuse = self.reuse_candidates.borrow_mut();
        let vector_clocks = self.vector_clocks.borrow();
        for &candidate in reuse.iter() {
            let target_timestamp = vector_clocks[candidate].clock[candidate];
            if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
                let no_data_race = clock.clock[candidate] >= target_timestamp;

                let vector_terminated = reuse.contains(&clock_idx);

                no_data_race || vector_terminated
            }) {
                assert!(reuse.remove(&candidate));
                return Some(candidate);
            }
        }
        None
    }

    #[inline]
    pub fn thread_created(
        &mut self,
        thread_mgr: &ThreadManager<'_>,
        thread: ThreadId,
        current_span: Span,
    ) {
        let current_index = self.active_thread_index(thread_mgr);

        self.multi_threaded.set(true);

        let mut thread_info = self.thread_info.borrow_mut();
        thread_info.ensure_contains_elem(thread, Default::default);

        let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
            let vector_clocks = self.vector_clocks.get_mut();
            vector_clocks[reuse_index].increment_clock(reuse_index, current_span);

            let vector_info = self.vector_info.get_mut();
            let old_thread = vector_info[reuse_index];
            vector_info[reuse_index] = thread;

            thread_info[old_thread].vector_index = None;

            reuse_index
        } else {
            let vector_info = self.vector_info.get_mut();
            vector_info.push(thread)
        };

        trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);

        thread_info[thread].vector_index = Some(created_index);

        let vector_clocks = self.vector_clocks.get_mut();
        if created_index == vector_clocks.next_index() {
            vector_clocks.push(ThreadClockSet::default());
        }

        let (current, created) = vector_clocks.pick2_mut(current_index, created_index);

        created.join_with(current);

        current.increment_clock(current_index, current_span);
        created.increment_clock(created_index, current_span);
    }

    #[inline]
    pub fn thread_joined(&mut self, threads: &ThreadManager<'_>, joinee: ThreadId) {
        let thread_info = self.thread_info.borrow();
        let thread_info = &thread_info[joinee];

        let join_clock = thread_info
            .termination_vector_clock
            .as_ref()
            .expect("joined with thread but thread has not terminated");
        self.acquire_clock(join_clock, threads);

        if let Some(current_index) = thread_info.vector_index {
            if threads.get_live_thread_count() == 1 {
                let vector_clocks = self.vector_clocks.get_mut();
                let current_clock = &vector_clocks[current_index];
                if vector_clocks
                    .iter_enumerated()
                    .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
                {
                    self.multi_threaded.set(false);
                }
            }
        }
    }

    #[inline]
    pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_>) {
        let current_thread = thread_mgr.active_thread();
        let current_index = self.active_thread_index(thread_mgr);

        let termination_clock = self.release_clock(thread_mgr, |clock| clock.clone());
        self.thread_info.get_mut()[current_thread].termination_vector_clock =
            Some(termination_clock);

        let reuse = self.reuse_candidates.get_mut();
        reuse.insert(current_index);
    }

    fn atomic_fence<'tcx>(
        &self,
        machine: &MiriMachine<'tcx>,
        atomic: AtomicFenceOrd,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_user_relevant_span();
        self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| {
            trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);

            if atomic != AtomicFenceOrd::Release {
                clocks.apply_acquire_fence();
            }
            if atomic == AtomicFenceOrd::SeqCst {
                let mut sc_fence_clock = self.last_sc_fence.borrow_mut();
                sc_fence_clock.join(&clocks.clock);
                clocks.clock.join(&sc_fence_clock);
                clocks.write_seqcst.join(&self.last_sc_write_per_thread.borrow());
            }
            if atomic != AtomicFenceOrd::Acquire {
                clocks.apply_release_fence();
            }

            interp_ok(atomic != AtomicFenceOrd::Acquire)
        })
    }

    fn maybe_perform_sync_operation<'tcx>(
        &self,
        thread_mgr: &ThreadManager<'_>,
        current_span: Span,
        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
    ) -> InterpResult<'tcx> {
        if self.multi_threaded.get() {
            let (index, clocks) = self.active_thread_state_mut(thread_mgr);
            if op(index, clocks)? {
                let (_, mut clocks) = self.active_thread_state_mut(thread_mgr);
                clocks.increment_clock(index, current_span);
            }
        }
        interp_ok(())
    }

    fn print_thread_metadata(&self, thread_mgr: &ThreadManager<'_>, vector: VectorIdx) -> String {
        let thread = self.vector_info.borrow()[vector];
        let thread_name = thread_mgr.get_thread_display_name(thread);
        format!("thread `{thread_name}`")
    }

    pub fn acquire_clock<'tcx>(&self, clock: &VClock, threads: &ThreadManager<'tcx>) {
        let thread = threads.active_thread();
        let (_, mut clocks) = self.thread_state_mut(thread);
        clocks.clock.join(clock);
    }

    pub fn release_clock<'tcx, R>(
        &self,
        threads: &ThreadManager<'tcx>,
        callback: impl FnOnce(&VClock) -> R,
    ) -> R {
        let thread = threads.active_thread();
        let span = threads.active_thread_ref().current_user_relevant_span();
        let (index, mut clocks) = self.thread_state_mut(thread);
        let r = callback(&clocks.clock);
        clocks.increment_clock(index, span);

        r
    }

    fn thread_index(&self, thread: ThreadId) -> VectorIdx {
        self.thread_info.borrow()[thread].vector_index.expect("thread has no assigned vector")
    }

    #[inline]
    fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.thread_index(thread);
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }

    #[inline]
    fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        let index = self.thread_index(thread);
        let ref_vector = self.vector_clocks.borrow();
        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
        (index, clocks)
    }

    #[inline]
    pub(super) fn active_thread_state(
        &self,
        thread_mgr: &ThreadManager<'_>,
    ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        self.thread_state(thread_mgr.active_thread())
    }

    #[inline]
    pub(super) fn active_thread_state_mut(
        &self,
        thread_mgr: &ThreadManager<'_>,
    ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        self.thread_state_mut(thread_mgr.active_thread())
    }

    #[inline]
    fn active_thread_index(&self, thread_mgr: &ThreadManager<'_>) -> VectorIdx {
        let active_thread_id = thread_mgr.active_thread();
        self.thread_index(active_thread_id)
    }

    pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_>) {
        let (index, clocks) = self.active_thread_state(thread_mgr);
        self.last_sc_write_per_thread.borrow_mut().set_at_index(&clocks.clock, index);
    }

    pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_>) {
        let (.., mut clocks) = self.active_thread_state_mut(thread_mgr);
        clocks.read_seqcst.join(&self.last_sc_fence.borrow());
    }
}