rustc_data_structures/sync.rs

//! This module defines various operations and types that are implemented in
//! one way for the serial compiler, and another way for the parallel compiler.
//!
//! Operations
//! ----------
//! The parallel versions of operations use Rayon to execute code in parallel,
//! while the serial versions degenerate straightforwardly to serial execution.
//! The operations include `join`, `parallel_guard`, `par_for_each_in`, and `par_map`.
//!
//! Types
//! -----
//! The parallel versions of types provide various kinds of synchronization,
//! while the serial compiler versions do not.
//!
//! The following table shows how the types are implemented internally. Except
//! where noted otherwise, the type in column one is defined as a
//! newtype around the type from column two or three.
//!
//! | Type                    | Serial version      | Parallel version                |
//! | ----------------------- | ------------------- | ------------------------------- |
//! | `LRef<'a, T>` [^2]      | `&'a mut T`         | `&'a T`                         |
//! |                         |                     |                                 |
//! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
//! |                         |                     | `parking_lot::Mutex<T>`         |
//! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
//! | `MTLock<T>`        [^1] | `T`                 | `Lock<T>`                       |
//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
//! |                         |                     |                                 |
//! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
//!
//! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
//! of a `RefCell`. This is appropriate when interior mutability is not
//! required.
//!
//! [^2]: `LRef` and `MTLockRef` are type aliases.
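//!
//! Example
//! -------
//! A minimal usage sketch (not taken from the compiler itself); it assumes the
//! `join` and `par_for_each_in` re-exports below, and the closure bodies are
//! placeholders:
//!
//! ```ignore (illustrative-sketch)
//! use rustc_data_structures::sync::{join, par_for_each_in};
//!
//! // Runs both closures, in parallel when the parallel front end is enabled,
//! // serially otherwise, and returns both results.
//! let (a, b) = join(|| 1 + 1, || 2 + 2);
//! assert_eq!((a, b), (2, 4));
//!
//! // Visits every element, potentially in parallel.
//! par_for_each_in(vec![1, 2, 3], |x| {
//!     let _ = x * x;
//! });
//! ```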

use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};

pub use crate::marker::*;

mod lock;
#[doc(no_inline)]
pub use lock::{Lock, LockGuard, Mode};

mod worker_local;
pub use worker_local::{Registry, WorkerLocal};

mod parallel;
pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in};
pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};

mod vec;

mod freeze;
pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};

mod mode {
    use std::sync::atomic::{AtomicU8, Ordering};

    const UNINITIALIZED: u8 = 0;
    const DYN_NOT_THREAD_SAFE: u8 = 1;
    const DYN_THREAD_SAFE: u8 = 2;

    static DYN_THREAD_SAFE_MODE: AtomicU8 = AtomicU8::new(UNINITIALIZED);

    // Whether thread safety is enabled (due to running under multiple threads).
    #[inline]
    pub fn is_dyn_thread_safe() -> bool {
        match DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) {
            DYN_NOT_THREAD_SAFE => false,
            DYN_THREAD_SAFE => true,
            _ => panic!("uninitialized dyn_thread_safe mode!"),
        }
    }

    // Whether thread safety might be enabled.
    #[inline]
    pub fn might_be_dyn_thread_safe() -> bool {
        DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
    }

    // Only set by the `-Z threads` compile option
    pub fn set_dyn_thread_safe_mode(mode: bool) {
        let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
        let previous = DYN_THREAD_SAFE_MODE.compare_exchange(
            UNINITIALIZED,
            set,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );

        // Check that the mode was either uninitialized or was already set to the requested mode.
        assert!(previous.is_ok() || previous == Err(set));
    }
}
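
// Illustrative sketch (not part of the original module): the driver picks the
// mode exactly once, before any queries run, and later code just branches on
// `is_dyn_thread_safe`. The `threads` value is a stand-in for `-Z threads`.
#[cfg(test)]
mod dyn_thread_safe_mode_example {
    use super::{is_dyn_thread_safe, set_dyn_thread_safe_mode};

    #[test]
    fn set_once_then_query() {
        let threads = 2; // hypothetical `-Z threads=2`
        set_dyn_thread_safe_mode(threads > 1);
        assert!(is_dyn_thread_safe());
        // Re-setting the same mode is accepted; a conflicting mode would panic.
        set_dyn_thread_safe_mode(true);
    }
}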

// FIXME(parallel_compiler): Get rid of these aliases across the compiler.

pub use std::sync::OnceLock;
// Targets with native 64-bit atomics re-export the std `AtomicU64`; other
// targets fall back to `portable_atomic` (see the `cfg(not(...))` use below).
#[cfg(target_has_atomic = "64")]
pub use std::sync::atomic::AtomicU64;

pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
pub use parking_lot::{
    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
};
#[cfg(not(target_has_atomic = "64"))]
pub use portable_atomic::AtomicU64;
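
// Illustrative sketch (not from the original source): whichever `AtomicU64` the
// cfg selection above picked, callers use the same `std`-style API, so the
// `portable_atomic` fallback is transparent to them.
#[cfg(test)]
mod atomic_u64_example {
    use std::sync::atomic::Ordering;

    use super::AtomicU64;

    #[test]
    fn same_api_on_every_target() {
        let n = AtomicU64::new(0);
        n.fetch_add(5, Ordering::Relaxed);
        assert_eq!(n.load(Ordering::Relaxed), 5);
    }
}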

pub type LRef<'a, T> = &'a T;

#[derive(Debug, Default)]
pub struct MTLock<T>(Lock<T>);

impl<T> MTLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        MTLock(Lock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[inline(always)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.lock()
    }

    #[inline(always)]
    pub fn lock_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}
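
// Illustrative sketch (not part of the original module): `MTLock` is used like a
// plain mutex; this assumes `LockGuard` dereferences mutably to the inner value,
// as provided by the `lock` submodule above.
#[cfg(test)]
mod mt_lock_example {
    use super::MTLock;

    #[test]
    fn lock_and_mutate() {
        let counter = MTLock::new(0usize);
        *counter.lock() += 1;
        // `lock_mut` is just another name for `lock` on this wrapper.
        *counter.lock_mut() += 1;
        assert_eq!(counter.into_inner(), 2);
    }
}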

use parking_lot::RwLock as InnerRwLock;

/// This makes locks panic if they are already held.
/// It is only useful when you are running in a single thread.
const ERROR_CHECKING: bool = false;

pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>;

#[derive(Default)]
#[repr(align(64))]
pub struct CacheAligned<T>(pub T);
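
// Illustrative sketch (not part of the original module): `CacheAligned` only adds
// alignment (64 bytes is the assumed cache-line size), so that two values placed
// next to each other land on separate cache lines and avoid false sharing.
#[cfg(test)]
mod cache_aligned_example {
    use super::CacheAligned;

    #[test]
    fn padded_to_a_cache_line() {
        assert_eq!(std::mem::align_of::<CacheAligned<u8>>(), 64);
        let cell = CacheAligned(42u8);
        assert_eq!(cell.0, 42);
    }
}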

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it panics if there is already an
    /// entry for `key` with a value not equal to `value`.
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}
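
// Illustrative sketch (not part of the original module): `insert_same` is
// idempotent for equal values and panics on a conflicting one.
#[cfg(test)]
mod hash_map_ext_example {
    use std::collections::HashMap;

    use super::HashMapExt;

    #[test]
    fn insert_same_accepts_equal_values() {
        let mut map = HashMap::new();
        map.insert_same("key", 1);
        // Re-inserting an equal value is a no-op; `map.insert_same("key", 2)`
        // would trip the assertion inside `insert_same`.
        map.insert_same("key", 1);
        assert_eq!(map["key"], 1);
    }
}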

#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }
}
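
// Illustrative sketch (not part of the original module): the wrapper keeps the
// `RefCell`-style `borrow`/`borrow_mut` names on top of the read-write lock.
#[cfg(test)]
mod rw_lock_example {
    use super::RwLock;

    #[test]
    fn read_write_round_trip() {
        let lock = RwLock::new(vec![1, 2, 3]);
        assert_eq!(lock.read().len(), 3);
        lock.write().push(4);
        // `borrow` and `borrow_mut` are aliases for `read` and `write`.
        assert_eq!(*lock.borrow(), vec![1, 2, 3, 4]);
        // `try_write` succeeds here because no other guard is held.
        assert!(lock.try_write().is_ok());
    }
}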