rustc_data_structures/sync.rs

//! This module defines various operations and types that are implemented in
//! one way for the serial compiler, and another way for the parallel compiler.
//!
//! Operations
//! ----------
//! The parallel versions of operations use Rayon to execute code in parallel,
//! while the serial versions degenerate straightforwardly to serial execution.
//! The operations include `join`, `par_for_each_in`, `par_map`, and
//! `try_par_for_each_in`.
//!
//! Types
//! -----
//! The parallel versions of types provide various kinds of synchronization,
//! while the serial compiler versions do not.
//!
//! The following table shows how the types are implemented internally. Except
//! where noted otherwise, the type in column one is defined as a
//! newtype around the type from column two or three.
//!
//! | Type                    | Serial version      | Parallel version                |
//! | ----------------------- | ------------------- | ------------------------------- |
//! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
//! |                         |                     | `parking_lot::Mutex<T>`         |
//! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
//! | `MTLock<T>`        [^1] | `T`                 | `Lock<T>`                       |
//! |                         |                     |                                 |
//! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
//!
//! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
//! of a `RefCell`. This is appropriate when interior mutability is not
//! required.
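//!
//! # Example
//!
//! A minimal sketch of typical usage (illustrative only, so it is not compiled
//! as a doctest): the same code runs in parallel under `-Z threads` and
//! serially otherwise.
//!
//! ```ignore (illustrative)
//! use rustc_data_structures::sync::{Lock, join};
//!
//! let visited = Lock::new(Vec::new());
//!
//! // Both closures may run concurrently, so shared state goes behind a `Lock`.
//! join(
//!     || visited.lock().push("left"),
//!     || visited.lock().push("right"),
//! );
//!
//! assert_eq!(visited.lock().len(), 2);
//! ```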

use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};

pub use parking_lot::{
    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
};

pub use self::atomic::AtomicU64;
pub use self::freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
#[doc(no_inline)]
pub use self::lock::{Lock, LockGuard, Mode};
pub use self::mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
pub use self::parallel::{
    join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in,
};
pub use self::vec::{AppendOnlyIndexVec, AppendOnlyVec};
pub use self::worker_local::{Registry, WorkerLocal};
pub use crate::marker::*;

mod freeze;
mod lock;
mod parallel;
mod vec;
mod worker_local;

/// Keep the conditional imports together in a submodule, so that import-sorting
/// doesn't split them up.
mod atomic {
    // Most hosts can just use a regular AtomicU64.
    #[cfg(target_has_atomic = "64")]
    pub use std::sync::atomic::AtomicU64;

    // Some 32-bit hosts don't have AtomicU64, so use a fallback.
    #[cfg(not(target_has_atomic = "64"))]
    pub use portable_atomic::AtomicU64;
}
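
// An illustrative sketch of how the re-export above is meant to be consumed:
// callers just name `AtomicU64` and get either the `std` type or the
// `portable_atomic` fallback, with the same API on every host.
#[cfg(test)]
mod atomic_u64_example {
    use std::sync::atomic::Ordering;

    use super::AtomicU64;

    #[test]
    fn fetch_add_behaves_the_same_on_all_hosts() {
        let counter = AtomicU64::new(0);
        counter.fetch_add(41, Ordering::Relaxed);
        counter.fetch_add(1, Ordering::Relaxed);
        assert_eq!(counter.load(Ordering::Relaxed), 42);
    }
}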

mod mode {
    use std::sync::atomic::{AtomicU8, Ordering};

    const UNINITIALIZED: u8 = 0;
    const DYN_NOT_THREAD_SAFE: u8 = 1;
    const DYN_THREAD_SAFE: u8 = 2;

    static DYN_THREAD_SAFE_MODE: AtomicU8 = AtomicU8::new(UNINITIALIZED);

    // Whether thread safety is enabled (due to running under multiple threads).
    #[inline]
    pub fn is_dyn_thread_safe() -> bool {
        match DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) {
            DYN_NOT_THREAD_SAFE => false,
            DYN_THREAD_SAFE => true,
            _ => panic!("uninitialized dyn_thread_safe mode!"),
        }
    }

    // Whether thread safety might be enabled.
    #[inline]
    pub(super) fn might_be_dyn_thread_safe() -> bool {
        DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
    }

    // Only set by the `-Z threads` compile option.
    pub fn set_dyn_thread_safe_mode(mode: bool) {
        let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
        let previous = DYN_THREAD_SAFE_MODE.compare_exchange(
            UNINITIALIZED,
            set,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );

        // Check that the mode was either uninitialized or was already set to the requested mode.
        assert!(previous.is_ok() || previous == Err(set));
    }
}
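
// An illustrative sketch of the intended call pattern: the driver sets the
// mode exactly once, early, based on `-Z threads`, and everything else only
// queries it. Re-setting the same mode is tolerated, while a conflicting mode
// would trip the assertion in `set_dyn_thread_safe_mode`.
#[cfg(test)]
mod dyn_thread_safe_mode_example {
    use super::{is_dyn_thread_safe, set_dyn_thread_safe_mode};

    #[test]
    fn set_once_then_query() {
        // The first call moves the mode out of the uninitialized state...
        set_dyn_thread_safe_mode(true);
        assert!(is_dyn_thread_safe());
        // ...and repeating the same mode is accepted.
        set_dyn_thread_safe_mode(true);
    }
}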

// FIXME(parallel_compiler): Get rid of these aliases across the compiler.

#[derive(Debug, Default)]
pub struct MTLock<T>(Lock<T>);

impl<T> MTLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        MTLock(Lock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[inline(always)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.lock()
    }

    #[inline(always)]
    pub fn lock_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}
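
// A minimal usage sketch: `MTLock` behaves like a mutex-style `Lock`, so the
// guard returned by `lock` (or by the `lock_mut` alias) derefs to the inner value.
#[cfg(test)]
mod mtlock_example {
    use super::MTLock;

    #[test]
    fn lock_then_unwrap() {
        let shared = MTLock::new(Vec::new());
        shared.lock().push(1);
        shared.lock_mut().push(2);
        assert_eq!(shared.into_inner(), vec![1, 2]);
    }
}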

/// This makes locks panic if they are already held.
/// It is only useful when you are running in a single thread.
const ERROR_CHECKING: bool = false;

/// Forces its contents to be aligned to 64 bytes, a common cache line size,
/// so that adjacent instances do not share a cache line (avoiding false sharing).
#[derive(Default)]
#[repr(align(64))]
pub struct CacheAligned<T>(pub T);

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}
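
// A small sketch of the `insert_same` contract: re-inserting an equal value
// for an existing key is a no-op, while a different value would trip the
// assertion inside `insert_same`.
#[cfg(test)]
mod insert_same_example {
    use std::collections::HashMap;

    use super::HashMapExt;

    #[test]
    fn equal_reinsert_is_allowed() {
        let mut map = HashMap::new();
        map.insert_same("key", 1);
        // Same key, equal value: allowed, and the entry is left untouched.
        map.insert_same("key", 1);
        assert_eq!(map["key"], 1);
    }
}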

#[derive(Debug, Default)]
pub struct RwLock<T>(parking_lot::RwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(parking_lot::RwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }
}
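
// A minimal usage sketch of the wrapper above: `read`/`write` mirror the
// underlying `parking_lot::RwLock`, and `borrow`/`borrow_mut` are thin
// aliases for them.
#[cfg(test)]
mod rwlock_example {
    use super::RwLock;

    #[test]
    fn read_write_and_borrow() {
        let cache = RwLock::new(String::new());
        cache.write().push_str("hello");
        assert_eq!(&*cache.read(), "hello");
        // `borrow_mut` simply forwards to `write`.
        cache.borrow_mut().push('!');
        assert_eq!(cache.into_inner(), "hello!");
    }
}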