std/thread/id.rs
use crate::num::NonZero;
use crate::sync::atomic::{Atomic, Ordering};

/// A unique identifier for a running thread.
///
/// A `ThreadId` is an opaque object that uniquely identifies each thread
/// created during the lifetime of a process. `ThreadId`s are guaranteed not to
/// be reused, even when a thread terminates. `ThreadId`s are under the control
/// of Rust's standard library and there may not be any relationship between
/// `ThreadId` and the underlying platform's notion of a thread identifier --
/// the two concepts cannot, therefore, be used interchangeably. A `ThreadId`
/// can be retrieved from the [`id`] method on a [`Thread`].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let other_thread = thread::spawn(|| {
///     thread::current().id()
/// });
///
/// let other_thread_id = other_thread.join().unwrap();
/// assert!(thread::current().id() != other_thread_id);
/// ```
///
/// [`Thread`]: super::Thread
/// [`id`]: super::Thread::id
#[stable(feature = "thread_id", since = "1.19.0")]
#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)]
pub struct ThreadId(NonZero<u64>);

impl ThreadId {
    // Generate a new unique thread ID.
    pub(crate) fn new() -> ThreadId {
        #[cold]
        fn exhausted() -> ! {
            panic!("failed to generate unique thread ID: bitspace exhausted")
        }

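        // Fast path below: a lock-free 64-bit atomic counter where the target supports it;
        // otherwise fall back to a spinlock-protected counter in the `_` arm.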
        cfg_select! {
            target_has_atomic = "64" => {
                use crate::sync::atomic::AtomicU64;

                static COUNTER: Atomic<u64> = AtomicU64::new(0);

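                // Bump the counter with a CAS retry loop: `compare_exchange_weak` may fail
                // spuriously or because another thread won the race, in which case we retry
                // with the freshly observed value.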
                let mut last = COUNTER.load(Ordering::Relaxed);
                loop {
                    let Some(id) = last.checked_add(1) else {
                        exhausted();
                    };

                    match COUNTER.compare_exchange_weak(last, id, Ordering::Relaxed, Ordering::Relaxed) {
                        Ok(_) => return ThreadId(NonZero::new(id).unwrap()),
                        Err(id) => last = id,
                    }
                }
            }
            _ => {
                use crate::cell::SyncUnsafeCell;
                use crate::hint::spin_loop;
                use crate::sync::atomic::AtomicBool;
                use crate::thread::yield_now;

                // If we don't have a 64-bit atomic we use a small spinlock. We don't use Mutex
                // here as we might be trying to get the current thread id in the global allocator,
                // and on some platforms Mutex requires allocation.
                static COUNTER_LOCKED: Atomic<bool> = AtomicBool::new(false);
                static COUNTER: SyncUnsafeCell<u64> = SyncUnsafeCell::new(0);

                // Acquire lock.
                let mut spin = 0;
                // Miri doesn't like it when we yield here as it interferes with deterministically
                // scheduling threads, so avoid `compare_exchange_weak` to avoid spurious yields.
                while COUNTER_LOCKED.swap(true, Ordering::Acquire) {
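                    // Exponential backoff: busy-spin for 1, 2, 4, then 8 iterations,
                    // after which we yield to the OS scheduler instead.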
                    if spin <= 3 {
                        for _ in 0..(1 << spin) {
                            spin_loop();
                        }
                    } else {
                        yield_now();
                    }
                    spin += 1;
                }
                // This was `false` before the swap, so we got the lock.

                // SAFETY: we have an exclusive lock on the counter.
                unsafe {
                    if let Some(id) = (*COUNTER.get()).checked_add(1) {
                        *COUNTER.get() = id;
                        COUNTER_LOCKED.store(false, Ordering::Release);
                        ThreadId(NonZero::new(id).unwrap())
                    } else {
                        COUNTER_LOCKED.store(false, Ordering::Release);
                        exhausted()
                    }
                }
            }
        }
    }

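    // Recreates a `ThreadId` from its raw value. Returns `None` for zero,
    // which is never handed out as a thread ID.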
    #[cfg(any(not(target_thread_local), target_has_atomic = "64"))]
    pub(super) fn from_u64(v: u64) -> Option<ThreadId> {
        NonZero::new(v).map(ThreadId)
    }

    /// This returns a numeric identifier for the thread identified by this
    /// `ThreadId`.
    ///
    /// As noted in the documentation for the type itself, it is essentially an
    /// opaque ID, but is guaranteed to be unique for each thread. The returned
    /// value is entirely opaque -- only equality testing is stable. Note that
    /// it is not guaranteed which values new threads will return, and this may
    /// change across Rust versions.
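    ///
    /// # Examples
    ///
    /// A minimal sketch of reading the numeric value; since `as_u64` is gated
    /// behind the unstable `thread_id_value` feature, this assumes a nightly
    /// toolchain. The concrete numbers are unspecified, so only (in)equality
    /// of the values is meaningful.
    ///
    /// ```
    /// #![feature(thread_id_value)]
    /// use std::thread;
    ///
    /// let main_id = thread::current().id().as_u64();
    /// let other_id = thread::spawn(|| thread::current().id().as_u64()).join().unwrap();
    ///
    /// // Thread IDs are unique, so the raw values must differ.
    /// assert_ne!(main_id, other_id);
    /// ```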
    #[must_use]
    #[unstable(feature = "thread_id_value", issue = "67939")]
    pub fn as_u64(&self) -> NonZero<u64> {
        self.0
    }
}