// miri/shims/unix/linux_like/sync.rs
1use crate::concurrency::sync::FutexRef;
2use crate::helpers::check_min_vararg_count;
3use crate::*;
4
/// Futex state kept per memory location. Created on demand through
/// `get_sync_or_init` when a FUTEX_WAIT or FUTEX_WAKE operation targets
/// the address.
struct LinuxFutex {
    /// The shared futex object handed to `futex_wait`/`futex_wake`.
    futex: FutexRef,
}
8
/// Implementation of the SYS_futex syscall.
///
/// `varargs` holds the syscall arguments *after* the syscall number itself,
/// i.e. `varargs[0]` is `addr`. How many of them are used depends on `op`;
/// the first three (`addr`, `op`, `val`) are common to all operations.
/// The syscall's result (or -1 with errno set) is written to `dest`.
pub fn futex<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    varargs: &[OpTy<'tcx>],
    dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
    let [addr, op, val] = check_min_vararg_count("`syscall(SYS_futex, ...)`", varargs)?;

    // The first three arguments (after the syscall number itself) are the same to all futex operations:
    // (int *addr, int op, int val).
    // We checked above that these definitely exist.
    let addr = ecx.read_pointer(addr)?;
    let op = ecx.read_scalar(op)?.to_i32()?;
    let val = ecx.read_scalar(val)?.to_i32()?;

    // This is a vararg function so we have to bring our own type for this pointer.
    let addr = ecx.ptr_to_mplace(addr, ecx.machine.layouts.i32);

    let futex_private = ecx.eval_libc_i32("FUTEX_PRIVATE_FLAG");
    let futex_wait = ecx.eval_libc_i32("FUTEX_WAIT");
    let futex_wait_bitset = ecx.eval_libc_i32("FUTEX_WAIT_BITSET");
    let futex_wake = ecx.eval_libc_i32("FUTEX_WAKE");
    let futex_wake_bitset = ecx.eval_libc_i32("FUTEX_WAKE_BITSET");
    let futex_realtime = ecx.eval_libc_i32("FUTEX_CLOCK_REALTIME");

    // FUTEX_PRIVATE enables an optimization that stops it from working across processes.
    // Miri doesn't support that anyway, so we ignore that flag.
    match op & !futex_private {
        // FUTEX_WAIT: (int *addr, int op = FUTEX_WAIT, int val, const timespec *timeout)
        // Blocks the thread if *addr still equals val. Wakes up when FUTEX_WAKE is called on the same address,
        // or *timeout expires. `timeout == null` for an infinite timeout.
        //
        // FUTEX_WAIT_BITSET: (int *addr, int op = FUTEX_WAIT_BITSET, int val, const timespec *timeout, int *_ignored, unsigned int bitset)
        // This is identical to FUTEX_WAIT, except:
        // - The timeout is absolute rather than relative.
        // - You can specify the bitset to select which WAKE operations to respond to.
        op if op & !futex_realtime == futex_wait || op & !futex_realtime == futex_wait_bitset => {
            let wait_bitset = op & !futex_realtime == futex_wait_bitset;

            let (timeout, bitset) = if wait_bitset {
                // FUTEX_WAIT_BITSET takes three more arguments after the common three.
                let [_, _, _, timeout, uaddr2, bitset] = check_min_vararg_count(
                    "`syscall(SYS_futex, FUTEX_WAIT_BITSET, ...)`",
                    varargs,
                )?;
                // `uaddr2` is ignored by this operation, but we still read both pointer
                // operands so that invalid operands are reported.
                let _timeout = ecx.read_pointer(timeout)?;
                let _uaddr2 = ecx.read_pointer(uaddr2)?;
                (timeout, ecx.read_scalar(bitset)?.to_u32()?)
            } else {
                let [_, _, _, timeout] =
                    check_min_vararg_count("`syscall(SYS_futex, FUTEX_WAIT, ...)`", varargs)?;
                // Plain FUTEX_WAIT responds to every wake, i.e. the full bitset.
                (timeout, u32::MAX)
            };

            if bitset == 0 {
                return ecx.set_last_error_and_return(LibcError("EINVAL"), dest);
            }

            let timeout = ecx.deref_pointer_as(timeout, ecx.libc_ty_layout("timespec"))?;
            let timeout = if ecx.ptr_is_null(timeout.ptr())? {
                None
            } else {
                let duration = match ecx.read_timespec(&timeout)? {
                    Some(duration) => duration,
                    None => {
                        // The timespec did not hold a valid duration.
                        return ecx.set_last_error_and_return(LibcError("EINVAL"), dest);
                    }
                };
                let timeout_clock = if op & futex_realtime == futex_realtime {
                    ecx.check_no_isolation(
                        "`futex` syscall with `op=FUTEX_WAIT` and non-null timeout with `FUTEX_CLOCK_REALTIME`",
                    )?;
                    TimeoutClock::RealTime
                } else {
                    TimeoutClock::Monotonic
                };
                let timeout_anchor = if wait_bitset {
                    // FUTEX_WAIT_BITSET uses an absolute timestamp.
                    TimeoutAnchor::Absolute
                } else {
                    // FUTEX_WAIT uses a relative timestamp.
                    TimeoutAnchor::Relative
                };
                Some((timeout_clock, timeout_anchor, duration))
            };
            // There may be a concurrent thread changing the value of addr
            // and then invoking the FUTEX_WAKE syscall. It is critical that the
            // effects of this and the other thread are correctly observed,
            // otherwise we will deadlock.
            //
            // There are two scenarios to consider, depending on whether WAIT or WAKE goes first:
            // 1. If we (FUTEX_WAIT) execute first, we'll push ourselves into the waiters queue and
            //    go to sleep. They (FUTEX_WAKE) will see us in the queue and wake us up. It doesn't
            //    matter how the addr write is ordered.
            // 2. If they (FUTEX_WAKE) execute first, that means the addr write is also before us
            //    (FUTEX_WAIT). It is crucial that we observe addr's new value. If we see an
            //    outdated value that happens to equal the expected val, then we'll put ourselves to
            //    sleep with no one to wake us up, so we end up with a deadlock. This is prevented
            //    by having a SeqCst fence inside FUTEX_WAKE syscall, and another SeqCst fence here
            //    in FUTEX_WAIT. The atomic read on addr after the SeqCst fence is guaranteed not to
            //    see any value older than the addr write immediately before calling FUTEX_WAKE.
            //    We'll see futex_val != val and return without sleeping.
            //
            //    Note that the fences do not create any happens-before relationship.
            //    The read sees the write immediately before the fence not because
            //    one happens after the other, but is instead due to a guarantee unique
            //    to SeqCst fences that restricts what an atomic read placed AFTER the
            //    fence can see. The read still has to be atomic, otherwise it's a data
            //    race. This guarantee cannot be achieved with acquire-release fences
            //    since they only talk about reads placed BEFORE a fence - and places
            //    no restrictions on what the read itself can see, only that there is
            //    a happens-before between the fences IF the read happens to see the
            //    right value. This is useless to us, since we need the read itself
            //    to see an up-to-date value.
            //
            // The above case distinction is valid since both FUTEX_WAIT and FUTEX_WAKE
            // contain a SeqCst fence, therefore inducing a total order between the operations.
            // It is also critical that the fence, the atomic load, and the comparison in FUTEX_WAIT
            // altogether happen atomically. If the other thread's fence in FUTEX_WAKE
            // gets interleaved after our fence, then we lose the guarantee on the
            // atomic load being up-to-date; if the other thread's write on addr and FUTEX_WAKE
            // call are interleaved after the load but before the comparison, then we get a TOCTOU
            // race condition, and go to sleep thinking the other thread will wake us up,
            // even though they have already finished.
            //
            // Thankfully, preemptions cannot happen inside a Miri shim, so we do not need to
            // do anything special to guarantee fence-load-comparison atomicity.
            ecx.atomic_fence(AtomicFenceOrd::SeqCst)?;
            // Read an `i32` through the pointer, regardless of any wrapper types.
            // It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`.
            // We do an acquire read -- it only seems reasonable that if we observe a value here, we
            // actually establish an ordering with that value.
            let futex_val = ecx.read_scalar_atomic(&addr, AtomicReadOrd::Acquire)?.to_i32()?;
            if val == futex_val {
                // The value still matches, so we block the thread and make it wait for FUTEX_WAKE.

                // This cannot fail since we already did an atomic acquire read on that pointer.
                // Acquire reads are only allowed on mutable memory.
                let futex_ref = ecx
                    .get_sync_or_init(addr.ptr(), |_| LinuxFutex { futex: Default::default() })
                    .unwrap()
                    .futex
                    .clone();

                // The callback below runs when the thread is unblocked, so it needs
                // an owned copy of the return place.
                let dest = dest.clone();
                ecx.futex_wait(
                    futex_ref,
                    bitset,
                    timeout,
                    callback!(
                        @capture<'tcx> {
                            dest: MPlaceTy<'tcx>,
                        }
                        |ecx, unblock: UnblockKind| match unblock {
                            UnblockKind::Ready => {
                                // Woken by a matching FUTEX_WAKE: return 0.
                                ecx.write_int(0, &dest)
                            }
                            UnblockKind::TimedOut => {
                                ecx.set_last_error_and_return(LibcError("ETIMEDOUT"), &dest)
                            }
                        }
                    ),
                );
            } else {
                // The futex value doesn't match the expected value, so we return failure
                // right away without sleeping: -1 and errno set to EAGAIN.
                return ecx.set_last_error_and_return(LibcError("EAGAIN"), dest);
            }
        }
        // FUTEX_WAKE: (int *addr, int op = FUTEX_WAKE, int val)
        // Wakes at most `val` threads waiting on the futex at `addr`.
        // Returns the number of threads woken up.
        // Does not access the futex value at *addr.
        // FUTEX_WAKE_BITSET: (int *addr, int op = FUTEX_WAKE_BITSET, int val, const timespec *_unused, int *_unused, unsigned int bitset)
        // Same as FUTEX_WAKE, but allows you to specify a bitset to select which threads to wake up.
        op if op == futex_wake || op == futex_wake_bitset => {
            let Some(futex_ref) =
                ecx.get_sync_or_init(addr.ptr(), |_| LinuxFutex { futex: Default::default() })
            else {
                // No AllocId, or no live allocation at that AllocId.
                // Return an error code. (That seems nicer than silently doing something non-intuitive.)
                // This means that if an address gets reused by a new allocation,
                // we'll use an independent futex queue for this... that seems acceptable.
                return ecx.set_last_error_and_return(LibcError("EFAULT"), dest);
            };
            let futex_ref = futex_ref.futex.clone();

            let bitset = if op == futex_wake_bitset {
                // FUTEX_WAKE_BITSET takes three more arguments after the common three;
                // only `bitset` is actually used.
                let [_, _, _, timeout, uaddr2, bitset] = check_min_vararg_count(
                    "`syscall(SYS_futex, FUTEX_WAKE_BITSET, ...)`",
                    varargs,
                )?;
                let _timeout = ecx.read_pointer(timeout)?;
                let _uaddr2 = ecx.read_pointer(uaddr2)?;
                ecx.read_scalar(bitset)?.to_u32()?
            } else {
                // Plain FUTEX_WAKE wakes regardless of the waiters' bitsets.
                u32::MAX
            };
            if bitset == 0 {
                return ecx.set_last_error_and_return(LibcError("EINVAL"), dest);
            }
            // Together with the SeqCst fence in futex_wait, this makes sure that futex_wait
            // will see the latest value on addr which could be changed by our caller
            // before doing the syscall.
            ecx.atomic_fence(AtomicFenceOrd::SeqCst)?;
            let woken = ecx.futex_wake(&futex_ref, bitset, val.try_into().unwrap())?;
            ecx.write_scalar(Scalar::from_target_isize(woken.try_into().unwrap(), ecx), dest)?;
        }
        op => throw_unsup_format!("Miri does not support `futex` syscall with op={}", op),
    }

    interp_ok(())
}