miri/shims/unix/macos/sync.rs

//! Contains macOS-specific synchronization functions.
//!
//! For `os_unfair_lock`, see the documentation
//! <https://developer.apple.com/documentation/os/synchronization?language=objc>
//! and in case of underspecification its implementation
//! <https://github.com/apple-oss-distributions/libplatform/blob/a00a4cc36da2110578bcf3b8eeeeb93dcc7f4e11/src/os/lock.c#L645>.
//!
//! Note that we don't emulate every edge-case behaviour of the locks. Notably,
//! we don't abort when locking a lock owned by a thread that has already exited
//! and we do not detect copying of the lock, but macOS doesn't guarantee anything
//! in that case either.
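//!
//! As a rough sketch (illustrative only, not code from this module), the
//! user-side API being emulated here looks like this when called through the
//! `libc` crate:
//! ```ignore
//! let mut lock = libc::OS_UNFAIR_LOCK_INIT;
//! unsafe {
//!     libc::os_unfair_lock_lock(&mut lock);
//!     // ... critical section; the lock must not be moved while it is held ...
//!     libc::os_unfair_lock_unlock(&mut lock);
//! }
//! ```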

use std::cell::Cell;
use std::time::Duration;

use rustc_abi::Size;

use crate::concurrency::sync::FutexRef;
use crate::*;

/// The state of an `os_unfair_lock` as tracked by Miri: either an active lock
/// backed by one of our mutexes, or a poisoned lock (one that was moved while
/// it was held; see `os_unfair_lock_get_data` below).
#[derive(Clone)]
enum MacOsUnfairLock {
    Poisoned,
    Active { mutex_ref: MutexRef },
}

/// The timeout argument of the `os_sync_wait_on_address` family of shims:
/// no timeout, a timeout relative to now (`os_sync_wait_on_address_with_timeout`),
/// or an absolute deadline (`os_sync_wait_on_address_with_deadline`).
pub enum MacOsFutexTimeout<'a, 'tcx> {
    None,
    Relative { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
    Absolute { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
}

/// Metadata for a macOS futex.
///
/// Since macOS 14.4, Apple has exposed the previously private futex API consisting
/// of `os_sync_wait_on_address` (and friends) and `os_sync_wake_by_address_{any, all}`.
/// These work with different value sizes and flags, which are validated to be consistent.
/// This structure keeps track of both the futex queue and these values.
struct MacOsFutex {
    futex: FutexRef,
    /// The size in bytes of the atomic primitive underlying this futex.
    size: Cell<u64>,
    /// Whether the futex is shared across process boundaries.
    shared: Cell<bool>,
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn os_unfair_lock_get_data<'a>(
        &'a mut self,
        lock_ptr: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, &'a MacOsUnfairLock>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        let lock = this.deref_pointer_as(lock_ptr, this.libc_ty_layout("os_unfair_lock_s"))?;
        this.lazy_sync_get_data(
            &lock,
            Size::ZERO, // offset for init tracking
            || {
                // If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
                // this means the lock was moved while locked. This can happen with a `std` lock,
                // but then any future attempt to unlock will just deadlock. In practice, terrible
                // things can probably happen if you swap two locked locks, since they'd wake up
                // from the wrong queue... we just won't catch all UB of this library API then (we
                // would need to store some unique identifier in-memory for this, instead of a static
                // LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
                interp_ok(MacOsUnfairLock::Poisoned)
            },
            |ecx| {
                let mutex_ref = ecx.machine.sync.mutex_create();
                interp_ok(MacOsUnfairLock::Active { mutex_ref })
            },
        )
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Implements [`os_sync_wait_on_address`], [`os_sync_wait_on_address_with_deadline`]
    /// and [`os_sync_wait_on_address_with_timeout`].
    ///
    /// [`os_sync_wait_on_address`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address?language=objc
    /// [`os_sync_wait_on_address_with_deadline`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address_with_deadline?language=objc
    /// [`os_sync_wait_on_address_with_timeout`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address_with_timeout?language=objc
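    ///
    /// As a rough sketch (illustrative only, not part of this module), a caller
    /// going through the `libc` crate would reach this shim roughly like so:
    /// ```ignore
    /// let futex = std::sync::atomic::AtomicU32::new(0);
    /// // Blocks while the value at the address is still 0, until some other
    /// // thread wakes us via `os_sync_wake_by_address_any`/`_all`.
    /// let ret = unsafe {
    ///     libc::os_sync_wait_on_address(
    ///         futex.as_ptr().cast(),
    ///         0,
    ///         4,
    ///         libc::OS_SYNC_WAIT_ON_ADDRESS_NONE,
    ///     )
    /// };
    /// ```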
    fn os_sync_wait_on_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        value_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        timeout: MacOsFutexTimeout<'_, 'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_SHARED");
        let absolute_clock = this.eval_libc_u32("OS_CLOCK_MACH_ABSOLUTE_TIME");

        let ptr = this.read_pointer(addr_op)?;
        let value = this.read_scalar(value_op)?.to_u64()?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        let clock_timeout = match timeout {
            MacOsFutexTimeout::None => None,
            MacOsFutexTimeout::Relative { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Relative, timeout))
            }
            MacOsFutexTimeout::Absolute { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Absolute, timeout))
            }
        };

        // Perform validation of the arguments.
        let addr = ptr.addr().bytes();
        if addr == 0
            || !matches!(size, 4 | 8)
            || !addr.is_multiple_of(size)
            || (flags != none && flags != shared)
            || clock_timeout
                .is_some_and(|(clock, _, timeout)| clock != absolute_clock || timeout == 0)
        {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;
        let timeout = clock_timeout.map(|(_, anchor, timeout)| {
            // The only clock that is currently supported is the monotonic clock.
            // While the deadline argument of `os_sync_wait_on_address_with_deadline`
            // is actually not in nanoseconds but in the units of `mach_absolute_time`,
            // the two are equivalent in Miri.
            (TimeoutClock::Monotonic, anchor, Duration::from_nanos(timeout))
        });

        // See the Linux futex implementation for why this fence exists.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;

        let layout = this.machine.layouts.uint(Size::from_bytes(size)).unwrap();
        let futex_val = this
            .read_scalar_atomic(&this.ptr_to_mplace(ptr, layout), AtomicReadOrd::Acquire)?
            .to_bits(Size::from_bytes(size))?;

        let futex = this
            .get_sync_or_init(ptr, |_| {
                MacOsFutex {
                    futex: Default::default(),
                    size: Cell::new(size),
                    shared: Cell::new(is_shared),
                }
            })
            .unwrap();

        // Detect mismatches between the flags and sizes used on this address
        // by comparing it with the parameters used by the other waiters in
        // the current list. If the list is currently empty, update those
        // parameters.
        if futex.futex.waiters() == 0 {
            futex.size.set(size);
            futex.shared.set(is_shared);
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        if futex_val == value.into() {
            // If the values are the same, we have to block.
            let futex_ref = futex.futex.clone();
            let dest = dest.clone();
            this.futex_wait(
                futex_ref.clone(),
                u32::MAX, // bitset
                timeout,
                callback!(
                    @capture<'tcx> {
                        dest: MPlaceTy<'tcx>,
                        futex_ref: FutexRef,
                    }
                    |this, unblock: UnblockKind| {
                        match unblock {
                            UnblockKind::Ready => {
                                let remaining = futex_ref.waiters().try_into().unwrap();
                                this.write_scalar(Scalar::from_i32(remaining), &dest)
                            }
                            UnblockKind::TimedOut => {
                                this.set_last_error_and_return(LibcError("ETIMEDOUT"), &dest)
                            }
                        }
                    }
                ),
            );
        } else {
            // Else: don't block; just return the current number of waiters.
            let waiters = futex.futex.waiters().try_into().unwrap();
            this.write_scalar(Scalar::from_i32(waiters), dest)?;
        }

        interp_ok(())
    }

    /// Implements [`os_sync_wake_by_address_all`] and [`os_sync_wake_by_address_any`].
    ///
    /// [`os_sync_wake_by_address_all`]: https://developer.apple.com/documentation/os/os_sync_wake_by_address_all?language=objc
    /// [`os_sync_wake_by_address_any`]: https://developer.apple.com/documentation/os/os_sync_wake_by_address_any?language=objc
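    ///
    /// As a rough sketch (illustrative only), the user-side counterpart of the
    /// wait example above would be:
    /// ```ignore
    /// futex.store(1, std::sync::atomic::Ordering::Release);
    /// // Wake one waiter; fails with ENOENT if nobody is waiting on the address.
    /// let ret = unsafe {
    ///     libc::os_sync_wake_by_address_any(
    ///         futex.as_ptr().cast(),
    ///         4,
    ///         libc::OS_SYNC_WAKE_BY_ADDRESS_NONE,
    ///     )
    /// };
    /// ```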
    fn os_sync_wake_by_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        all: bool,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_SHARED");

        let ptr = this.read_pointer(addr_op)?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        // Perform validation of the arguments.
        let addr = ptr.addr().bytes();
        if addr == 0 || !matches!(size, 4 | 8) || (flags != none && flags != shared) {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;

        let Some(futex) = this.get_sync_or_init(ptr, |_| {
            MacOsFutex {
                futex: Default::default(),
                size: Cell::new(size),
                shared: Cell::new(is_shared),
            }
        }) else {
            // No AllocId, or no live allocation at that AllocId. Return an
            // error code. (That seems nicer than silently doing something
            // non-intuitive.) This means that if an address gets reused by a
            // new allocation, we'll use an independent futex queue for this...
            // that seems acceptable.
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        };

        if futex.futex.waiters() == 0 {
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        // If there are waiters in the queue, they have all used the parameters
        // stored in `futex` (we check this in `os_sync_wait_on_address` above).
        // Detect mismatches between "our" parameters and the parameters used by
        // the waiters and return an error in that case.
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let futex_ref = futex.futex.clone();

        // See the Linux futex implementation for why this fence exists.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
        this.futex_wake(&futex_ref, u32::MAX, if all { usize::MAX } else { 1 })?;
        this.write_scalar(Scalar::from_i32(0), dest)?;
        interp_ok(())
    }

    /// Implements `os_unfair_lock_lock`.
    fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // Trying to get a poisoned lock. Just block forever...
            this.block_thread(
                BlockReason::Sleep,
                None,
                callback!(
                    @capture<'tcx> {}
                    |_this, _unblock: UnblockKind| {
                        panic!("we shouldn't wake up ever")
                    }
                ),
            );
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if this.mutex_is_locked(&mutex_ref) {
            if this.mutex_get_owner(&mutex_ref) == this.active_thread() {
                // Matching the current macOS implementation: abort on reentrant locking.
                throw_machine_stop!(TerminationInfo::Abort(
                    "attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
                ));
            }

            this.mutex_enqueue_and_block(&mutex_ref, None);
        } else {
            this.mutex_lock(&mutex_ref);
        }

        interp_ok(())
    }

    /// Implements `os_unfair_lock_trylock`.
    fn os_unfair_lock_trylock(
        &mut self,
        lock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // Trying to get a poisoned lock. That never works.
            this.write_scalar(Scalar::from_bool(false), dest)?;
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if this.mutex_is_locked(&mutex_ref) {
            // Contrary to the blocking lock function, this does not check for
            // reentrancy.
            this.write_scalar(Scalar::from_bool(false), dest)?;
        } else {
            this.mutex_lock(&mutex_ref);
            this.write_scalar(Scalar::from_bool(true), dest)?;
        }

        interp_ok(())
    }

    /// Implements `os_unfair_lock_unlock`.
    fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        };
        let mutex_ref = mutex_ref.clone();

        // Now, unlock.
        if this.mutex_unlock(&mutex_ref)?.is_none() {
            // Matching the current macOS implementation: abort.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        // If the lock is not locked by anyone now, it went quiet.
        // Reset to zero so that it can be moved and initialized again for the next phase.
        if !this.mutex_is_locked(&mutex_ref) {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }

    /// Implements `os_unfair_lock_assert_owner`.
    fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        };
        let mutex_ref = mutex_ref.clone();

        if !this.mutex_is_locked(&mutex_ref)
            || this.mutex_get_owner(&mutex_ref) != this.active_thread()
        {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        // The lock is definitely not quiet since we are the owner.

        interp_ok(())
    }

    /// Implements `os_unfair_lock_assert_not_owner`.
    fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if this.mutex_is_locked(&mutex_ref)
            && this.mutex_get_owner(&mutex_ref) == this.active_thread()
        {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
        }

        // If the lock is not locked by anyone now, it went quiet.
        // Reset to zero so that it can be moved and initialized again for the next phase.
        if !this.mutex_is_locked(&mutex_ref) {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }
}