miri/shims/unix/macos/sync.rs

//! Contains macOS-specific synchronization functions.
//!
//! For `os_unfair_lock`, see the documentation
//! <https://developer.apple.com/documentation/os/synchronization?language=objc>
//! and in case of underspecification its implementation
//! <https://github.com/apple-oss-distributions/libplatform/blob/a00a4cc36da2110578bcf3b8eeeeb93dcc7f4e11/src/os/lock.c#L645>.
//!
//! Note that we don't emulate every edge-case behaviour of the locks. Notably,
//! we don't abort when locking a lock owned by a thread that has already exited
//! and we do not detect copying of the lock, but macOS doesn't guarantee anything
//! in that case either.
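//!
//! As a rough illustration (not part of this module), programs checked by Miri
//! typically reach these shims through the `libc` crate's macOS bindings:
//! ```ignore (macOS-only)
//! let mut lock: libc::os_unfair_lock = libc::OS_UNFAIR_LOCK_INIT;
//! unsafe {
//!     libc::os_unfair_lock_lock(&mut lock);
//!     // ... critical section ...
//!     libc::os_unfair_lock_unlock(&mut lock);
//! }
//! ```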
use std::cell::Cell;
use std::time::Duration;

use rustc_abi::{Endian, FieldIdx, Size};

use crate::concurrency::sync::{AccessKind, FutexRef, SyncObj};
use crate::*;

#[derive(Clone)]
enum MacOsUnfairLock {
    Active { mutex_ref: MutexRef },
    /// If a lock gets copied while being held, we put it in this state.
    /// It seems like in the real implementation, the lock actually remembers who held it,
    /// and still behaves as if it was held by that thread in the new location. In Miri, we don't
    /// know who actually owns this lock at the moment.
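    ///
    /// A hedged sketch of how user code can end up here (copying the lock value
    /// while it is held; macOS makes no guarantees about the copy):
    /// ```ignore (macOS-only)
    /// let mut lock = libc::OS_UNFAIR_LOCK_INIT;
    /// unsafe { libc::os_unfair_lock_lock(&mut lock) };
    /// let copy = lock; // copied while held; Miri flags `copy` when it is next used
    /// ```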
    PermanentlyLockedByUnknown,
}

impl SyncObj for MacOsUnfairLock {
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if let MacOsUnfairLock::Active { mutex_ref } = self
            && !mutex_ref.queue_is_empty()
        {
            throw_ub_format!(
                "{access_kind} of `os_unfair_lock` is forbidden while the queue is non-empty"
            );
        }
        interp_ok(())
    }

    fn delete_on_write(&self) -> bool {
        true
    }
}

pub enum MacOsFutexTimeout<'a, 'tcx> {
    None,
    Relative { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
    Absolute { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
}

/// Metadata for a macOS futex.
///
/// Since macOS 14.4, Apple has exposed the previously private futex API consisting
/// of `os_sync_wait_on_address` (and friends) and `os_sync_wake_by_address_{any, all}`.
/// These work with different value sizes and flags, which are validated to be consistent.
/// This structure keeps track of both the futex queue and these values.
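///
/// A hedged sketch of the consistency rule tracked here, in terms of the public
/// API (names as in the `libc` crate's macOS bindings; `addr` stands for some
/// `*mut c_void` pointing at the atomic variable):
/// ```ignore (macOS-only)
/// // Thread A: waits on `addr` as a 4-byte futex currently holding 0.
/// unsafe { libc::os_sync_wait_on_address(addr, 0, 4, libc::OS_SYNC_WAIT_ON_ADDRESS_NONE) };
/// // Thread B: waking with size 8 while A is queued fails with EINVAL.
/// unsafe { libc::os_sync_wake_by_address_any(addr, 8, libc::OS_SYNC_WAKE_BY_ADDRESS_NONE) };
/// ```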
struct MacOsFutex {
    futex: FutexRef,
    /// The size in bytes of the atomic primitive underlying this futex.
    size: Cell<u64>,
    /// Whether the futex is shared across process boundaries.
    shared: Cell<bool>,
}

impl SyncObj for MacOsFutex {}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn os_unfair_lock_get_data<'a>(
        &'a mut self,
        lock_ptr: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, &'a MacOsUnfairLock>
    where
        'tcx: 'a,
    {
        // `os_unfair_lock_s` wraps a single `u32` field. We use the first byte to store the "init"
        // flag. Due to macOS always being little endian, that's the least significant byte.
        let this = self.eval_context_mut();
        assert!(this.tcx.data_layout.endian == Endian::Little);

        let lock = this.deref_pointer_as(lock_ptr, this.libc_ty_layout("os_unfair_lock_s"))?;
        this.get_immovable_sync_with_static_init(
            &lock,
            Size::ZERO, // offset for init tracking
            /* uninit_val */ 0,
            /* init_val */ 1,
            |this| {
                let field = this.project_field(&lock, FieldIdx::from_u32(0))?;
                let val = this.read_scalar(&field)?.to_u32()?;
                if val == 0 {
                    interp_ok(MacOsUnfairLock::Active { mutex_ref: MutexRef::new() })
                } else if val == 1 {
                    // This is a lock that got copied while it was initialized. We de-initialize
                    // locks when they get released, so it got copied while locked. Unfortunately
                    // that is something `std` needs to support (the guard could have been leaked).
                    // On the plus side, we know nobody was queued for the lock while it got copied;
                    // that would have been rejected by our `on_access`.
                    // The real implementation would apparently remember who held the old lock, and
                    // consider them to hold the copy as well -- but our copies don't preserve sync
                    // object metadata so we instead move the lock into a "permanently locked"
                    // state.
                    interp_ok(MacOsUnfairLock::PermanentlyLockedByUnknown)
                } else {
                    throw_ub_format!(
                        "`os_unfair_lock` was not properly initialized at this location, or it got overwritten"
                    );
                }
            },
        )
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Implements [`os_sync_wait_on_address`], [`os_sync_wait_on_address_with_deadline`]
    /// and [`os_sync_wait_on_address_with_timeout`].
    ///
    /// [`os_sync_wait_on_address`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address?language=objc
    /// [`os_sync_wait_on_address_with_deadline`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address_with_deadline?language=objc
    /// [`os_sync_wait_on_address_with_timeout`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address_with_timeout?language=objc
    fn os_sync_wait_on_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        value_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        timeout: MacOsFutexTimeout<'_, 'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_SHARED");
        let absolute_clock = this.eval_libc_u32("OS_CLOCK_MACH_ABSOLUTE_TIME");

        let ptr = this.read_pointer(addr_op)?;
        let value = this.read_scalar(value_op)?.to_u64()?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        let clock_timeout = match timeout {
            MacOsFutexTimeout::None => None,
            MacOsFutexTimeout::Relative { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Relative, timeout))
            }
            MacOsFutexTimeout::Absolute { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Absolute, timeout))
            }
        };

        // Perform validation of the arguments.
        let addr = ptr.addr().bytes();
        if addr == 0
            || !matches!(size, 4 | 8)
            || !addr.is_multiple_of(size)
            || (flags != none && flags != shared)
            || clock_timeout
                .is_some_and(|(clock, _, timeout)| clock != absolute_clock || timeout == 0)
        {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;
        let timeout = clock_timeout.map(|(_, anchor, timeout)| {
            // The only clock that is currently supported is the monotonic clock.
            // While the deadline argument of `os_sync_wait_on_address_with_deadline`
            // is actually not in nanoseconds but in the units of `mach_absolute_time`,
            // the two are equivalent in Miri.
            (TimeoutClock::Monotonic, anchor, Duration::from_nanos(timeout))
        });

        // See the Linux futex implementation for why this fence exists.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;

        let layout = this.machine.layouts.uint(Size::from_bytes(size)).unwrap();
        let futex_val = this
            .read_scalar_atomic(&this.ptr_to_mplace(ptr, layout), AtomicReadOrd::Acquire)?
            .to_bits(Size::from_bytes(size))?;

        let futex = this
            .get_sync_or_init(ptr, |_| {
                MacOsFutex {
                    futex: Default::default(),
                    size: Cell::new(size),
                    shared: Cell::new(is_shared),
                }
            })
            .unwrap();

        // Detect mismatches between the flags and sizes used on this address
        // by comparing them with the parameters used by the other waiters in
        // the current list. If the list is currently empty, update those
        // parameters.
        if futex.futex.waiters() == 0 {
            futex.size.set(size);
            futex.shared.set(is_shared);
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        if futex_val == value.into() {
            // If the values are the same, we have to block.
            let futex_ref = futex.futex.clone();
            let dest = dest.clone();
            this.futex_wait(
                futex_ref.clone(),
                u32::MAX, // bitset
                timeout,
                callback!(
                    @capture<'tcx> {
                        dest: MPlaceTy<'tcx>,
                        futex_ref: FutexRef,
                    }
                    |this, unblock: UnblockKind| {
                        match unblock {
                            UnblockKind::Ready => {
                                let remaining = futex_ref.waiters().try_into().unwrap();
                                this.write_scalar(Scalar::from_i32(remaining), &dest)
                            }
                            UnblockKind::TimedOut => {
                                this.set_last_error_and_return(LibcError("ETIMEDOUT"), &dest)
                            }
                        }
                    }
                ),
            );
        } else {
            // The values differ: don't block, just return the current number of waiters.
            let waiters = futex.futex.waiters().try_into().unwrap();
            this.write_scalar(Scalar::from_i32(waiters), dest)?;
        }

        interp_ok(())
    }

    /// Implements [`os_sync_wake_by_address_all`] and [`os_sync_wake_by_address_any`].
    ///
    /// [`os_sync_wake_by_address_all`]: https://developer.apple.com/documentation/os/os_sync_wake_by_address_all?language=objc
    /// [`os_sync_wake_by_address_any`]: https://developer.apple.com/documentation/os/os_sync_wake_by_address_any?language=objc
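    ///
    /// A hedged usage sketch from the caller's side (names as in the `libc` crate's
    /// macOS bindings), waking one waiter after updating the value:
    /// ```ignore (macOS-only)
    /// futex.store(1, std::sync::atomic::Ordering::Release);
    /// let ret = unsafe {
    ///     libc::os_sync_wake_by_address_any(
    ///         futex.as_ptr().cast(),
    ///         4, // must match the size the waiters used
    ///         libc::OS_SYNC_WAKE_BY_ADDRESS_NONE,
    ///     )
    /// };
    /// // Returns 0 on success; -1 with `errno` set to ENOENT if there was nobody to wake.
    /// ```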
    fn os_sync_wake_by_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        all: bool,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_SHARED");

        let ptr = this.read_pointer(addr_op)?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        // Perform validation of the arguments.
        let addr = ptr.addr().bytes();
        if addr == 0 || !matches!(size, 4 | 8) || (flags != none && flags != shared) {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;

        let Some(futex) = this.get_sync_or_init(ptr, |_| {
            MacOsFutex {
                futex: Default::default(),
                size: Cell::new(size),
                shared: Cell::new(is_shared),
            }
        }) else {
            // No AllocId, or no live allocation at that AllocId. Return an
            // error code. (That seems nicer than silently doing something
            // non-intuitive.) This means that if an address gets reused by a
            // new allocation, we'll use an independent futex queue for this...
            // that seems acceptable.
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        };

        if futex.futex.waiters() == 0 {
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        // If there are waiters in the queue, they have all used the parameters
        // stored in `futex` (we check this in `os_sync_wait_on_address` above).
        // Detect mismatches between "our" parameters and the parameters used by
        // the waiters and return an error in that case.
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let futex_ref = futex.futex.clone();

        // See the Linux futex implementation for why this fence exists.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
        this.futex_wake(&futex_ref, u32::MAX, if all { usize::MAX } else { 1 })?;
        this.write_scalar(Scalar::from_i32(0), dest)?;
        interp_ok(())
    }

    fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // Trying to lock a perma-locked lock. On macOS this would block or abort depending
            // on whether the current thread is considered to be the one holding this lock. We
            // don't know who is considered to be holding the lock so we don't know what to do.
            throw_unsup_format!(
                "attempted to lock an os_unfair_lock that was copied while being locked"
            );
        };
        let mutex_ref = mutex_ref.clone();

        if let Some(owner) = mutex_ref.owner() {
            if owner == this.active_thread() {
                // Matching the current macOS implementation: abort on reentrant locking.
                throw_machine_stop!(TerminationInfo::Abort(
                    "attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
                ));
            }

            this.mutex_enqueue_and_block(mutex_ref, None);
        } else {
            this.mutex_lock(&mutex_ref)?;
        }

        interp_ok(())
    }

    fn os_unfair_lock_trylock(
        &mut self,
        lock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // Trying to lock a perma-locked lock. That behaves the same no matter who the owner is
            // so we can implement the real behavior here.
            this.write_scalar(Scalar::from_bool(false), dest)?;
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_some() {
            // Contrary to the blocking lock function, this does not check for reentrancy.
            this.write_scalar(Scalar::from_bool(false), dest)?;
        } else {
            this.mutex_lock(&mutex_ref)?;
            this.write_scalar(Scalar::from_bool(true), dest)?;
        }

        interp_ok(())
    }

    fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // We don't know who the owner is so we cannot proceed.
            throw_unsup_format!(
                "attempted to unlock an os_unfair_lock that was copied while being locked"
            );
        };
        let mutex_ref = mutex_ref.clone();

        // Now, unlock.
        if this.mutex_unlock(&mutex_ref)?.is_none() {
            // Matching the current macOS implementation: abort.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        // If the lock is not locked by anyone now, it went quiet.
        // Reset to zero so that it can be moved and initialized again for the next phase.
        if mutex_ref.owner().is_none() {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }

    fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // We don't know who the owner is so we cannot proceed.
            throw_unsup_format!(
                "attempted to assert the owner of an os_unfair_lock that was copied while being locked"
            );
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_none_or(|o| o != this.active_thread()) {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        // The lock is definitely not quiet since we are the owner.

        interp_ok(())
    }

    fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // We don't know who the owner is so we cannot proceed.
            throw_unsup_format!(
                "attempted to assert non-ownership of an os_unfair_lock that was copied while being locked"
            );
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_some_and(|o| o == this.active_thread()) {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
        }

        // If the lock is not locked by anyone now, it went quiet.
        // Reset to zero so that it can be moved and initialized again for the next phase.
        if mutex_ref.owner().is_none() {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }
}