1use std::cell::Cell;
14use std::time::Duration;
15
16use rustc_abi::Size;
17
18use crate::concurrency::sync::FutexRef;
19use crate::*;
20
/// The state Miri tracks for an `os_unfair_lock`.
#[derive(Clone)]
enum MacOsUnfairLock {
    /// The lock's memory was found holding state we did not create (see the
    /// first closure in `os_unfair_lock_get_data`) — presumably the lock was
    /// moved while initialized. Operations on a poisoned lock degrade:
    /// locking blocks forever, unlocking aborts.
    Poisoned,
    /// A usable lock, backed by a Miri-level mutex.
    Active { mutex_ref: MutexRef },
}
26
/// Timeout configuration for `os_sync_wait_on_address` and its timed variants:
/// either no timeout, or a deadline anchored relatively/absolutely.
/// `clock_op` holds the clock id (only `OS_CLOCK_MACH_ABSOLUTE_TIME` is
/// accepted) and `timeout_op` the timeout as a `u64` nanosecond count.
pub enum MacOsFutexTimeout<'a, 'tcx> {
    None,
    Relative { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
    Absolute { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
}
32
/// Per-address state for the `os_sync_wait_on_address` futex family.
struct MacOsFutex {
    futex: FutexRef,
    /// Operand size (4 or 8 bytes) this futex is currently used with. Only
    /// meaningful while there are waiters: it is re-established whenever the
    /// waiter count is zero, and a mismatch while waiters exist is an error.
    size: Cell<u64>,
    /// Whether the futex is currently used in `SHARED` mode; same refresh
    /// rule as `size`.
    shared: Cell<bool>,
}
46
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the `MacOsUnfairLock` state attached to the `os_unfair_lock_s`
    /// that `lock_ptr` points to, creating a fresh `Active` lock on first use.
    fn os_unfair_lock_get_data<'a>(
        &'a mut self,
        lock_ptr: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, &'a MacOsUnfairLock>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        let lock = this.deref_pointer_as(lock_ptr, this.libc_ty_layout("os_unfair_lock_s"))?;
        this.lazy_sync_get_data(
            &lock,
            // NOTE(review): offset passed to the lazy-sync machinery — assumed
            // to locate the lock's state word at the start of the struct;
            // confirm against `lazy_sync_get_data`'s contract.
            Size::ZERO,
            || {
                // This closure runs when the memory already holds lock state
                // that was not installed through this path — presumably the
                // lock was moved while initialized. Mark it poisoned; callers
                // handle that variant explicitly.
                interp_ok(MacOsUnfairLock::Poisoned)
            },
            |ecx| {
                // First use of this lock: back it with a fresh Miri mutex.
                let mutex_ref = ecx.machine.sync.mutex_create();
                interp_ok(MacOsUnfairLock::Active { mutex_ref })
            },
        )
    }
}
78
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Shim for the macOS `os_sync_wait_on_address` family: if the memory at
    /// `addr_op` (of `size_op` bytes) still holds `value_op`, block the current
    /// thread until woken or until `timeout` expires.
    ///
    /// Writes the number of remaining waiters (an `i32`) to `dest` on return
    /// from the wait or when the value already differs; sets EINVAL for
    /// invalid arguments and ETIMEDOUT when the timeout fires.
    fn os_sync_wait_on_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        value_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        timeout: MacOsFutexTimeout<'_, 'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_SHARED");
        let absolute_clock = this.eval_libc_u32("OS_CLOCK_MACH_ABSOLUTE_TIME");

        let ptr = this.read_pointer(addr_op)?;
        let value = this.read_scalar(value_op)?.to_u64()?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        // Decode the timeout operands into (clock id, anchor, raw u64) — the
        // raw value is interpreted as nanoseconds further down.
        let clock_timeout = match timeout {
            MacOsFutexTimeout::None => None,
            MacOsFutexTimeout::Relative { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Relative, timeout))
            }
            MacOsFutexTimeout::Absolute { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Absolute, timeout))
            }
        };

        let addr = ptr.addr().bytes();
        // Argument validation: non-null, size 4 or 8, address aligned to the
        // operand size, a recognized flag, and (if a timeout was given) the
        // mach absolute clock with a non-zero duration.
        if addr == 0
            || !matches!(size, 4 | 8)
            || !addr.is_multiple_of(size)
            || (flags != none && flags != shared)
            || clock_timeout
                .is_some_and(|(clock, _, timeout)| clock != absolute_clock || timeout == 0)
        {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;
        // Miri models a single time source, so the mach absolute clock is
        // mapped onto the monotonic clock; the raw timeout is in nanoseconds.
        let timeout = clock_timeout.map(|(_, anchor, timeout)| {
            (TimeoutClock::Monotonic, anchor, Duration::from_nanos(timeout))
        });

        // Fence before the value check below, so this wait participates in the
        // SeqCst ordering the wake side (see `os_sync_wake_by_address`) also
        // establishes with its fence.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;

        // Atomically load the current value at the requested width.
        let layout = this.machine.layouts.uint(Size::from_bytes(size)).unwrap();
        let futex_val = this
            .read_scalar_atomic(&this.ptr_to_mplace(ptr, layout), AtomicReadOrd::Acquire)?
            .to_bits(Size::from_bytes(size))?;

        // Fetch, or lazily create, the futex state attached to this address.
        let futex = this
            .get_sync_or_init(ptr, |_| {
                MacOsFutex {
                    futex: Default::default(),
                    size: Cell::new(size),
                    shared: Cell::new(is_shared),
                }
            })
            .unwrap();

        // With no current waiters, the size/shared mode may be re-established
        // freely; with waiters present it must match theirs.
        if futex.futex.waiters() == 0 {
            futex.size.set(size);
            futex.shared.set(is_shared);
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        if futex_val == value.into() {
            // The expected value is still there: block until woken or timed out.
            let futex_ref = futex.futex.clone();
            let dest = dest.clone();
            this.futex_wait(
                futex_ref.clone(),
                u32::MAX, // bitset with all bits set: match any wake-up
                timeout,
                callback!(
                    @capture<'tcx> {
                        dest: MPlaceTy<'tcx>,
                        futex_ref: FutexRef,
                    }
                    |this, unblock: UnblockKind| {
                        match unblock {
                            UnblockKind::Ready => {
                                // Woken: report how many waiters remain blocked.
                                let remaining = futex_ref.waiters().try_into().unwrap();
                                this.write_scalar(Scalar::from_i32(remaining), &dest)
                            }
                            UnblockKind::TimedOut => {
                                this.set_last_error_and_return(LibcError("ETIMEDOUT"), &dest)
                            }
                        }
                    }
                ),
            );
        } else {
            // The value already changed: return immediately with the current
            // waiter count.
            let waiters = futex.futex.waiters().try_into().unwrap();
            this.write_scalar(Scalar::from_i32(waiters), dest)?;
        }

        interp_ok(())
    }

    /// Shim for the macOS `os_sync_wake_by_address_any`/`_all` calls: wake one
    /// (`all == false`) or every (`all == true`) thread waiting on the futex at
    /// `addr_op`.
    ///
    /// Writes 0 to `dest` on success; sets ENOENT when there is nobody to wake
    /// and EINVAL for invalid arguments or a size/shared-mode mismatch with the
    /// existing waiters.
    fn os_sync_wake_by_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        all: bool,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_SHARED");

        let ptr = this.read_pointer(addr_op)?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        let addr = ptr.addr().bytes();
        // Argument validation: non-null address, size 4 or 8, recognized flag.
        if addr == 0 || !matches!(size, 4 | 8) || (flags != none && flags != shared) {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;

        // If no futex state exists for this address, nobody ever waited here.
        let Some(futex) = this.get_sync_or_init(ptr, |_| {
            MacOsFutex {
                futex: Default::default(),
                size: Cell::new(size),
                shared: Cell::new(is_shared),
            }
        }) else {
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        };

        if futex.futex.waiters() == 0 {
            // Futex state exists but nobody is currently waiting.
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            // Mode mismatch with the threads currently waiting.
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let futex_ref = futex.futex.clone();

        // Pair with the fence on the wait side before performing the wake.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
        this.futex_wake(&futex_ref, u32::MAX, if all { usize::MAX } else { 1 })?;
        this.write_scalar(Scalar::from_i32(0), dest)?;
        interp_ok(())
    }

    /// Shim for `os_unfair_lock_lock`: acquire the lock, blocking if it is
    /// held by another thread. Re-locking on the owning thread aborts the
    /// program (the real lock is non-recursive).
    fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // Poisoned lock: it can never be acquired again, so this thread
            // deadlocks — block it forever with a callback that must not fire.
            this.block_thread(
                BlockReason::Sleep,
                None,
                callback!(
                    @capture<'tcx> {}
                    |_this, _unblock: UnblockKind| {
                        panic!("we shouldn't wake up ever")
                    }
                ),
            );
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if this.mutex_is_locked(&mutex_ref) {
            if this.mutex_get_owner(&mutex_ref) == this.active_thread() {
                // Attempted recursive locking is a hard error on macOS.
                throw_machine_stop!(TerminationInfo::Abort(
                    "attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
                ));
            }

            this.mutex_enqueue_and_block(&mutex_ref, None);
        } else {
            this.mutex_lock(&mutex_ref);
        }

        interp_ok(())
    }

    /// Shim for `os_unfair_lock_trylock`: attempt to acquire the lock without
    /// blocking, writing `true` to `dest` on success and `false` otherwise
    /// (including for a poisoned lock).
    fn os_unfair_lock_trylock(
        &mut self,
        lock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // A poisoned lock can never be acquired; report failure.
            this.write_scalar(Scalar::from_bool(false), dest)?;
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if this.mutex_is_locked(&mutex_ref) {
            this.write_scalar(Scalar::from_bool(false), dest)?;
        } else {
            this.mutex_lock(&mutex_ref);
            this.write_scalar(Scalar::from_bool(true), dest)?;
        }

        interp_ok(())
    }

    /// Shim for `os_unfair_lock_unlock`: release the lock. Unlocking a lock
    /// not owned by the current thread (including a poisoned one) aborts.
    fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        };
        let mutex_ref = mutex_ref.clone();

        // `mutex_unlock` returns `None` when the current thread does not hold
        // the mutex — that is the "not owned" error case.
        if this.mutex_unlock(&mutex_ref)?.is_none() {
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        if !this.mutex_is_locked(&mutex_ref) {
            // Lock is fully released: reset its in-memory word to 0.
            // NOTE(review): presumably this restores the quiescent
            // representation so the lock can be legitimately moved/reused
            // (cf. `MacOsUnfairLock::Poisoned`) — confirm against the
            // lazy-sync machinery.
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }

    /// Shim for `os_unfair_lock_assert_owner`: abort unless the lock is held
    /// by the current thread. A poisoned lock can be owned by no one, so it
    /// always aborts.
    fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        };
        let mutex_ref = mutex_ref.clone();

        if !this.mutex_is_locked(&mutex_ref)
            || this.mutex_get_owner(&mutex_ref) != this.active_thread()
        {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        interp_ok(())
    }

    /// Shim for `os_unfair_lock_assert_not_owner`: abort if the lock IS held
    /// by the current thread. A poisoned lock is owned by no one, so the
    /// assertion trivially holds.
    fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if this.mutex_is_locked(&mutex_ref)
            && this.mutex_get_owner(&mutex_ref) == this.active_thread()
        {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
        }

        if !this.mutex_is_locked(&mutex_ref) {
            // Lock is not held by anyone: reset its in-memory word to 0, same
            // rationale as in `os_unfair_lock_unlock` — NOTE(review):
            // presumably restores the quiescent representation so the lock can
            // be moved/reused; confirm against the lazy-sync machinery.
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }
}