//! Shims for macOS-specific synchronization primitives: the `os_unfair_lock_*`
//! family and the futex-style `os_sync_wait_on_address`/`os_sync_wake_by_address`
//! family.

use std::cell::Cell;
use std::time::Duration;

use rustc_abi::{Endian, FieldIdx, Size};

use crate::concurrency::sync::{AccessKind, FutexRef, SyncObj};
use crate::*;

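/// The Miri-side state of an `os_unfair_lock`.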
#[derive(Clone)]
enum MacOsUnfairLock {
    /// A lock that Miri tracks like a regular mutex.
    Active {
        mutex_ref: MutexRef,
    },
    /// A lock that was moved or copied while it was held: we do not know who
    /// owns it, so we treat it as locked forever.
    PermanentlyLockedByUnknown,
}

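// `on_access` forbids mixing lock operations with plain memory accesses while
// threads are queued on the lock; `delete_on_write` makes any write to the
// lock's memory discard this state, so the lock is re-created from the
// in-memory value on next use.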
impl SyncObj for MacOsUnfairLock {
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if let MacOsUnfairLock::Active { mutex_ref } = self
            && !mutex_ref.queue_is_empty()
        {
            throw_ub_format!(
                "{access_kind} of `os_unfair_lock` is forbidden while the queue is non-empty"
            );
        }
        interp_ok(())
    }

    fn delete_on_write(&self) -> bool {
        true
    }
}

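/// The timeout argument of an `os_sync_wait_on_address` call: no timeout, a
/// timeout relative to now (the `_with_timeout` variant), or an absolute
/// deadline (the `_with_deadline` variant). Each variant carries the operands
/// for the clock and the timeout value.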
pub enum MacOsFutexTimeout<'a, 'tcx> {
    None,
    Relative { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
    Absolute { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
}

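/// The Miri-side state of an `os_sync_wait_on_address` futex.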
struct MacOsFutex {
    futex: FutexRef,
    /// The operand size (4 or 8 bytes) this futex is used with.
    /// Only meaningful while there are waiters.
    size: Cell<u64>,
    /// Whether this futex is used with the "shared" flag.
    /// Only meaningful while there are waiters.
    shared: Cell<bool>,
}

impl SyncObj for MacOsFutex {}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn os_unfair_lock_get_data<'a>(
        &'a mut self,
        lock_ptr: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, &'a MacOsUnfairLock>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        // The raw state values used below assume a little-endian target.
        assert!(this.tcx.data_layout.endian == Endian::Little);

        let lock = this.deref_pointer_as(lock_ptr, this.libc_ty_layout("os_unfair_lock_s"))?;
        // Get or lazily create the Miri-side lock state for this location,
        // interpreting the in-memory value if no state exists yet.
        this.get_immovable_sync_with_static_init(
            &lock,
            Size::ZERO,
            0,
            1,
            |this| {
                let field = this.project_field(&lock, FieldIdx::from_u32(0))?;
                let val = this.read_scalar(&field)?.to_u32()?;
                if val == 0 {
                    // 0 is `OS_UNFAIR_LOCK_INIT`: a fresh, unlocked lock.
                    interp_ok(MacOsUnfairLock::Active { mutex_ref: MutexRef::new() })
                } else if val == 1 {
                    // A value of 1 indicates a lock that was in use, but we have no
                    // Miri-side state for it: it must have been moved or copied here
                    // while locked, so nobody we know of can ever unlock it.
                    interp_ok(MacOsUnfairLock::PermanentlyLockedByUnknown)
                } else {
                    throw_ub_format!(
                        "`os_unfair_lock` was not properly initialized at this location, or it got overwritten"
                    );
                }
            },
        )
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
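    /// Implements `os_sync_wait_on_address` and its `_with_deadline`/`_with_timeout`
    /// variants: blocks the current thread if the 4- or 8-byte value at `addr_op`
    /// still equals `value_op`, until a wake call or the timeout (if any) fires.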
    fn os_sync_wait_on_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        value_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        timeout: MacOsFutexTimeout<'_, 'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_SHARED");
        let absolute_clock = this.eval_libc_u32("OS_CLOCK_MACH_ABSOLUTE_TIME");

        let ptr = this.read_pointer(addr_op)?;
        let value = this.read_scalar(value_op)?.to_u64()?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        let clock_timeout = match timeout {
            MacOsFutexTimeout::None => None,
            MacOsFutexTimeout::Relative { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Relative, timeout))
            }
            MacOsFutexTimeout::Absolute { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Absolute, timeout))
            }
        };

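        // Reject the argument combinations that get reported as EINVAL: null or
        // misaligned addresses, sizes other than 4 or 8, unknown flags, and (for
        // the timed variants) a clock other than mach absolute time or a zero
        // timeout.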
        let addr = ptr.addr().bytes();
        if addr == 0
            || !matches!(size, 4 | 8)
            || !addr.is_multiple_of(size)
            || (flags != none && flags != shared)
            || clock_timeout
                .is_some_and(|(clock, _, timeout)| clock != absolute_clock || timeout == 0)
        {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;
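        // Only the mach absolute time clock is accepted (checked above); we model
        // its ticks as nanoseconds on our monotonic clock.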
        let timeout = clock_timeout.map(|(_, anchor, timeout)| {
            (TimeoutClock::Monotonic, anchor, Duration::from_nanos(timeout))
        });

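        // This call acts as a synchronization point: fence, then read the current
        // value atomically so we can decide whether to block.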
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;

        // `size` is 4 or 8 (checked above), so a uint layout of that size exists.
        let layout = this.machine.layouts.uint(Size::from_bytes(size)).unwrap();
        let futex_val = this
            .read_scalar_atomic(&this.ptr_to_mplace(ptr, layout), AtomicReadOrd::Acquire)?
            .to_bits(Size::from_bytes(size))?;

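        // Get the futex state for this address, creating it on first use.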
        let futex = this
            .get_sync_or_init(ptr, |_| {
                MacOsFutex {
                    futex: Default::default(),
                    size: Cell::new(size),
                    shared: Cell::new(is_shared),
                }
            })
            // The atomic read above succeeded, so this is a live allocation.
            .unwrap();

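        // If there are no waiters, this wait call determines the futex's size and
        // sharedness; otherwise they must match what the existing waiters used.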
        if futex.futex.waiters() == 0 {
            futex.size.set(size);
            futex.shared.set(is_shared);
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

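        // Standard futex semantics: only block if the value still matches the
        // expected one; otherwise return immediately with the current number of
        // waiters. On wakeup, the call returns the number of remaining waiters.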
        if futex_val == value.into() {
            let futex_ref = futex.futex.clone();
            let dest = dest.clone();
            this.futex_wait(
                futex_ref.clone(),
                u32::MAX, // bitset: we do not use bitset-filtered wakeups
                timeout,
                callback!(
                    @capture<'tcx> {
                        dest: MPlaceTy<'tcx>,
                        futex_ref: FutexRef,
                    }
                    |this, unblock: UnblockKind| {
                        match unblock {
                            UnblockKind::Ready => {
                                // Woken by a wake call: report how many waiters remain.
                                let remaining = futex_ref.waiters().try_into().unwrap();
                                this.write_scalar(Scalar::from_i32(remaining), &dest)
                            }
                            UnblockKind::TimedOut => {
                                this.set_last_error_and_return(LibcError("ETIMEDOUT"), &dest)
                            }
                        }
                    }
                ),
            );
        } else {
            // The value no longer matches: do not block, just return the current
            // number of waiters.
            let waiters = futex.futex.waiters().try_into().unwrap();
            this.write_scalar(Scalar::from_i32(waiters), dest)?;
        }

        interp_ok(())
    }

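    /// Implements `os_sync_wake_by_address_any` (`all == false`) and
    /// `os_sync_wake_by_address_all` (`all == true`): wakes one or all threads
    /// blocked in `os_sync_wait_on_address` on this address.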
    fn os_sync_wake_by_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        all: bool,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_SHARED");

        let ptr = this.read_pointer(addr_op)?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        // Reject invalid addresses, sizes, and flags with EINVAL.
        let addr = ptr.addr().bytes();
        if addr == 0 || !matches!(size, 4 | 8) || (flags != none && flags != shared) {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;

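        // If there is no live allocation at this address, nobody can be waiting
        // on it; the `else` branch below reports that as ENOENT.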
        let Some(futex) = this.get_sync_or_init(ptr, |_| {
            MacOsFutex {
                futex: Default::default(),
                size: Cell::new(size),
                shared: Cell::new(is_shared),
            }
        }) else {
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        };

        if futex.futex.waiters() == 0 {
            // Nobody is waiting: this is reported as ENOENT.
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            // The waiters used a different size or sharedness: that is EINVAL.
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let futex_ref = futex.futex.clone();

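        // Like the wait operation, the wake acts as a synchronization point.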
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
        this.futex_wake(&futex_ref, u32::MAX, if all { usize::MAX } else { 1 })?;
        this.write_scalar(Scalar::from_i32(0), dest)?;
        interp_ok(())
    }

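    /// Implements `os_unfair_lock_lock`: blocks until the lock can be acquired.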
    fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock was moved or copied while held by an unknown thread; a real
            // program would most likely block forever here, which we cannot
            // usefully model, so report it as unsupported instead.
            throw_unsup_format!(
                "attempted to lock an os_unfair_lock that was copied while being locked"
            );
        };
        let mutex_ref = mutex_ref.clone();

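        // `os_unfair_lock` is not reentrant: locking it again on the owning thread
        // aborts the process on macOS, so we do the same.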
        if let Some(owner) = mutex_ref.owner() {
            if owner == this.active_thread() {
                throw_machine_stop!(TerminationInfo::Abort(
                    "attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
                ));
            }

            this.mutex_enqueue_and_block(mutex_ref, None);
        } else {
            this.mutex_lock(&mutex_ref)?;
        }

        interp_ok(())
    }

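    /// Implements `os_unfair_lock_trylock`: writes `true` to `dest` if the lock
    /// was acquired, `false` if it is already held (even by the current thread).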
    fn os_unfair_lock_trylock(
        &mut self,
        lock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is held by an unknown party (it was copied while locked),
            // so trying to acquire it simply fails.
            this.write_scalar(Scalar::from_bool(false), dest)?;
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_some() {
            // Contrary to `lock`, this does not abort when the current thread
            // already holds the lock; it just fails.
            this.write_scalar(Scalar::from_bool(false), dest)?;
        } else {
            this.mutex_lock(&mutex_ref)?;
            this.write_scalar(Scalar::from_bool(true), dest)?;
        }

        interp_ok(())
    }

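    /// Implements `os_unfair_lock_unlock`: releases the lock, aborting if the
    /// current thread does not hold it.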
    fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // We do not know who holds this lock, so we cannot tell whether this
            // unlock is legal; report it as unsupported.
            throw_unsup_format!(
                "attempted to unlock an os_unfair_lock that was copied while being locked"
            );
        };
        let mutex_ref = mutex_ref.clone();

        // Unlocking a lock that the current thread does not hold aborts on macOS.
        if this.mutex_unlock(&mutex_ref)?.is_none() {
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

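        // If nobody holds the lock anymore, reset its memory to the unlocked state,
        // so that it can legally be moved or re-initialized from here on.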
        if mutex_ref.owner().is_none() {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }

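    /// Implements `os_unfair_lock_assert_owner`: aborts unless the current thread
    /// holds the lock.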
    fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The owner is unknown, so we cannot check this assertion.
            throw_unsup_format!(
                "attempted to assert the owner of an os_unfair_lock that was copied while being locked"
            );
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_none_or(|o| o != this.active_thread()) {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        interp_ok(())
    }

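    /// Implements `os_unfair_lock_assert_not_owner`: aborts if the current thread
    /// holds the lock.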
    fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The owner is unknown (it could even be the current thread), so we
            // cannot check this assertion either.
            throw_unsup_format!(
                "attempted to assert the owner of an os_unfair_lock that was copied while being locked"
            );
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_some_and(|o| o == this.active_thread()) {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
        }

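        // As in `os_unfair_lock_unlock`: if the lock is free, reset its memory so
        // the lock can legally be moved or re-initialized from here on.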
        if mutex_ref.owner().is_none() {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }
}