// miri/shims/unix/sync.rs

1use rustc_abi::Size;
2use rustc_target::spec::Os;
3
4use crate::concurrency::sync::{AccessKind, SyncObj};
5use crate::*;
6
7/// Do a bytewise comparison of the two places. This is used to check if
8/// a synchronization primitive matches its static initializer value.
9///
10/// `prefix`, if set, indicates that only the first N bytes should be compared.
11fn bytewise_equal<'tcx>(
12    ecx: &MiriInterpCx<'tcx>,
13    left: &MPlaceTy<'tcx>,
14    right: &MPlaceTy<'tcx>,
15    prefix: Option<u64>,
16) -> InterpResult<'tcx, bool> {
17    let size = left.layout.size;
18    assert_eq!(size, right.layout.size);
19    let cmp_size = prefix.map(Size::from_bytes).unwrap_or(size);
20
21    let left_bytes = ecx.read_bytes_ptr_strip_provenance(left.ptr(), cmp_size)?;
22    let right_bytes = ecx.read_bytes_ptr_strip_provenance(right.ptr(), cmp_size)?;
23
24    interp_ok(left_bytes == right_bytes)
25}
26
// The in-memory marker values we use to indicate whether objects have been initialized.
// These are written into the "init" byte of the pthread object, at the offset computed
// by the `*_init_offset` functions below.
const PTHREAD_UNINIT: u8 = 0;
const PTHREAD_INIT: u8 = 1;
30
31// # pthread_mutexattr_t
32// We store some data directly inside the type, ignoring the platform layout:
33// - kind: i32
34
35#[inline]
36fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
37    interp_ok(match &ecx.tcx.sess.target.os {
38        Os::Linux | Os::Illumos | Os::Solaris | Os::MacOs | Os::FreeBsd | Os::Android => 0,
39        os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
40    })
41}
42
43fn mutexattr_get_kind<'tcx>(
44    ecx: &MiriInterpCx<'tcx>,
45    attr_ptr: &OpTy<'tcx>,
46) -> InterpResult<'tcx, i32> {
47    ecx.deref_pointer_and_read(
48        attr_ptr,
49        mutexattr_kind_offset(ecx)?,
50        ecx.libc_ty_layout("pthread_mutexattr_t"),
51        ecx.machine.layouts.i32,
52    )?
53    .to_i32()
54}
55
56fn mutexattr_set_kind<'tcx>(
57    ecx: &mut MiriInterpCx<'tcx>,
58    attr_ptr: &OpTy<'tcx>,
59    kind: i32,
60) -> InterpResult<'tcx, ()> {
61    ecx.deref_pointer_and_write(
62        attr_ptr,
63        mutexattr_kind_offset(ecx)?,
64        Scalar::from_i32(kind),
65        ecx.libc_ty_layout("pthread_mutexattr_t"),
66        ecx.machine.layouts.i32,
67    )
68}
69
/// To differentiate "the mutex kind has not been changed" from
/// "the mutex kind has been set to PTHREAD_MUTEX_DEFAULT and that is
/// equal to some other mutex kind", we make the default value of this
/// field *not* PTHREAD_MUTEX_DEFAULT but this special flag.
// NOTE(review): assumed not to collide with any libc PTHREAD_MUTEX_* constant;
// `pthread_mutexattr_settype` asserts this for the kinds it accepts.
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;
75
/// Translates the mutex kind from what is stored in pthread_mutexattr_t to our enum.
///
/// Raises an unsupported-operation error for any value that matches neither a known
/// PTHREAD_MUTEX_* constant nor the "unchanged" marker.
fn mutexattr_translate_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, MutexKind> {
    interp_ok(if kind == (ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")) {
        MutexKind::Normal
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
        MutexKind::ErrorCheck
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
        MutexKind::Recursive
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
        || kind == PTHREAD_MUTEX_KIND_UNCHANGED
    {
        // We check this *last* since PTHREAD_MUTEX_DEFAULT may be numerically equal to one of the
        // others, and we want an explicit `mutexattr_settype` to work as expected.
        MutexKind::Default
    } else {
        throw_unsup_format!("unsupported type of mutex: {kind}");
    })
}
97
98// # pthread_mutex_t
99// We store some data directly inside the type, ignoring the platform layout:
100// - init: u8
101
/// The mutex kind.
#[derive(Debug, Clone, Copy)]
enum MutexKind {
    /// PTHREAD_MUTEX_NORMAL: re-locking by the owner deadlocks; unlocking when not
    /// the owner is treated as UB (see `pthread_mutex_lock`/`pthread_mutex_unlock`).
    Normal,
    /// PTHREAD_MUTEX_DEFAULT (or an attr whose kind was never changed): re-locking or
    /// unlocking when not the owner is UB.
    Default,
    /// PTHREAD_MUTEX_RECURSIVE: re-locking by the owner is permitted.
    Recursive,
    /// PTHREAD_MUTEX_ERRORCHECK: re-locking returns EDEADLK, foreign unlock returns EPERM.
    ErrorCheck,
}
110
/// Interpreter-side state associated with a `pthread_mutex_t`.
#[derive(Debug, Clone)]
struct PthreadMutex {
    // The Miri-internal mutex this pthread object maps to.
    mutex_ref: MutexRef,
    // The kind, determined at creation time (from the attr or static initializer).
    kind: MutexKind,
}
116
117impl SyncObj for PthreadMutex {
118    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
119        if !self.mutex_ref.queue_is_empty() {
120            throw_ub_format!(
121                "{access_kind} of `pthread_mutex_t` is forbidden while the queue is non-empty"
122            );
123        }
124        interp_ok(())
125    }
126
127    fn delete_on_write(&self) -> bool {
128        true
129    }
130}
131
/// To ensure an initialized mutex that was moved somewhere else can be distinguished from
/// a statically initialized mutex that is used the first time, we pick some offset within
/// `pthread_mutex_t` and use it as an "initialized" flag.
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        Os::MacOs => 4,
        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_MUTEX_INITIALIZER (but only once per execution,
    // tracked via the `pthread_mutex_sanity` flag): the `init` byte of every static
    // initializer must start out as PTHREAD_UNINIT, i.e. *not* the PTHREAD_INIT marker.
    if !ecx.machine.pthread_mutex_sanity.replace(true) {
        let check_static_initializer = |name| {
            let static_initializer = ecx.eval_path(&["libc", name]);
            let init_field =
                static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
            let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
            assert_eq!(
                init, PTHREAD_UNINIT,
                "{name} is incompatible with our initialization logic"
            );
        };

        check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
        // Check non-standard initializers.
        match &ecx.tcx.sess.target.os {
            Os::Linux => {
                check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
            }
            Os::Illumos | Os::Solaris | Os::MacOs | Os::FreeBsd | Os::Android => {
                // No non-standard initializers.
            }
            os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
        }
    }

    interp_ok(offset)
}
175
176/// Eagerly create and initialize a new mutex.
177fn mutex_create<'tcx>(
178    ecx: &mut MiriInterpCx<'tcx>,
179    mutex_ptr: &OpTy<'tcx>,
180    kind: MutexKind,
181) -> InterpResult<'tcx, PthreadMutex> {
182    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
183    let data = PthreadMutex { mutex_ref: MutexRef::new(), kind };
184    ecx.init_immovable_sync(&mutex, mutex_init_offset(ecx)?, PTHREAD_INIT, data.clone())?;
185    interp_ok(data)
186}
187
/// Returns the mutex data stored at the address that `mutex_ptr` points to.
/// Will raise an error if the mutex has been moved since its first use.
fn mutex_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
    'tcx: 'a,
{
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &mutex,
        mutex_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        // Presumably invoked on first use, while the init byte still reads PTHREAD_UNINIT:
        // derive the kind from the static initializer bytes (errors if they match none).
        |ecx| {
            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
            interp_ok(PthreadMutex { mutex_ref: MutexRef::new(), kind })
        },
    )
}
209
210/// Returns the kind of a static initializer.
211fn mutex_kind_from_static_initializer<'tcx>(
212    ecx: &MiriInterpCx<'tcx>,
213    mutex: &MPlaceTy<'tcx>,
214) -> InterpResult<'tcx, MutexKind> {
215    let prefix = match &ecx.tcx.sess.target.os {
216        // On android, there's a 4-byte `value` header followed by "padding", and some versions
217        // of libc leave that uninitialized. Only check the `value` bytes.
218        Os::Android => Some(4),
219        _ => None,
220    };
221    let is_initializer = |name| bytewise_equal(ecx, mutex, &ecx.eval_path(&["libc", name]), prefix);
222
223    // All the static initializers recognized here *must* be checked in `mutex_init_offset`!
224
225    // PTHREAD_MUTEX_INITIALIZER is recognized on all targets.
226    if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
227        return interp_ok(MutexKind::Default);
228    }
229    // Support additional platform-specific initializers.
230    match &ecx.tcx.sess.target.os {
231        Os::Linux =>
232            if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
233                return interp_ok(MutexKind::Recursive);
234            } else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
235                return interp_ok(MutexKind::ErrorCheck);
236            },
237        _ => {}
238    }
239    throw_ub_format!(
240        "`pthread_mutex_t` was not properly initialized at this location, or it got overwritten"
241    );
242}
243
244// # pthread_rwlock_t
245// We store some data directly inside the type, ignoring the platform layout:
246// - init: u8
247
/// Interpreter-side state associated with a `pthread_rwlock_t`.
#[derive(Debug, Clone)]
struct PthreadRwLock {
    // The Miri-internal rwlock this pthread object maps to.
    rwlock_ref: RwLockRef,
}
252
253impl SyncObj for PthreadRwLock {
254    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
255        if !self.rwlock_ref.queue_is_empty() {
256            throw_ub_format!(
257                "{access_kind} of `pthread_rwlock_t` is forbidden while the queue is non-empty"
258            );
259        }
260        interp_ok(())
261    }
262
263    fn delete_on_write(&self) -> bool {
264        true
265    }
266}
267
268fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
269    let offset = match &ecx.tcx.sess.target.os {
270        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
271        // macOS stores a signature in the first bytes, so we move to offset 4.
272        Os::MacOs => 4,
273        os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
274    };
275    let offset = Size::from_bytes(offset);
276
277    // Sanity-check this against PTHREAD_RWLOCK_INITIALIZER (but only once):
278    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
279    if !ecx.machine.pthread_rwlock_sanity.replace(true) {
280        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
281        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
282        let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
283        assert_eq!(
284            init, PTHREAD_UNINIT,
285            "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization logic"
286        );
287    }
288
289    interp_ok(offset)
290}
291
/// Returns the rwlock data stored at the address that `rwlock_ptr` points to.
/// Will raise an error if the rwlock has been moved since its first use.
fn rwlock_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
    'tcx: 'a,
{
    let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &rwlock,
        rwlock_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        // Presumably invoked on first use, while the init byte still reads PTHREAD_UNINIT:
        // the object bytes must then match the static initializer.
        |ecx| {
            let prefix = match &ecx.tcx.sess.target.os {
                // On android, there's a 4-byte `value` header followed by "padding", and some
                // versions of libc leave that uninitialized. Only check the `value` bytes.
                Os::Android => Some(4),
                _ => None,
            };
            if !bytewise_equal(
                ecx,
                &rwlock,
                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
                prefix,
            )? {
                throw_ub_format!(
                    "`pthread_rwlock_t` was not properly initialized at this location, or it got overwritten"
                );
            }
            interp_ok(PthreadRwLock { rwlock_ref: RwLockRef::new() })
        },
    )
}
326
327// # pthread_condattr_t
328// We store some data directly inside the type, ignoring the platform layout:
329// - clock: i32
330
331#[inline]
332fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
333    interp_ok(match &ecx.tcx.sess.target.os {
334        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
335        // macOS does not have a clock attribute.
336        os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
337    })
338}
339
340fn condattr_get_clock_id<'tcx>(
341    ecx: &MiriInterpCx<'tcx>,
342    attr_ptr: &OpTy<'tcx>,
343) -> InterpResult<'tcx, Scalar> {
344    ecx.deref_pointer_and_read(
345        attr_ptr,
346        condattr_clock_offset(ecx)?,
347        ecx.libc_ty_layout("pthread_condattr_t"),
348        ecx.machine.layouts.i32,
349    )
350}
351
352fn condattr_set_clock_id<'tcx>(
353    ecx: &mut MiriInterpCx<'tcx>,
354    attr_ptr: &OpTy<'tcx>,
355    clock_id: i32,
356) -> InterpResult<'tcx, ()> {
357    ecx.deref_pointer_and_write(
358        attr_ptr,
359        condattr_clock_offset(ecx)?,
360        Scalar::from_i32(clock_id),
361        ecx.libc_ty_layout("pthread_condattr_t"),
362        ecx.machine.layouts.i32,
363    )
364}
365
366// # pthread_cond_t
367// We store some data directly inside the type, ignoring the platform layout:
368// - init: u8
369
/// Picks the offset of the "initialized" byte within `pthread_cond_t`
/// (see `mutex_init_offset` for the rationale).
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        Os::MacOs => 4,
        os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_COND_INITIALIZER (but only once):
    // the `init` byte must start out as PTHREAD_UNINIT.
    if !ecx.machine.pthread_condvar_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
        assert_eq!(
            init, PTHREAD_UNINIT,
            "PTHREAD_COND_INITIALIZER is incompatible with our initialization logic"
        );
    }

    interp_ok(offset)
}
393
/// Interpreter-side state associated with a `pthread_cond_t`.
#[derive(Debug, Clone)]
struct PthreadCondvar {
    // The Miri-internal condvar this pthread object maps to.
    condvar_ref: CondvarRef,
    // The clock used for timed waits, fixed at initialization
    // (from the attr, or CLOCK_REALTIME for static initializers).
    clock: TimeoutClock,
}
399
400impl SyncObj for PthreadCondvar {
401    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
402        if !self.condvar_ref.queue_is_empty() {
403            throw_ub_format!(
404                "{access_kind} of `pthread_cond_t` is forbidden while the queue is non-empty"
405            );
406        }
407        interp_ok(())
408    }
409
410    fn delete_on_write(&self) -> bool {
411        true
412    }
413}
414
415fn cond_create<'tcx>(
416    ecx: &mut MiriInterpCx<'tcx>,
417    cond_ptr: &OpTy<'tcx>,
418    clock: TimeoutClock,
419) -> InterpResult<'tcx, PthreadCondvar> {
420    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
421    let data = PthreadCondvar { condvar_ref: CondvarRef::new(), clock };
422    ecx.init_immovable_sync(&cond, cond_init_offset(ecx)?, PTHREAD_INIT, data.clone())?;
423    interp_ok(data)
424}
425
/// Returns the condvar data stored at the address that `cond_ptr` points to.
/// Will raise an error if the condvar has been moved since its first use.
fn cond_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
    'tcx: 'a,
{
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &cond,
        cond_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        // Presumably invoked on first use, while the init byte still reads PTHREAD_UNINIT:
        // the object bytes must then match the static initializer.
        |ecx| {
            let prefix = match &ecx.tcx.sess.target.os {
                // On android, there's a 4-byte `value` header followed by "padding", and some
                // versions of libc leave that uninitialized. Only check the `value` bytes.
                Os::Android => Some(4),
                _ => None,
            };
            if !bytewise_equal(
                ecx,
                &cond,
                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
                prefix,
            )? {
                throw_ub_format!(
                    "`pthread_cond_t` was not properly initialized at this location, or it got overwritten"
                );
            }
            // This used the static initializer. The clock there is always CLOCK_REALTIME.
            interp_ok(PthreadCondvar {
                condvar_ref: CondvarRef::new(),
                clock: TimeoutClock::RealTime,
            })
        },
    )
}
464
465impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
466pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
467    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
468        let this = self.eval_context_mut();
469
470        mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;
471
472        interp_ok(())
473    }
474
    /// Shim for `pthread_mutexattr_settype`: stores the kind in the attr,
    /// returning 0 on success or EINVAL for an unrecognized kind.
    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: &OpTy<'tcx>,
        kind_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.to_i32()?;
        // Only the four standard kinds are accepted; anything else yields EINVAL.
        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
        {
            // Make sure we do not mix this up with the "unchanged" kind.
            assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }
498
499    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
500        let this = self.eval_context_mut();
501
502        // Destroying an uninit pthread_mutexattr is UB, so check to make sure it's not uninit.
503        mutexattr_get_kind(this, attr_op)?;
504
505        // To catch double-destroys, we de-initialize the mutexattr.
506        // This is technically not right and might lead to false positives. For example, the below
507        // code is *likely* sound, even assuming uninit numbers are UB, but Miri complains.
508        //
509        // let mut x: MaybeUninit<libc::pthread_mutexattr_t> = MaybeUninit::zeroed();
510        // libc::pthread_mutexattr_init(x.as_mut_ptr());
511        // libc::pthread_mutexattr_destroy(x.as_mut_ptr());
512        // x.assume_init();
513        //
514        // However, the way libstd uses the pthread APIs works in our favor here, so we can get away with this.
515        // This can always be revisited to have some external state to catch double-destroys
516        // but not complain about the above code. See https://github.com/rust-lang/miri/pull/1933
517        this.write_uninit(
518            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
519        )?;
520
521        interp_ok(())
522    }
523
524    fn pthread_mutex_init(
525        &mut self,
526        mutex_op: &OpTy<'tcx>,
527        attr_op: &OpTy<'tcx>,
528    ) -> InterpResult<'tcx, ()> {
529        let this = self.eval_context_mut();
530
531        let attr = this.read_pointer(attr_op)?;
532        let kind = if this.ptr_is_null(attr)? {
533            MutexKind::Default
534        } else {
535            mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
536        };
537
538        mutex_create(this, mutex_op, kind)?;
539
540        interp_ok(())
541    }
542
    /// Shim for `pthread_mutex_lock`. Writes the return value into `dest`
    /// (possibly later, if the thread has to block first).
    fn pthread_mutex_lock(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        let ret = if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                // Locked by another thread: block, and have 0 written to `dest`
                // once the lock is acquired.
                this.mutex_enqueue_and_block(
                    mutex.mutex_ref,
                    Some((Scalar::from_i32(0), dest.clone())),
                );
                return interp_ok(());
            } else {
                // Trying to acquire the same mutex again.
                match mutex.kind {
                    MutexKind::Default =>
                        throw_ub_format!(
                            "trying to acquire default mutex already locked by the current thread"
                        ),
                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::LocalDeadlock),
                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref)?;
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref)?;
            0
        };
        this.write_scalar(Scalar::from_i32(ret), dest)?;
        interp_ok(())
    }
582
583    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
584        let this = self.eval_context_mut();
585
586        let mutex = mutex_get_data(this, mutex_op)?.clone();
587
588        interp_ok(Scalar::from_i32(if let Some(owner_thread) = mutex.mutex_ref.owner() {
589            if owner_thread != this.active_thread() {
590                this.eval_libc_i32("EBUSY")
591            } else {
592                match mutex.kind {
593                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
594                        this.eval_libc_i32("EBUSY"),
595                    MutexKind::Recursive => {
596                        this.mutex_lock(&mutex.mutex_ref)?;
597                        0
598                    }
599                }
600            }
601        } else {
602            // The mutex is unlocked. Let's lock it.
603            this.mutex_lock(&mutex.mutex_ref)?;
604            0
605        }))
606    }
607
    /// Shim for `pthread_mutex_unlock`: returns 0 on success, or EPERM / UB
    /// depending on the mutex kind if the current thread does not hold the lock.
    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
            // The mutex was locked by the current thread.
            interp_ok(Scalar::from_i32(0))
        } else {
            // The mutex was locked by another thread or not locked at all. See
            // the “Unlock When Not Owner” column in
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
            match mutex.kind {
                MutexKind::Default =>
                    throw_ub_format!(
                        "unlocked a default mutex that was not locked by the current thread"
                    ),
                MutexKind::Normal =>
                    throw_ub_format!(
                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                    ),
                MutexKind::ErrorCheck | MutexKind::Recursive =>
                    interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
            }
        }
    }
634
635    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
636        let this = self.eval_context_mut();
637
638        // Reading the field also has the side-effect that we detect double-`destroy`
639        // since we make the field uninit below.
640        let mutex = mutex_get_data(this, mutex_op)?.clone();
641
642        if mutex.mutex_ref.owner().is_some() {
643            throw_ub_format!("destroyed a locked mutex");
644        }
645
646        // This write also deletes the interpreter state for this mutex.
647        // This might lead to false positives, see comment in pthread_mutexattr_destroy
648        let mutex_place =
649            this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?;
650        this.write_uninit(&mutex_place)?;
651
652        interp_ok(())
653    }
654
655    fn pthread_rwlock_rdlock(
656        &mut self,
657        rwlock_op: &OpTy<'tcx>,
658        dest: &MPlaceTy<'tcx>,
659    ) -> InterpResult<'tcx> {
660        let this = self.eval_context_mut();
661
662        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
663
664        if rwlock.rwlock_ref.is_write_locked() {
665            this.rwlock_enqueue_and_block_reader(
666                rwlock.rwlock_ref,
667                Scalar::from_i32(0),
668                dest.clone(),
669            );
670        } else {
671            this.rwlock_reader_lock(&rwlock.rwlock_ref)?;
672            this.write_null(dest)?;
673        }
674
675        interp_ok(())
676    }
677
678    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
679        let this = self.eval_context_mut();
680
681        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
682
683        if rwlock.rwlock_ref.is_write_locked() {
684            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
685        } else {
686            this.rwlock_reader_lock(&rwlock.rwlock_ref)?;
687            interp_ok(Scalar::from_i32(0))
688        }
689    }
690
    /// Shim for `pthread_rwlock_wrlock`. Writes the return value into `dest`
    /// (possibly later, if the thread has to block first).
    fn pthread_rwlock_wrlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            // Note: this will deadlock if the lock is already locked by this
            // thread in any way.
            //
            // Relevant documentation:
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
            // An in-depth discussion on this topic:
            // https://github.com/rust-lang/rust/issues/53127
            //
            // FIXME: Detect and report the deadlock proactively. (We currently
            // report the deadlock only when no thread can continue execution,
            // but we could detect that this lock is already locked and report
            // an error.)
            this.rwlock_enqueue_and_block_writer(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            // The lock is free: acquire the write lock immediately.
            this.rwlock_writer_lock(&rwlock.rwlock_ref)?;
            this.write_null(dest)?;
        }

        interp_ok(())
    }
725
726    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
727        let this = self.eval_context_mut();
728
729        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
730
731        if rwlock.rwlock_ref.is_locked() {
732            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
733        } else {
734            this.rwlock_writer_lock(&rwlock.rwlock_ref)?;
735            interp_ok(Scalar::from_i32(0))
736        }
737    }
738
739    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
740        let this = self.eval_context_mut();
741
742        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
743
744        if this.rwlock_reader_unlock(&rwlock.rwlock_ref)?
745            || this.rwlock_writer_unlock(&rwlock.rwlock_ref)?
746        {
747            interp_ok(())
748        } else {
749            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
750        }
751    }
752
753    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
754        let this = self.eval_context_mut();
755
756        // Reading the field also has the side-effect that we detect double-`destroy`
757        // since we make the field uninit below.
758        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
759
760        if rwlock.rwlock_ref.is_locked() {
761            throw_ub_format!("destroyed a locked rwlock");
762        }
763
764        // This write also deletes the interpreter state for this rwlock.
765        // This might lead to false positives, see comment in pthread_mutexattr_destroy
766        let rwlock_place =
767            this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?;
768        this.write_uninit(&rwlock_place)?;
769
770        interp_ok(())
771    }
772
773    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
774        let this = self.eval_context_mut();
775
776        // no clock attribute on macOS
777        if this.tcx.sess.target.os != Os::MacOs {
778            // The default value of the clock attribute shall refer to the system
779            // clock.
780            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
781            let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
782            condattr_set_clock_id(this, attr_op, default_clock_id)?;
783        }
784
785        interp_ok(())
786    }
787
788    fn pthread_condattr_setclock(
789        &mut self,
790        attr_op: &OpTy<'tcx>,
791        clock_id_op: &OpTy<'tcx>,
792    ) -> InterpResult<'tcx, Scalar> {
793        let this = self.eval_context_mut();
794
795        let clock_id = this.read_scalar(clock_id_op)?;
796        if this.parse_clockid(clock_id).is_some() {
797            condattr_set_clock_id(this, attr_op, clock_id.to_i32()?)?;
798        } else {
799            let einval = this.eval_libc_i32("EINVAL");
800            return interp_ok(Scalar::from_i32(einval));
801        }
802
803        interp_ok(Scalar::from_i32(0))
804    }
805
806    fn pthread_condattr_getclock(
807        &mut self,
808        attr_op: &OpTy<'tcx>,
809        clk_id_op: &OpTy<'tcx>,
810    ) -> InterpResult<'tcx, ()> {
811        let this = self.eval_context_mut();
812
813        let clock_id = condattr_get_clock_id(this, attr_op)?;
814        this.write_scalar(
815            clock_id,
816            &this.deref_pointer_as(clk_id_op, this.libc_ty_layout("clockid_t"))?,
817        )?;
818
819        interp_ok(())
820    }
821
822    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
823        let this = self.eval_context_mut();
824
825        // Destroying an uninit pthread_condattr is UB, so check to make sure it's not uninit.
826        // There's no clock attribute on macOS.
827        if this.tcx.sess.target.os != Os::MacOs {
828            condattr_get_clock_id(this, attr_op)?;
829        }
830
831        // De-init the entire thing.
832        // This might lead to false positives, see comment in pthread_mutexattr_destroy
833        this.write_uninit(
834            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
835        )?;
836
837        interp_ok(())
838    }
839
840    fn pthread_cond_init(
841        &mut self,
842        cond_op: &OpTy<'tcx>,
843        attr_op: &OpTy<'tcx>,
844    ) -> InterpResult<'tcx, ()> {
845        let this = self.eval_context_mut();
846
847        let attr = this.read_pointer(attr_op)?;
848        // Default clock if `attr` is null, and on macOS where there is no clock attribute.
849        let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == Os::MacOs {
850            this.eval_libc("CLOCK_REALTIME")
851        } else {
852            condattr_get_clock_id(this, attr_op)?
853        };
854        let Some(clock) = this.parse_clockid(clock_id) else {
855            // This is UB since this situation cannot arise when using pthread_condattr_setclock.
856            throw_ub_format!("pthread_cond_init: invalid attributes (unsupported clock)")
857        };
858
859        cond_create(this, cond_op, clock)?;
860
861        interp_ok(())
862    }
863
864    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
865        let this = self.eval_context_mut();
866        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
867        this.condvar_signal(&condvar)?;
868        interp_ok(())
869    }
870
871    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
872        let this = self.eval_context_mut();
873        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
874        while this.condvar_signal(&condvar)? {}
875        interp_ok(())
876    }
877
878    fn pthread_cond_wait(
879        &mut self,
880        cond_op: &OpTy<'tcx>,
881        mutex_op: &OpTy<'tcx>,
882        dest: &MPlaceTy<'tcx>,
883    ) -> InterpResult<'tcx> {
884        let this = self.eval_context_mut();
885
886        let data = cond_get_data(this, cond_op)?.clone();
887        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();
888
889        this.condvar_wait(
890            data.condvar_ref,
891            mutex_ref,
892            None, // no timeout
893            Scalar::from_i32(0),
894            Scalar::from_i32(0), // retval_timeout -- unused
895            dest.clone(),
896        )?;
897
898        interp_ok(())
899    }
900
901    fn pthread_cond_timedwait(
902        &mut self,
903        cond_op: &OpTy<'tcx>,
904        mutex_op: &OpTy<'tcx>,
905        timeout_op: &OpTy<'tcx>,
906        dest: &MPlaceTy<'tcx>,
907        macos_relative_np: bool,
908    ) -> InterpResult<'tcx> {
909        let this = self.eval_context_mut();
910
911        let data = cond_get_data(this, cond_op)?.clone();
912        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();
913
914        // Extract the timeout.
915        let Some(duration) = this
916            .read_timespec(&this.deref_pointer_as(timeout_op, this.libc_ty_layout("timespec"))?)?
917        else {
918            let einval = this.eval_libc("EINVAL");
919            this.write_scalar(einval, dest)?;
920            return interp_ok(());
921        };
922
923        let (clock, anchor) = if macos_relative_np {
924            // `pthread_cond_timedwait_relative_np` always measures time against the
925            // monotonic clock, regardless of the condvar clock.
926            (TimeoutClock::Monotonic, TimeoutAnchor::Relative)
927        } else {
928            if data.clock == TimeoutClock::RealTime {
929                this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
930            }
931
932            (data.clock, TimeoutAnchor::Absolute)
933        };
934
935        this.condvar_wait(
936            data.condvar_ref,
937            mutex_ref,
938            Some((clock, anchor, duration)),
939            Scalar::from_i32(0),
940            this.eval_libc("ETIMEDOUT"), // retval_timeout
941            dest.clone(),
942        )?;
943
944        interp_ok(())
945    }
946
947    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
948        let this = self.eval_context_mut();
949
950        // Reading the field also has the side-effect that we detect double-`destroy`
951        // since we make the field uninit below.
952        let condvar = &cond_get_data(this, cond_op)?.condvar_ref;
953        if !condvar.queue_is_empty() {
954            throw_ub_format!("destroying an awaited conditional variable");
955        }
956
957        // This write also deletes the interpreter state for this mutex.
958        // This might lead to false positives, see comment in pthread_mutexattr_destroy
959        let cond_place = this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?;
960        this.write_uninit(&cond_place)?;
961
962        interp_ok(())
963    }
964}