miri/shims/unix/sync.rs

use rustc_abi::Size;

use crate::concurrency::sync::LAZY_INIT_COOKIE;
use crate::*;

/// Do a bytewise comparison of the two places, using relaxed atomic reads. This is used to check if
/// a synchronization primitive matches its static initializer value.
///
/// The reads happen in chunks of 4, so all racing accesses must also use that access size.
fn bytewise_equal_atomic_relaxed<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    left: &MPlaceTy<'tcx>,
    right: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, bool> {
    let size = left.layout.size;
    assert_eq!(size, right.layout.size);

    // We do this in chunks of 4, so that we are okay to race with (sufficiently aligned)
    // 4-byte atomic accesses.
    assert!(size.bytes() % 4 == 0);
    for i in 0..(size.bytes() / 4) {
        let offset = Size::from_bytes(i.strict_mul(4));
        let load = |place: &MPlaceTy<'tcx>| {
            let chunk = place.offset(offset, ecx.machine.layouts.u32, ecx)?;
            ecx.read_scalar_atomic(&chunk, AtomicReadOrd::Relaxed)?.to_u32()
        };
        let left = load(left)?;
        let right = load(right)?;
        if left != right {
            return interp_ok(false);
        }
    }

    interp_ok(true)
}
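
// For illustration (a hedged sketch, not code from Miri or its test suite): the access pattern
// the chunked reads must stay compatible with is user code doing relaxed 4-byte atomic loads on
// the same memory, e.g.
//
// let word = unsafe { std::sync::atomic::AtomicU32::from_ptr(ptr) } // ptr: *mut u32
//     .load(std::sync::atomic::Ordering::Relaxed);
//
// Accesses of a different size racing with such a load would be reported as an error by the
// data-race detector, hence the aligned 4-byte chunks above.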

// # pthread_mutexattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - kind: i32

#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "macos" | "freebsd" | "android" => 0,
        os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
    })
}

fn mutexattr_get_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn mutexattr_set_kind<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        Scalar::from_i32(kind),
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )
}

/// To differentiate "the mutex kind has not been changed" from
/// "the mutex kind has been set to PTHREAD_MUTEX_DEFAULT and that is
/// equal to some other mutex kind", we make the default value of this
/// field *not* PTHREAD_MUTEX_DEFAULT but this special flag.
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;

/// Translates the mutex kind from what is stored in pthread_mutexattr_t to our enum.
fn mutexattr_translate_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, MutexKind> {
    interp_ok(if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL") {
        MutexKind::Normal
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
        MutexKind::ErrorCheck
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
        MutexKind::Recursive
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
        || kind == PTHREAD_MUTEX_KIND_UNCHANGED
    {
        // We check this *last* since PTHREAD_MUTEX_DEFAULT may be numerically equal to one of the
        // others, and we want an explicit `mutexattr_settype` to work as expected.
        MutexKind::Default
    } else {
        throw_unsup_format!("unsupported type of mutex: {kind}");
    })
}
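
// For example (an assumption based on glibc's headers, not something checked here): on Linux,
// PTHREAD_MUTEX_DEFAULT and PTHREAD_MUTEX_NORMAL are both 0, so checking DEFAULT first would
// shadow an explicit `pthread_mutexattr_settype(attr, PTHREAD_MUTEX_NORMAL)`.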

// # pthread_mutex_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32

/// The mutex kind.
#[derive(Debug, Clone, Copy)]
enum MutexKind {
    Normal,
    Default,
    Recursive,
    ErrorCheck,
}

#[derive(Debug, Clone)]
struct PthreadMutex {
    mutex_ref: MutexRef,
    kind: MutexKind,
}

/// To ensure an initialized mutex that was moved somewhere else can be distinguished from
/// a statically initialized mutex that is used the first time, we pick some offset within
/// `pthread_mutex_t` and use it as an "initialized" flag.
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_MUTEX_INITIALIZER (but only once):
    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
    if !ecx.machine.pthread_mutex_sanity.replace(true) {
        let check_static_initializer = |name| {
            let static_initializer = ecx.eval_path(&["libc", name]);
            let init_field =
                static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
            let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
            assert_ne!(
                init, LAZY_INIT_COOKIE,
                "{name} is incompatible with our initialization cookie"
            );
        };

        check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
        // Check non-standard initializers.
        match &*ecx.tcx.sess.target.os {
            "linux" => {
                check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
            }
            "illumos" | "solaris" | "macos" | "freebsd" | "android" => {
                // No non-standard initializers.
            }
            os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
        }
    }

    interp_ok(offset)
}
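
// (The sanity check above runs only on the first call: `replace(true)` returns the previous
// value of the flag, which is `false` exactly once per execution.)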

/// Eagerly create and initialize a new mutex.
fn mutex_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
    kind: MutexKind,
) -> InterpResult<'tcx, PthreadMutex> {
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    let mutex_ref = ecx.machine.sync.mutex_create();
    let data = PthreadMutex { mutex_ref, kind };
    ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data.clone())?;
    interp_ok(data)
}

/// Returns the mutex data stored at the address that `mutex_ptr` points to.
/// Will raise an error if the mutex has been moved since its first use.
fn mutex_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
    'tcx: 'a,
{
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    ecx.lazy_sync_get_data(
        &mutex,
        mutex_init_offset(ecx)?,
        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
        |ecx| {
            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
            let mutex_ref = ecx.machine.sync.mutex_create();
            interp_ok(PthreadMutex { mutex_ref, kind })
        },
    )
}
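
// For illustration (a hedged user-code sketch, not part of Miri): the two paths above
// correspond to
//
// // eager: `mutex_create` runs inside the pthread_mutex_init shim
// let mut m: libc::pthread_mutex_t = unsafe { std::mem::zeroed() };
// unsafe { libc::pthread_mutex_init(&mut m, std::ptr::null()) };
//
// // lazy: the static initializer involves no call, so `mutex_get_data` creates the
// // interpreter-side mutex at the first lock
// let mut m = libc::PTHREAD_MUTEX_INITIALIZER;
// unsafe { libc::pthread_mutex_lock(&mut m) };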

/// Returns the kind of a static initializer.
fn mutex_kind_from_static_initializer<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
    // All the static initializers recognized here *must* be checked in `mutex_init_offset`!
    let is_initializer =
        |name| bytewise_equal_atomic_relaxed(ecx, mutex, &ecx.eval_path(&["libc", name]));

    // PTHREAD_MUTEX_INITIALIZER is recognized on all targets.
    if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
        return interp_ok(MutexKind::Default);
    }
    // Support additional platform-specific initializers.
    match &*ecx.tcx.sess.target.os {
        "linux" =>
            if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::Recursive);
            } else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::ErrorCheck);
            },
        _ => {}
    }
    throw_unsup_format!("unsupported static initializer used for `pthread_mutex_t`");
}

// # pthread_rwlock_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32

#[derive(Debug, Clone)]
struct PthreadRwLock {
    rwlock_ref: RwLockRef,
}

fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_RWLOCK_INITIALIZER (but only once):
    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
    if !ecx.machine.pthread_rwlock_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

fn rwlock_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
    'tcx: 'a,
{
    let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
    ecx.lazy_sync_get_data(
        &rwlock,
        rwlock_init_offset(ecx)?,
        || throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &rwlock,
                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
            }
            let rwlock_ref = ecx.machine.sync.rwlock_create();
            interp_ok(PthreadRwLock { rwlock_ref })
        },
    )
}
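
// As with mutexes, a statically initialized rwlock (`libc::PTHREAD_RWLOCK_INITIALIZER`) only
// gets its interpreter-side state on first use; unlike mutexes, there is exactly one recognized
// static initializer here, so no kind needs to be decoded from it.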

// # pthread_condattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - clock: i32

#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS does not have a clock attribute.
        os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
    })
}

fn condattr_get_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn condattr_set_clock_id<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    clock_id: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        Scalar::from_i32(clock_id),
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )
}

/// Translates the clock from what is stored in pthread_condattr_t to our enum.
fn condattr_translate_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    raw_id: i32,
) -> InterpResult<'tcx, ClockId> {
    interp_ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
        ClockId::Realtime
    } else if raw_id == ecx.eval_libc_i32("CLOCK_MONOTONIC") {
        ClockId::Monotonic
    } else {
        throw_unsup_format!("unsupported clock id: {raw_id}");
    })
}

// # pthread_cond_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32

fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_COND_INITIALIZER (but only once):
    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
    if !ecx.machine.pthread_condvar_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

#[derive(Debug, Clone, Copy)]
enum ClockId {
    Realtime,
    Monotonic,
}

#[derive(Debug, Copy, Clone)]
struct PthreadCondvar {
    id: CondvarId,
    clock: ClockId,
}

fn cond_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
    clock: ClockId,
) -> InterpResult<'tcx, PthreadCondvar> {
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    let id = ecx.machine.sync.condvar_create();
    let data = PthreadCondvar { id, clock };
    ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data)?;
    interp_ok(data)
}

fn cond_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
    'tcx: 'a,
{
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    ecx.lazy_sync_get_data(
        &cond,
        cond_init_offset(ecx)?,
        || throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &cond,
                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
            }
            // This used the static initializer. The clock there is always CLOCK_REALTIME.
            let id = ecx.machine.sync.condvar_create();
            interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
        },
    )
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;

        interp_ok(())
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: &OpTy<'tcx>,
        kind_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.to_i32()?;
        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
        {
            // Make sure we do not mix this up with the "unchanged" kind.
            assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_mutexattr is UB, so check to make sure it's not uninit.
        mutexattr_get_kind(this, attr_op)?;

        // To catch double-destroys, we de-initialize the mutexattr.
        // This is technically not right and might lead to false positives. For example, the below
        // code is *likely* sound, even assuming uninit numbers are UB, but Miri complains.
        //
        // let mut x: MaybeUninit<libc::pthread_mutexattr_t> = MaybeUninit::zeroed();
        // libc::pthread_mutexattr_init(x.as_mut_ptr());
        // libc::pthread_mutexattr_destroy(x.as_mut_ptr());
        // x.assume_init();
        //
        // However, the way libstd uses the pthread APIs works in our favor here, so we can get away with this.
        // This can always be revisited to have some external state to catch double-destroys
        // but not complain about the above code. See https://github.com/rust-lang/miri/pull/1933
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let kind = if this.ptr_is_null(attr)? {
            MutexKind::Default
        } else {
            mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
        };

        mutex_create(this, mutex_op, kind)?;

        interp_ok(())
    }

    fn pthread_mutex_lock(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        let ret = if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.mutex_enqueue_and_block(
                    mutex.mutex_ref,
                    Some((Scalar::from_i32(0), dest.clone())),
                );
                return interp_ok(());
            } else {
                // Trying to acquire the same mutex again.
                match mutex.kind {
                    MutexKind::Default =>
                        throw_ub_format!(
                            "trying to acquire a default mutex already locked by the current thread"
                        ),
                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref);
            0
        };
        this.write_scalar(Scalar::from_i32(ret), dest)?;
        interp_ok(())
    }
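
    // For illustration (a hedged user-code sketch, not part of Miri): the re-acquisition arms
    // above service code like
    //
    // unsafe {
    //     libc::pthread_mutex_lock(&mut m); // first acquisition succeeds
    //     libc::pthread_mutex_lock(&mut m); // same thread again:
    //     // recursive: 0 (lock count bumped); error-check: EDEADLK;
    //     // normal: self-deadlock, reported by Miri; default: UB, reported by Miri
    // }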

    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        interp_ok(Scalar::from_i32(if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.eval_libc_i32("EBUSY")
            } else {
                match mutex.kind {
                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
                        this.eval_libc_i32("EBUSY"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref);
            0
        }))
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
            // The mutex was locked by the current thread.
            interp_ok(Scalar::from_i32(0))
        } else {
            // The mutex was locked by another thread or not locked at all. See
            // the “Unlock When Not Owner” column in
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
            match mutex.kind {
                MutexKind::Default =>
                    throw_ub_format!(
                        "unlocked a default mutex that was not locked by the current thread"
                    ),
                MutexKind::Normal =>
                    throw_ub_format!(
                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                    ),
                MutexKind::ErrorCheck | MutexKind::Recursive =>
                    interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
            }
        }
    }

    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if mutex.mutex_ref.owner().is_some() {
            throw_ub_format!("destroyed a locked mutex");
        }

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(
            &this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?,
        )?;
        // FIXME: delete interpreter state associated with this mutex.

        interp_ok(())
    }

    fn pthread_rwlock_rdlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            this.rwlock_enqueue_and_block_reader(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_wrlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            // Note: this will deadlock if the lock is already locked by this
            // thread in any way.
            //
            // Relevant documentation:
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
            // An in-depth discussion on this topic:
            // https://github.com/rust-lang/rust/issues/53127
            //
            // FIXME: Detect and report the deadlock proactively. (We currently
            // report the deadlock only when no thread can continue execution,
            // but we could detect that this lock is already locked and report
            // an error.)
            this.rwlock_enqueue_and_block_writer(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if this.rwlock_reader_unlock(&rwlock.rwlock_ref)?
            || this.rwlock_writer_unlock(&rwlock.rwlock_ref)?
        {
            interp_ok(())
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
        }
    }

    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            throw_ub_format!("destroyed a locked rwlock");
        }

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(
            &this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?,
        )?;
        // FIXME: delete interpreter state associated with this rwlock.

        interp_ok(())
    }

    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // no clock attribute on macOS
        if this.tcx.sess.target.os != "macos" {
            // The default value of the clock attribute shall refer to the system
            // clock.
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
            let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
            condattr_set_clock_id(this, attr_op, default_clock_id)?;
        }

        interp_ok(())
    }

    fn pthread_condattr_setclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clock_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let clock_id = this.read_scalar(clock_id_op)?.to_i32()?;
        if clock_id == this.eval_libc_i32("CLOCK_REALTIME")
            || clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")
        {
            condattr_set_clock_id(this, attr_op, clock_id)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_condattr_getclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clk_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let clock_id = condattr_get_clock_id(this, attr_op)?;
        this.write_scalar(
            Scalar::from_i32(clock_id),
            &this.deref_pointer_as(clk_id_op, this.libc_ty_layout("clockid_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_condattr is UB, so check to make sure it's not uninit.
        // There's no clock attribute on macOS.
        if this.tcx.sess.target.os != "macos" {
            condattr_get_clock_id(this, attr_op)?;
        }

        // De-init the entire thing.
        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_cond_init(
        &mut self,
        cond_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        // Default clock if `attr` is null, and on macOS where there is no clock attribute.
        let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == "macos" {
            this.eval_libc_i32("CLOCK_REALTIME")
        } else {
            condattr_get_clock_id(this, attr_op)?
        };
        let clock_id = condattr_translate_clock_id(this, clock_id)?;

        cond_create(this, cond_op, clock_id)?;

        interp_ok(())
    }

    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let id = cond_get_data(this, cond_op)?.id;
        this.condvar_signal(id)?;
        interp_ok(())
    }

    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let id = cond_get_data(this, cond_op)?.id;
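        // `condvar_signal` reports whether it actually woke a waiter; looping until it
        // returns false drains the entire wait queue, which is what broadcast requires.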
        while this.condvar_signal(id)? {}
        interp_ok(())
    }

    fn pthread_cond_wait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = *cond_get_data(this, cond_op)?;
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        this.condvar_wait(
            data.id,
            mutex_ref,
            None, // no timeout
            Scalar::from_i32(0),
            Scalar::from_i32(0), // retval_timeout -- unused
            dest.clone(),
        )?;

        interp_ok(())
    }

    fn pthread_cond_timedwait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        abstime_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = *cond_get_data(this, cond_op)?;
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        // Extract the timeout.
        let duration = match this
            .read_timespec(&this.deref_pointer_as(abstime_op, this.libc_ty_layout("timespec"))?)?
        {
            Some(duration) => duration,
            None => {
                let einval = this.eval_libc("EINVAL");
                this.write_scalar(einval, dest)?;
                return interp_ok(());
            }
        };
        let timeout_clock = match data.clock {
            ClockId::Realtime => {
                this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
                TimeoutClock::RealTime
            }
            ClockId::Monotonic => TimeoutClock::Monotonic,
        };

        this.condvar_wait(
            data.id,
            mutex_ref,
            Some((timeout_clock, TimeoutAnchor::Absolute, duration)),
            Scalar::from_i32(0),
            this.eval_libc("ETIMEDOUT"), // retval_timeout
            dest.clone(),
        )?;

        interp_ok(())
    }
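
    // For illustration (a hedged user-code sketch, not part of Miri): the CLOCK_MONOTONIC arm
    // above is reached via
    //
    // unsafe {
    //     let mut attr: libc::pthread_condattr_t = std::mem::zeroed();
    //     libc::pthread_condattr_init(&mut attr);
    //     libc::pthread_condattr_setclock(&mut attr, libc::CLOCK_MONOTONIC);
    //     let mut cond: libc::pthread_cond_t = std::mem::zeroed();
    //     libc::pthread_cond_init(&mut cond, &attr);
    //     // pthread_cond_timedwait(&mut cond, &mut mutex, &abstime) now uses the monotonic clock
    // }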

    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let id = cond_get_data(this, cond_op)?.id;
        if this.condvar_is_awaited(id) {
            throw_ub_format!("destroyed an awaited condition variable");
        }

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(&this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?)?;
        // FIXME: delete interpreter state associated with this condvar.

        interp_ok(())
    }
}