miri/shims/unix/sync.rs

1use rustc_abi::Size;
2use rustc_target::spec::Os;
3
4use crate::concurrency::sync::{AccessKind, SyncObj};
5use crate::*;
6
7/// Do a bytewise comparison of the two places. This is used to check if
8/// a synchronization primitive matches its static initializer value.
9fn bytewise_equal<'tcx>(
10    ecx: &MiriInterpCx<'tcx>,
11    left: &MPlaceTy<'tcx>,
12    right: &MPlaceTy<'tcx>,
13) -> InterpResult<'tcx, bool> {
14    let size = left.layout.size;
15    assert_eq!(size, right.layout.size);
16
17    let left_bytes = ecx.read_bytes_ptr_strip_provenance(left.ptr(), size)?;
18    let right_bytes = ecx.read_bytes_ptr_strip_provenance(right.ptr(), size)?;
19
20    interp_ok(left_bytes == right_bytes)
21}
22
// The in-memory marker values we use to indicate whether objects have been initialized.
// They are written into / read from the `init` byte chosen by the `*_init_offset`
// functions below; the sanity checks there ensure all static initializers keep that
// byte equal to `PTHREAD_UNINIT`.
const PTHREAD_UNINIT: u8 = 0;
const PTHREAD_INIT: u8 = 1;
26
27// # pthread_mutexattr_t
28// We store some data directly inside the type, ignoring the platform layout:
29// - kind: i32
30
/// Offset (in bytes) within `pthread_mutexattr_t` where we store the mutex kind.
#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &ecx.tcx.sess.target.os {
        // All supported targets use offset 0.
        Os::Linux | Os::Illumos | Os::Solaris | Os::MacOs | Os::FreeBsd | Os::Android => 0,
        os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
    })
}
38
39fn mutexattr_get_kind<'tcx>(
40    ecx: &MiriInterpCx<'tcx>,
41    attr_ptr: &OpTy<'tcx>,
42) -> InterpResult<'tcx, i32> {
43    ecx.deref_pointer_and_read(
44        attr_ptr,
45        mutexattr_kind_offset(ecx)?,
46        ecx.libc_ty_layout("pthread_mutexattr_t"),
47        ecx.machine.layouts.i32,
48    )?
49    .to_i32()
50}
51
52fn mutexattr_set_kind<'tcx>(
53    ecx: &mut MiriInterpCx<'tcx>,
54    attr_ptr: &OpTy<'tcx>,
55    kind: i32,
56) -> InterpResult<'tcx, ()> {
57    ecx.deref_pointer_and_write(
58        attr_ptr,
59        mutexattr_kind_offset(ecx)?,
60        Scalar::from_i32(kind),
61        ecx.libc_ty_layout("pthread_mutexattr_t"),
62        ecx.machine.layouts.i32,
63    )
64}
65
/// To differentiate "the mutex kind has not been changed" from
/// "the mutex kind has been set to PTHREAD_MUTEX_DEFAULT and that is
/// equal to some other mutex kind", we make the default value of this
/// field *not* PTHREAD_MUTEX_DEFAULT but this special flag.
/// (The `assert_ne!` in `pthread_mutexattr_settype` guards against this value
/// colliding with any of the real libc kind constants.)
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;
71
72/// Translates the mutex kind from what is stored in pthread_mutexattr_t to our enum.
73fn mutexattr_translate_kind<'tcx>(
74    ecx: &MiriInterpCx<'tcx>,
75    kind: i32,
76) -> InterpResult<'tcx, MutexKind> {
77    interp_ok(if kind == (ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")) {
78        MutexKind::Normal
79    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
80        MutexKind::ErrorCheck
81    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
82        MutexKind::Recursive
83    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
84        || kind == PTHREAD_MUTEX_KIND_UNCHANGED
85    {
86        // We check this *last* since PTHREAD_MUTEX_DEFAULT may be numerically equal to one of the
87        // others, and we want an explicit `mutexattr_settype` to work as expected.
88        MutexKind::Default
89    } else {
90        throw_unsup_format!("unsupported type of mutex: {kind}");
91    })
92}
93
94// # pthread_mutex_t
95// We store some data directly inside the type, ignoring the platform layout:
96// - init: u8
97
/// The mutex kind.
#[derive(Debug, Clone, Copy)]
enum MutexKind {
    /// PTHREAD_MUTEX_NORMAL
    Normal,
    /// PTHREAD_MUTEX_DEFAULT (also used for unset attributes and static initializers)
    Default,
    /// PTHREAD_MUTEX_RECURSIVE
    Recursive,
    /// PTHREAD_MUTEX_ERRORCHECK
    ErrorCheck,
}
106
/// The interpreter-side state we keep for each initialized `pthread_mutex_t`.
#[derive(Debug, Clone)]
struct PthreadMutex {
    // Handle to the underlying mutex synchronization object.
    mutex_ref: MutexRef,
    // The pthread mutex kind this mutex was created with.
    kind: MutexKind,
}
112
impl SyncObj for PthreadMutex {
    // Any access to the mutex bytes while threads are still queued on it is rejected as UB.
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if !self.mutex_ref.queue_is_empty() {
            throw_ub_format!(
                "{access_kind} of `pthread_mutex_t` is forbidden while the queue is non-empty"
            );
        }
        interp_ok(())
    }

    // Overwriting the bytes deletes the interpreter state for this mutex
    // (relied upon by `pthread_mutex_destroy`).
    fn delete_on_write(&self) -> bool {
        true
    }
}
127
/// To ensure an initialized mutex that was moved somewhere else can be distinguished from
/// a statically initialized mutex that is used the first time, we pick some offset within
/// `pthread_mutex_t` and use it as an "initialized" flag.
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        Os::MacOs => 4,
        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_MUTEX_INITIALIZER (but only once):
    // the `init` field must start out not equal to INIT_COOKIE.
    if !ecx.machine.pthread_mutex_sanity.replace(true) {
        let check_static_initializer = |name| {
            let static_initializer = ecx.eval_path(&["libc", name]);
            let init_field =
                static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
            let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
            // If this fires, the chosen offset overlaps a non-zero byte of the static
            // initializer and cannot serve as our "initialized" flag.
            assert_eq!(
                init, PTHREAD_UNINIT,
                "{name} is incompatible with our initialization logic"
            );
        };

        check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
        // Check non-standard initializers.
        match &ecx.tcx.sess.target.os {
            Os::Linux => {
                check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
            }
            Os::Illumos | Os::Solaris | Os::MacOs | Os::FreeBsd | Os::Android => {
                // No non-standard initializers.
            }
            os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
        }
    }

    interp_ok(offset)
}
171
172/// Eagerly create and initialize a new mutex.
173fn mutex_create<'tcx>(
174    ecx: &mut MiriInterpCx<'tcx>,
175    mutex_ptr: &OpTy<'tcx>,
176    kind: MutexKind,
177) -> InterpResult<'tcx, PthreadMutex> {
178    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
179    let data = PthreadMutex { mutex_ref: MutexRef::new(), kind };
180    ecx.init_immovable_sync(&mutex, mutex_init_offset(ecx)?, PTHREAD_INIT, data.clone())?;
181    interp_ok(data)
182}
183
/// Returns the mutex data stored at the address that `mutex_ptr` points to.
/// Will raise an error if the mutex has been moved since its first use.
fn mutex_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
    'tcx: 'a,
{
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &mutex,
        mutex_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        // Fallback used when there is no interpreter state for these bytes yet:
        // derive the mutex kind from the static initializer they must match.
        |ecx| {
            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
            interp_ok(PthreadMutex { mutex_ref: MutexRef::new(), kind })
        },
    )
}
205
/// Returns the kind of a static initializer.
/// Raises a UB error if the bytes match no initializer recognized for this target.
fn mutex_kind_from_static_initializer<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
    // All the static initializers recognized here *must* be checked in `mutex_init_offset`!
    let is_initializer = |name| bytewise_equal(ecx, mutex, &ecx.eval_path(&["libc", name]));

    // PTHREAD_MUTEX_INITIALIZER is recognized on all targets.
    if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
        return interp_ok(MutexKind::Default);
    }
    // Support additional platform-specific initializers.
    match &ecx.tcx.sess.target.os {
        Os::Linux =>
            if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::Recursive);
            } else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::ErrorCheck);
            },
        _ => {}
    }
    throw_ub_format!(
        "`pthread_mutex_t` was not properly initialized at this location, or it got overwritten"
    );
}
232
233// # pthread_rwlock_t
234// We store some data directly inside the type, ignoring the platform layout:
235// - init: u8
236
/// The interpreter-side state we keep for each initialized `pthread_rwlock_t`.
#[derive(Debug, Clone)]
struct PthreadRwLock {
    // Handle to the underlying rwlock synchronization object.
    rwlock_ref: RwLockRef,
}
241
impl SyncObj for PthreadRwLock {
    // Any access to the rwlock bytes while threads are still queued on it is rejected as UB.
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if !self.rwlock_ref.queue_is_empty() {
            throw_ub_format!(
                "{access_kind} of `pthread_rwlock_t` is forbidden while the queue is non-empty"
            );
        }
        interp_ok(())
    }

    // Overwriting the bytes deletes the interpreter state for this rwlock
    // (relied upon by `pthread_rwlock_destroy`).
    fn delete_on_write(&self) -> bool {
        true
    }
}
256
/// Picks the offset of the byte inside `pthread_rwlock_t` that we use as an "initialized"
/// flag; see `mutex_init_offset` for the idea behind this.
fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        Os::MacOs => 4,
        os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_RWLOCK_INITIALIZER (but only once):
    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
    if !ecx.machine.pthread_rwlock_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
        assert_eq!(
            init, PTHREAD_UNINIT,
            "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization logic"
        );
    }

    interp_ok(offset)
}
280
/// Returns the rwlock data stored at the address that `rwlock_ptr` points to.
/// Will raise an error if the rwlock has been moved since its first use.
fn rwlock_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
    'tcx: 'a,
{
    let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &rwlock,
        rwlock_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        // Fallback used when there is no interpreter state for these bytes yet:
        // they must match the static initializer exactly.
        |ecx| {
            if !bytewise_equal(
                ecx,
                &rwlock,
                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
            )? {
                throw_ub_format!(
                    "`pthread_rwlock_t` was not properly initialized at this location, or it got overwritten"
                );
            }
            interp_ok(PthreadRwLock { rwlock_ref: RwLockRef::new() })
        },
    )
}
308
309// # pthread_condattr_t
310// We store some data directly inside the type, ignoring the platform layout:
311// - clock: i32
312
/// Offset (in bytes) within `pthread_condattr_t` where we store the clock id.
#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // macOS does not have a clock attribute.
        os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
    })
}
321
322fn condattr_get_clock_id<'tcx>(
323    ecx: &MiriInterpCx<'tcx>,
324    attr_ptr: &OpTy<'tcx>,
325) -> InterpResult<'tcx, Scalar> {
326    ecx.deref_pointer_and_read(
327        attr_ptr,
328        condattr_clock_offset(ecx)?,
329        ecx.libc_ty_layout("pthread_condattr_t"),
330        ecx.machine.layouts.i32,
331    )
332}
333
334fn condattr_set_clock_id<'tcx>(
335    ecx: &mut MiriInterpCx<'tcx>,
336    attr_ptr: &OpTy<'tcx>,
337    clock_id: i32,
338) -> InterpResult<'tcx, ()> {
339    ecx.deref_pointer_and_write(
340        attr_ptr,
341        condattr_clock_offset(ecx)?,
342        Scalar::from_i32(clock_id),
343        ecx.libc_ty_layout("pthread_condattr_t"),
344        ecx.machine.layouts.i32,
345    )
346}
347
348// # pthread_cond_t
349// We store some data directly inside the type, ignoring the platform layout:
350// - init: u8
351
/// Picks the offset of the byte inside `pthread_cond_t` that we use as an "initialized"
/// flag; see `mutex_init_offset` for the idea behind this.
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        Os::MacOs => 4,
        os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_COND_INITIALIZER (but only once):
    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
    if !ecx.machine.pthread_condvar_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
        assert_eq!(
            init, PTHREAD_UNINIT,
            "PTHREAD_COND_INITIALIZER is incompatible with our initialization logic"
        );
    }

    interp_ok(offset)
}
375
/// The interpreter-side state we keep for each initialized `pthread_cond_t`.
#[derive(Debug, Clone)]
struct PthreadCondvar {
    // Handle to the underlying condvar synchronization object.
    condvar_ref: CondvarRef,
    // The clock this condvar uses (set from the condattr at creation time;
    // CLOCK_REALTIME for statically initialized condvars).
    clock: TimeoutClock,
}
381
impl SyncObj for PthreadCondvar {
    // Any access to the condvar bytes while threads are still queued on it is rejected as UB.
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if !self.condvar_ref.queue_is_empty() {
            throw_ub_format!(
                "{access_kind} of `pthread_cond_t` is forbidden while the queue is non-empty"
            );
        }
        interp_ok(())
    }

    // Overwriting the bytes deletes the interpreter state for this condvar.
    fn delete_on_write(&self) -> bool {
        true
    }
}
396
397fn cond_create<'tcx>(
398    ecx: &mut MiriInterpCx<'tcx>,
399    cond_ptr: &OpTy<'tcx>,
400    clock: TimeoutClock,
401) -> InterpResult<'tcx, PthreadCondvar> {
402    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
403    let data = PthreadCondvar { condvar_ref: CondvarRef::new(), clock };
404    ecx.init_immovable_sync(&cond, cond_init_offset(ecx)?, PTHREAD_INIT, data.clone())?;
405    interp_ok(data)
406}
407
/// Returns the condvar data stored at the address that `cond_ptr` points to.
/// Will raise an error if the condvar has been moved since its first use.
fn cond_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
    'tcx: 'a,
{
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &cond,
        cond_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        // Fallback used when there is no interpreter state for these bytes yet:
        // they must match the static initializer exactly.
        |ecx| {
            if !bytewise_equal(
                ecx,
                &cond,
                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
            )? {
                throw_ub_format!(
                    "`pthread_cond_t` was not properly initialized at this location, or it got overwritten"
                );
            }
            // This used the static initializer. The clock there is always CLOCK_REALTIME.
            interp_ok(PthreadCondvar {
                condvar_ref: CondvarRef::new(),
                clock: TimeoutClock::RealTime,
            })
        },
    )
}
439
// Make the shim methods of the trait below available directly on the interpreter context.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
441pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
442    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
443        let this = self.eval_context_mut();
444
445        mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;
446
447        interp_ok(())
448    }
449
450    fn pthread_mutexattr_settype(
451        &mut self,
452        attr_op: &OpTy<'tcx>,
453        kind_op: &OpTy<'tcx>,
454    ) -> InterpResult<'tcx, Scalar> {
455        let this = self.eval_context_mut();
456
457        let kind = this.read_scalar(kind_op)?.to_i32()?;
458        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
459            || kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
460            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
461            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
462        {
463            // Make sure we do not mix this up with the "unchanged" kind.
464            assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
465            mutexattr_set_kind(this, attr_op, kind)?;
466        } else {
467            let einval = this.eval_libc_i32("EINVAL");
468            return interp_ok(Scalar::from_i32(einval));
469        }
470
471        interp_ok(Scalar::from_i32(0))
472    }
473
    /// `pthread_mutexattr_destroy(attr)`: de-initialize the attribute so double-destroys
    /// and use-after-destroy can be detected.
    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_mutexattr is UB, so check to make sure it's not uninit.
        mutexattr_get_kind(this, attr_op)?;

        // To catch double-destroys, we de-initialize the mutexattr.
        // This is technically not right and might lead to false positives. For example, the below
        // code is *likely* sound, even assuming uninit numbers are UB, but Miri complains.
        //
        // let mut x: MaybeUninit<libc::pthread_mutexattr_t> = MaybeUninit::zeroed();
        // libc::pthread_mutexattr_init(x.as_mut_ptr());
        // libc::pthread_mutexattr_destroy(x.as_mut_ptr());
        // x.assume_init();
        //
        // However, the way libstd uses the pthread APIs works in our favor here, so we can get away with this.
        // This can always be revisited to have some external state to catch double-destroys
        // but not complain about the above code. See https://github.com/rust-lang/miri/pull/1933
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
        )?;

        interp_ok(())
    }
498
499    fn pthread_mutex_init(
500        &mut self,
501        mutex_op: &OpTy<'tcx>,
502        attr_op: &OpTy<'tcx>,
503    ) -> InterpResult<'tcx, ()> {
504        let this = self.eval_context_mut();
505
506        let attr = this.read_pointer(attr_op)?;
507        let kind = if this.ptr_is_null(attr)? {
508            MutexKind::Default
509        } else {
510            mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
511        };
512
513        mutex_create(this, mutex_op, kind)?;
514
515        interp_ok(())
516    }
517
    /// `pthread_mutex_lock(mutex)`: acquire the mutex, blocking if another thread holds it.
    /// The return value (0 on success, or an errno) is written to `dest`.
    fn pthread_mutex_lock(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        let ret = if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                // Another thread holds the mutex: block, and have 0 written to `dest`
                // once we are woken up with the lock.
                this.mutex_enqueue_and_block(
                    mutex.mutex_ref,
                    Some((Scalar::from_i32(0), dest.clone())),
                );
                return interp_ok(());
            } else {
                // Trying to acquire the same mutex again.
                match mutex.kind {
                    MutexKind::Default =>
                        throw_ub_format!(
                            "trying to acquire default mutex already locked by the current thread"
                        ),
                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref)?;
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref)?;
            0
        };
        this.write_scalar(Scalar::from_i32(ret), dest)?;
        interp_ok(())
    }
557
558    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
559        let this = self.eval_context_mut();
560
561        let mutex = mutex_get_data(this, mutex_op)?.clone();
562
563        interp_ok(Scalar::from_i32(if let Some(owner_thread) = mutex.mutex_ref.owner() {
564            if owner_thread != this.active_thread() {
565                this.eval_libc_i32("EBUSY")
566            } else {
567                match mutex.kind {
568                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
569                        this.eval_libc_i32("EBUSY"),
570                    MutexKind::Recursive => {
571                        this.mutex_lock(&mutex.mutex_ref)?;
572                        0
573                    }
574                }
575            }
576        } else {
577            // The mutex is unlocked. Let's lock it.
578            this.mutex_lock(&mutex.mutex_ref)?;
579            0
580        }))
581    }
582
    /// `pthread_mutex_unlock(mutex)`: release the mutex; the result depends on the mutex
    /// kind when the calling thread does not own it.
    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
            // The mutex was locked by the current thread.
            interp_ok(Scalar::from_i32(0))
        } else {
            // The mutex was locked by another thread or not locked at all. See
            // the “Unlock When Not Owner” column in
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
            match mutex.kind {
                MutexKind::Default =>
                    throw_ub_format!(
                        "unlocked a default mutex that was not locked by the current thread"
                    ),
                MutexKind::Normal =>
                    throw_ub_format!(
                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                    ),
                MutexKind::ErrorCheck | MutexKind::Recursive =>
                    interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
            }
        }
    }
609
    /// `pthread_mutex_destroy(mutex)`: destroy the mutex; UB if it is still locked.
    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if mutex.mutex_ref.owner().is_some() {
            throw_ub_format!("destroyed a locked mutex");
        }

        // This write also deletes the interpreter state for this mutex.
        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        let mutex_place =
            this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?;
        this.write_uninit(&mutex_place)?;

        interp_ok(())
    }
629
630    fn pthread_rwlock_rdlock(
631        &mut self,
632        rwlock_op: &OpTy<'tcx>,
633        dest: &MPlaceTy<'tcx>,
634    ) -> InterpResult<'tcx> {
635        let this = self.eval_context_mut();
636
637        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
638
639        if rwlock.rwlock_ref.is_write_locked() {
640            this.rwlock_enqueue_and_block_reader(
641                rwlock.rwlock_ref,
642                Scalar::from_i32(0),
643                dest.clone(),
644            );
645        } else {
646            this.rwlock_reader_lock(&rwlock.rwlock_ref)?;
647            this.write_null(dest)?;
648        }
649
650        interp_ok(())
651    }
652
653    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
654        let this = self.eval_context_mut();
655
656        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
657
658        if rwlock.rwlock_ref.is_write_locked() {
659            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
660        } else {
661            this.rwlock_reader_lock(&rwlock.rwlock_ref)?;
662            interp_ok(Scalar::from_i32(0))
663        }
664    }
665
    /// `pthread_rwlock_wrlock(rwlock)`: take the write lock, blocking while the lock is
    /// held in any mode. The return value (0) is written to `dest`.
    fn pthread_rwlock_wrlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            // Note: this will deadlock if the lock is already locked by this
            // thread in any way.
            //
            // Relevant documentation:
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
            // An in-depth discussion on this topic:
            // https://github.com/rust-lang/rust/issues/53127
            //
            // FIXME: Detect and report the deadlock proactively. (We currently
            // report the deadlock only when no thread can continue execution,
            // but we could detect that this lock is already locked and report
            // an error.)
            this.rwlock_enqueue_and_block_writer(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref)?;
            this.write_null(dest)?;
        }

        interp_ok(())
    }
700
701    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
702        let this = self.eval_context_mut();
703
704        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
705
706        if rwlock.rwlock_ref.is_locked() {
707            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
708        } else {
709            this.rwlock_writer_lock(&rwlock.rwlock_ref)?;
710            interp_ok(Scalar::from_i32(0))
711        }
712    }
713
714    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
715        let this = self.eval_context_mut();
716
717        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();
718
719        if this.rwlock_reader_unlock(&rwlock.rwlock_ref)?
720            || this.rwlock_writer_unlock(&rwlock.rwlock_ref)?
721        {
722            interp_ok(())
723        } else {
724            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
725        }
726    }
727
    /// `pthread_rwlock_destroy(rwlock)`: destroy the rwlock; UB if it is still locked.
    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            throw_ub_format!("destroyed a locked rwlock");
        }

        // This write also deletes the interpreter state for this rwlock.
        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        let rwlock_place =
            this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?;
        this.write_uninit(&rwlock_place)?;

        interp_ok(())
    }
747
748    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
749        let this = self.eval_context_mut();
750
751        // no clock attribute on macOS
752        if this.tcx.sess.target.os != Os::MacOs {
753            // The default value of the clock attribute shall refer to the system
754            // clock.
755            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
756            let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
757            condattr_set_clock_id(this, attr_op, default_clock_id)?;
758        }
759
760        interp_ok(())
761    }
762
763    fn pthread_condattr_setclock(
764        &mut self,
765        attr_op: &OpTy<'tcx>,
766        clock_id_op: &OpTy<'tcx>,
767    ) -> InterpResult<'tcx, Scalar> {
768        let this = self.eval_context_mut();
769
770        let clock_id = this.read_scalar(clock_id_op)?;
771        if this.parse_clockid(clock_id).is_some() {
772            condattr_set_clock_id(this, attr_op, clock_id.to_i32()?)?;
773        } else {
774            let einval = this.eval_libc_i32("EINVAL");
775            return interp_ok(Scalar::from_i32(einval));
776        }
777
778        interp_ok(Scalar::from_i32(0))
779    }
780
781    fn pthread_condattr_getclock(
782        &mut self,
783        attr_op: &OpTy<'tcx>,
784        clk_id_op: &OpTy<'tcx>,
785    ) -> InterpResult<'tcx, ()> {
786        let this = self.eval_context_mut();
787
788        let clock_id = condattr_get_clock_id(this, attr_op)?;
789        this.write_scalar(
790            clock_id,
791            &this.deref_pointer_as(clk_id_op, this.libc_ty_layout("clockid_t"))?,
792        )?;
793
794        interp_ok(())
795    }
796
    /// `pthread_condattr_destroy(attr)`: de-initialize the attribute so double-destroys
    /// and use-after-destroy can be detected.
    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_condattr is UB, so check to make sure it's not uninit.
        // There's no clock attribute on macOS.
        if this.tcx.sess.target.os != Os::MacOs {
            condattr_get_clock_id(this, attr_op)?;
        }

        // De-init the entire thing.
        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
        )?;

        interp_ok(())
    }
814
    /// `pthread_cond_init(cond, attr)`: create a condvar with the clock taken from `attr`,
    /// or CLOCK_REALTIME if `attr` is null (or on macOS, which has no clock attribute).
    fn pthread_cond_init(
        &mut self,
        cond_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        // Default clock if `attr` is null, and on macOS where there is no clock attribute.
        let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == Os::MacOs {
            this.eval_libc("CLOCK_REALTIME")
        } else {
            condattr_get_clock_id(this, attr_op)?
        };
        let Some(clock) = this.parse_clockid(clock_id) else {
            // This is UB since this situation cannot arise when using pthread_condattr_setclock.
            throw_ub_format!("pthread_cond_init: invalid attributes (unsupported clock)")
        };

        cond_create(this, cond_op, clock)?;

        interp_ok(())
    }
838
839    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
840        let this = self.eval_context_mut();
841        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
842        this.condvar_signal(&condvar)?;
843        interp_ok(())
844    }
845
846    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
847        let this = self.eval_context_mut();
848        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
849        while this.condvar_signal(&condvar)? {}
850        interp_ok(())
851    }
852
853    fn pthread_cond_wait(
854        &mut self,
855        cond_op: &OpTy<'tcx>,
856        mutex_op: &OpTy<'tcx>,
857        dest: &MPlaceTy<'tcx>,
858    ) -> InterpResult<'tcx> {
859        let this = self.eval_context_mut();
860
861        let data = cond_get_data(this, cond_op)?.clone();
862        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();
863
864        this.condvar_wait(
865            data.condvar_ref,
866            mutex_ref,
867            None, // no timeout
868            Scalar::from_i32(0),
869            Scalar::from_i32(0), // retval_timeout -- unused
870            dest.clone(),
871        )?;
872
873        interp_ok(())
874    }
875
876    fn pthread_cond_timedwait(
877        &mut self,
878        cond_op: &OpTy<'tcx>,
879        mutex_op: &OpTy<'tcx>,
880        timeout_op: &OpTy<'tcx>,
881        dest: &MPlaceTy<'tcx>,
882        macos_relative_np: bool,
883    ) -> InterpResult<'tcx> {
884        let this = self.eval_context_mut();
885
886        let data = cond_get_data(this, cond_op)?.clone();
887        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();
888
889        // Extract the timeout.
890        let duration = match this
891            .read_timespec(&this.deref_pointer_as(timeout_op, this.libc_ty_layout("timespec"))?)?
892        {
893            Some(duration) => duration,
894            None => {
895                let einval = this.eval_libc("EINVAL");
896                this.write_scalar(einval, dest)?;
897                return interp_ok(());
898            }
899        };
900
901        let (clock, anchor) = if macos_relative_np {
902            // `pthread_cond_timedwait_relative_np` always measures time against the
903            // monotonic clock, regardless of the condvar clock.
904            (TimeoutClock::Monotonic, TimeoutAnchor::Relative)
905        } else {
906            if data.clock == TimeoutClock::RealTime {
907                this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
908            }
909
910            (data.clock, TimeoutAnchor::Absolute)
911        };
912
913        this.condvar_wait(
914            data.condvar_ref,
915            mutex_ref,
916            Some((clock, anchor, duration)),
917            Scalar::from_i32(0),
918            this.eval_libc("ETIMEDOUT"), // retval_timeout
919            dest.clone(),
920        )?;
921
922        interp_ok(())
923    }
924
925    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
926        let this = self.eval_context_mut();
927
928        // Reading the field also has the side-effect that we detect double-`destroy`
929        // since we make the field uninit below.
930        let condvar = &cond_get_data(this, cond_op)?.condvar_ref;
931        if !condvar.queue_is_empty() {
932            throw_ub_format!("destroying an awaited conditional variable");
933        }
934
935        // This write also deletes the interpreter state for this mutex.
936        // This might lead to false positives, see comment in pthread_mutexattr_destroy
937        let cond_place = this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?;
938        this.write_uninit(&cond_place)?;
939
940        interp_ok(())
941    }
942}