1use rustc_abi::Size;
2use rustc_target::spec::Os;
3
4use crate::concurrency::sync::{AccessKind, SyncObj};
5use crate::*;
6
7fn bytewise_equal<'tcx>(
10 ecx: &MiriInterpCx<'tcx>,
11 left: &MPlaceTy<'tcx>,
12 right: &MPlaceTy<'tcx>,
13) -> InterpResult<'tcx, bool> {
14 let size = left.layout.size;
15 assert_eq!(size, right.layout.size);
16
17 let left_bytes = ecx.read_bytes_ptr_strip_provenance(left.ptr(), size)?;
18 let right_bytes = ecx.read_bytes_ptr_strip_provenance(right.ptr(), size)?;
19
20 interp_ok(left_bytes == right_bytes)
21}
22
/// Value of the tracking byte while a pthread object has not yet been taken
/// over by us. Must match the byte that the libc static initializers put at
/// the init offset (asserted by the `*_init_offset` sanity checks below).
const PTHREAD_UNINIT: u8 = 0;
/// Value we store into the tracking byte once Miri has created the
/// corresponding sync object for a pthread object.
const PTHREAD_INIT: u8 = 1;
26
/// Returns the byte offset of the `kind` field inside `pthread_mutexattr_t`
/// for the current target OS.
#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &ecx.tcx.sess.target.os {
        // All supported targets keep the kind at the start of the attr struct.
        Os::Linux | Os::Illumos | Os::Solaris | Os::MacOs | Os::FreeBsd | Os::Android => 0,
        os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
    })
}
38
39fn mutexattr_get_kind<'tcx>(
40 ecx: &MiriInterpCx<'tcx>,
41 attr_ptr: &OpTy<'tcx>,
42) -> InterpResult<'tcx, i32> {
43 ecx.deref_pointer_and_read(
44 attr_ptr,
45 mutexattr_kind_offset(ecx)?,
46 ecx.libc_ty_layout("pthread_mutexattr_t"),
47 ecx.machine.layouts.i32,
48 )?
49 .to_i32()
50}
51
52fn mutexattr_set_kind<'tcx>(
53 ecx: &mut MiriInterpCx<'tcx>,
54 attr_ptr: &OpTy<'tcx>,
55 kind: i32,
56) -> InterpResult<'tcx, ()> {
57 ecx.deref_pointer_and_write(
58 attr_ptr,
59 mutexattr_kind_offset(ecx)?,
60 Scalar::from_i32(kind),
61 ecx.libc_ty_layout("pthread_mutexattr_t"),
62 ecx.machine.layouts.i32,
63 )
64}
65
66const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;
71
/// Translates the raw `kind` value stored in a `pthread_mutexattr_t` into our
/// `MutexKind` enum.
fn mutexattr_translate_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, MutexKind> {
    interp_ok(if kind == (ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")) {
        MutexKind::Normal
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
        MutexKind::ErrorCheck
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
        MutexKind::Recursive
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
        || kind == PTHREAD_MUTEX_KIND_UNCHANGED
    {
        // An attr that was initialized but never `settype`d still holds the
        // `PTHREAD_MUTEX_KIND_UNCHANGED` sentinel; treat that as the default.
        // NOTE(review): `PTHREAD_MUTEX_DEFAULT` may alias one of the concrete
        // kinds on some targets; it is checked last so a more specific kind
        // wins in that case.
        MutexKind::Default
    } else {
        throw_unsup_format!("unsupported type of mutex: {kind}");
    })
}
93
/// The mutex flavors we distinguish, mirroring the POSIX mutex types.
/// The relock/unlock behavior for each kind is implemented in
/// `pthread_mutex_lock`/`pthread_mutex_unlock` below.
#[derive(Debug, Clone, Copy)]
enum MutexKind {
    // Relocking deadlocks; unlocking while not the owner is UB.
    Normal,
    // Relocking and unlocking while not the owner are both UB.
    Default,
    // Relocking succeeds (lock count); wrong unlock returns EPERM.
    Recursive,
    // Relocking returns EDEADLK; wrong unlock returns EPERM.
    ErrorCheck,
}
106
/// The Miri-side state backing a `pthread_mutex_t`.
#[derive(Debug, Clone)]
struct PthreadMutex {
    // Handle to the interpreter's mutex object.
    mutex_ref: MutexRef,
    // The POSIX mutex type this mutex was created with.
    kind: MutexKind,
}
112
113impl SyncObj for PthreadMutex {
114 fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
115 if !self.mutex_ref.queue_is_empty() {
116 throw_ub_format!(
117 "{access_kind} of `pthread_mutex_t` is forbidden while the queue is non-empty"
118 );
119 }
120 interp_ok(())
121 }
122
123 fn delete_on_write(&self) -> bool {
124 true
125 }
126}
127
/// Returns the offset of the byte inside `pthread_mutex_t` that we use to
/// track whether the mutex was initialized (see `PTHREAD_UNINIT`/`PTHREAD_INIT`).
///
/// The first time this runs, it sanity-checks that every libc static
/// initializer for this target has `PTHREAD_UNINIT` at that offset, so a
/// statically initialized mutex cannot be confused with one we initialized.
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // NOTE(review): offset 4 presumably skips a leading field of the macOS
        // struct — confirm against the libc definition.
        Os::MacOs => 4,
        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Run the sanity check only once per machine (`replace` returns the previous flag).
    if !ecx.machine.pthread_mutex_sanity.replace(true) {
        let check_static_initializer = |name| {
            let static_initializer = ecx.eval_path(&["libc", name]);
            let init_field =
                static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
            let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
            assert_eq!(
                init, PTHREAD_UNINIT,
                "{name} is incompatible with our initialization logic"
            );
        };

        check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
        // Also check the OS-specific (non-portable) initializers.
        match &ecx.tcx.sess.target.os {
            Os::Linux => {
                check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
            }
            // These targets only have the default static initializer.
            Os::Illumos | Os::Solaris | Os::MacOs | Os::FreeBsd | Os::Android => {
            }
            os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
        }
    }

    interp_ok(offset)
}
171
172fn mutex_create<'tcx>(
174 ecx: &mut MiriInterpCx<'tcx>,
175 mutex_ptr: &OpTy<'tcx>,
176 kind: MutexKind,
177) -> InterpResult<'tcx, PthreadMutex> {
178 let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
179 let data = PthreadMutex { mutex_ref: MutexRef::new(), kind };
180 ecx.init_immovable_sync(&mutex, mutex_init_offset(ecx)?, PTHREAD_INIT, data.clone())?;
181 interp_ok(data)
182}
183
/// Returns the `PthreadMutex` state for the mutex at `mutex_ptr`, lazily
/// creating it from the static-initializer bytes if the tracking byte still
/// reads `PTHREAD_UNINIT`.
fn mutex_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
    'tcx: 'a,
{
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &mutex,
        mutex_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        |ecx| {
            // The mutex was statically initialized: derive its kind from the
            // initializer bytes and create a fresh sync object for it.
            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
            interp_ok(PthreadMutex { mutex_ref: MutexRef::new(), kind })
        },
    )
}
205
/// Determines the mutex kind by comparing the mutex's bytes against the libc
/// static initializer constants; throws UB if none of them match.
fn mutex_kind_from_static_initializer<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
    let is_initializer = |name| bytewise_equal(ecx, mutex, &ecx.eval_path(&["libc", name]));

    // The portable default initializer exists on every supported target.
    if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
        return interp_ok(MutexKind::Default);
    }
    // Linux additionally has non-portable initializers for other kinds.
    match &ecx.tcx.sess.target.os {
        Os::Linux =>
            if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::Recursive);
            } else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::ErrorCheck);
            },
        _ => {}
    }
    throw_ub_format!(
        "`pthread_mutex_t` was not properly initialized at this location, or it got overwritten"
    );
}
232
/// The Miri-side state backing a `pthread_rwlock_t`.
#[derive(Debug, Clone)]
struct PthreadRwLock {
    // Handle to the interpreter's rwlock object.
    rwlock_ref: RwLockRef,
}
241
242impl SyncObj for PthreadRwLock {
243 fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
244 if !self.rwlock_ref.queue_is_empty() {
245 throw_ub_format!(
246 "{access_kind} of `pthread_rwlock_t` is forbidden while the queue is non-empty"
247 );
248 }
249 interp_ok(())
250 }
251
252 fn delete_on_write(&self) -> bool {
253 true
254 }
255}
256
/// Returns the offset of the tracking byte inside `pthread_rwlock_t`.
///
/// On first use, sanity-checks that `PTHREAD_RWLOCK_INITIALIZER` has
/// `PTHREAD_UNINIT` at that offset (see `mutex_init_offset` for the scheme).
fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // NOTE(review): offset 4 presumably skips a leading field of the macOS
        // struct — confirm against the libc definition.
        Os::MacOs => 4,
        os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Run the sanity check only once per machine (`replace` returns the previous flag).
    if !ecx.machine.pthread_rwlock_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
        assert_eq!(
            init, PTHREAD_UNINIT,
            "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization logic"
        );
    }

    interp_ok(offset)
}
280
/// Returns the `PthreadRwLock` state for the rwlock at `rwlock_ptr`, lazily
/// creating it if the rwlock is still in its statically initialized state.
fn rwlock_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
    'tcx: 'a,
{
    let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &rwlock,
        rwlock_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        |ecx| {
            // With the tracking byte unset, the only valid state is an exact
            // copy of the static initializer; anything else is UB.
            if !bytewise_equal(
                ecx,
                &rwlock,
                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
            )? {
                throw_ub_format!(
                    "`pthread_rwlock_t` was not properly initialized at this location, or it got overwritten"
                );
            }
            interp_ok(PthreadRwLock { rwlock_ref: RwLockRef::new() })
        },
    )
}
308
/// Returns the byte offset of the clock field inside `pthread_condattr_t`.
/// Note: macOS is not listed here — its condattr has no clock field we track
/// (the callers below skip the clock logic on macOS).
#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
    })
}
321
322fn condattr_get_clock_id<'tcx>(
323 ecx: &MiriInterpCx<'tcx>,
324 attr_ptr: &OpTy<'tcx>,
325) -> InterpResult<'tcx, Scalar> {
326 ecx.deref_pointer_and_read(
327 attr_ptr,
328 condattr_clock_offset(ecx)?,
329 ecx.libc_ty_layout("pthread_condattr_t"),
330 ecx.machine.layouts.i32,
331 )
332}
333
334fn condattr_set_clock_id<'tcx>(
335 ecx: &mut MiriInterpCx<'tcx>,
336 attr_ptr: &OpTy<'tcx>,
337 clock_id: i32,
338) -> InterpResult<'tcx, ()> {
339 ecx.deref_pointer_and_write(
340 attr_ptr,
341 condattr_clock_offset(ecx)?,
342 Scalar::from_i32(clock_id),
343 ecx.libc_ty_layout("pthread_condattr_t"),
344 ecx.machine.layouts.i32,
345 )
346}
347
/// Returns the offset of the tracking byte inside `pthread_cond_t`.
///
/// On first use, sanity-checks that `PTHREAD_COND_INITIALIZER` has
/// `PTHREAD_UNINIT` at that offset (see `mutex_init_offset` for the scheme).
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &ecx.tcx.sess.target.os {
        Os::Linux | Os::Illumos | Os::Solaris | Os::FreeBsd | Os::Android => 0,
        // NOTE(review): offset 4 presumably skips a leading field of the macOS
        // struct — confirm against the libc definition.
        Os::MacOs => 4,
        os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Run the sanity check only once per machine (`replace` returns the previous flag).
    if !ecx.machine.pthread_condvar_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
        assert_eq!(
            init, PTHREAD_UNINIT,
            "PTHREAD_COND_INITIALIZER is incompatible with our initialization logic"
        );
    }

    interp_ok(offset)
}
375
/// The Miri-side state backing a `pthread_cond_t`.
#[derive(Debug, Clone)]
struct PthreadCondvar {
    // Handle to the interpreter's condvar object.
    condvar_ref: CondvarRef,
    // The clock that `pthread_cond_timedwait` timeouts are measured against.
    clock: TimeoutClock,
}
381
382impl SyncObj for PthreadCondvar {
383 fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
384 if !self.condvar_ref.queue_is_empty() {
385 throw_ub_format!(
386 "{access_kind} of `pthread_cond_t` is forbidden while the queue is non-empty"
387 );
388 }
389 interp_ok(())
390 }
391
392 fn delete_on_write(&self) -> bool {
393 true
394 }
395}
396
397fn cond_create<'tcx>(
398 ecx: &mut MiriInterpCx<'tcx>,
399 cond_ptr: &OpTy<'tcx>,
400 clock: TimeoutClock,
401) -> InterpResult<'tcx, PthreadCondvar> {
402 let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
403 let data = PthreadCondvar { condvar_ref: CondvarRef::new(), clock };
404 ecx.init_immovable_sync(&cond, cond_init_offset(ecx)?, PTHREAD_INIT, data.clone())?;
405 interp_ok(data)
406}
407
/// Returns the `PthreadCondvar` state for the condvar at `cond_ptr`, lazily
/// creating it if the condvar is still in its statically initialized state.
fn cond_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
    'tcx: 'a,
{
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    ecx.get_immovable_sync_with_static_init(
        &cond,
        cond_init_offset(ecx)?,
        PTHREAD_UNINIT,
        PTHREAD_INIT,
        |ecx| {
            // With the tracking byte unset, the only valid state is an exact
            // copy of the static initializer; anything else is UB.
            if !bytewise_equal(
                ecx,
                &cond,
                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
            )? {
                throw_ub_format!(
                    "`pthread_cond_t` was not properly initialized at this location, or it got overwritten"
                );
            }
            // Statically initialized condvars use the realtime clock, matching
            // the default set in `pthread_condattr_init`.
            interp_ok(PthreadCondvar {
                condvar_ref: CondvarRef::new(),
                clock: TimeoutClock::RealTime,
            })
        },
    )
}
439
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
/// Public shims for the pthread mutex/rwlock/condvar functions and their
/// attribute helpers. Each method mirrors the libc function of the same name.
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Shim for `pthread_mutexattr_init`: stores the "kind unchanged" sentinel.
    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;

        interp_ok(())
    }

    /// Shim for `pthread_mutexattr_settype`: stores a supported kind, or
    /// returns EINVAL for anything we do not recognize.
    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: &OpTy<'tcx>,
        kind_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.to_i32()?;
        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
        {
            // Make sure no real kind collides with our sentinel value.
            assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    /// Shim for `pthread_mutexattr_destroy`: deinitializes the attr's memory.
    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the kind fails on uninitialized memory, so this catches
        // destroying an attr that was never initialized (or already destroyed).
        mutexattr_get_kind(this, attr_op)?;

        // Mark the attr as uninitialized so later uses are detected.
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
        )?;

        interp_ok(())
    }

    /// Shim for `pthread_mutex_init`: creates the mutex with the kind from
    /// `attr_op`, or the default kind if the attr pointer is null.
    fn pthread_mutex_init(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let kind = if this.ptr_is_null(attr)? {
            MutexKind::Default
        } else {
            mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
        };

        mutex_create(this, mutex_op, kind)?;

        interp_ok(())
    }

    /// Shim for `pthread_mutex_lock`. May block; the return value is written
    /// to `dest` (possibly only once the thread is woken up again).
    fn pthread_mutex_lock(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        let ret = if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                // Owned by another thread: block, and have 0 written to `dest`
                // when we eventually acquire the mutex.
                this.mutex_enqueue_and_block(
                    mutex.mutex_ref,
                    Some((Scalar::from_i32(0), dest.clone())),
                );
                return interp_ok(());
            } else {
                // Relocking a mutex we already own: behavior depends on the kind.
                match mutex.kind {
                    MutexKind::Default =>
                        throw_ub_format!(
                            "trying to acquire default mutex already locked by the current thread"
                        ),
                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref)?;
                        0
                    }
                }
            }
        } else {
            // The mutex is free: acquire it immediately.
            this.mutex_lock(&mutex.mutex_ref)?;
            0
        };
        this.write_scalar(Scalar::from_i32(ret), dest)?;
        interp_ok(())
    }

    /// Shim for `pthread_mutex_trylock`: like `lock`, but returns EBUSY
    /// instead of blocking.
    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        interp_ok(Scalar::from_i32(if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.eval_libc_i32("EBUSY")
            } else {
                // Relocking: only recursive mutexes can acquire again.
                match mutex.kind {
                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
                        this.eval_libc_i32("EBUSY"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref)?;
                        0
                    }
                }
            }
        } else {
            this.mutex_lock(&mutex.mutex_ref)?;
            0
        }))
    }

    /// Shim for `pthread_mutex_unlock`. Unlocking a mutex the current thread
    /// does not hold is UB or an error, depending on the mutex kind.
    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
            // We held the mutex; the unlock succeeded.
            interp_ok(Scalar::from_i32(0))
        } else {
            // We were not the owner.
            match mutex.kind {
                MutexKind::Default =>
                    throw_ub_format!(
                        "unlocked a default mutex that was not locked by the current thread"
                    ),
                MutexKind::Normal =>
                    throw_ub_format!(
                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                    ),
                MutexKind::ErrorCheck | MutexKind::Recursive =>
                    interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
            }
        }
    }

    /// Shim for `pthread_mutex_destroy`: UB if the mutex is locked; otherwise
    /// deinitializes its memory.
    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if mutex.mutex_ref.owner().is_some() {
            throw_ub_format!("destroyed a locked mutex");
        }

        // Mark the memory as uninitialized; since `delete_on_write` is true,
        // this also removes the associated sync object.
        let mutex_place =
            this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?;
        this.write_uninit(&mutex_place)?;

        interp_ok(())
    }

    /// Shim for `pthread_rwlock_rdlock`. Blocks while a writer holds the lock;
    /// the return value is written to `dest`.
    fn pthread_rwlock_rdlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            // Block until the writer releases; 0 is written to `dest` on wakeup.
            this.rwlock_enqueue_and_block_reader(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref)?;
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    /// Shim for `pthread_rwlock_tryrdlock`: EBUSY instead of blocking.
    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref)?;
            interp_ok(Scalar::from_i32(0))
        }
    }

    /// Shim for `pthread_rwlock_wrlock`. Blocks while anyone holds the lock;
    /// the return value is written to `dest`.
    fn pthread_rwlock_wrlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            // NOTE(review): if the active thread already holds this lock, it
            // enqueues behind itself and thus presumably deadlocks rather than
            // being reported — confirm this is the intended semantics.
            this.rwlock_enqueue_and_block_writer(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref)?;
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    /// Shim for `pthread_rwlock_trywrlock`: EBUSY instead of blocking.
    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref)?;
            interp_ok(Scalar::from_i32(0))
        }
    }

    /// Shim for `pthread_rwlock_unlock`: releases a read or write lock held by
    /// the active thread; anything else is UB.
    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        // Try a reader-unlock first, then a writer-unlock; each returns
        // whether it applied to the active thread.
        if this.rwlock_reader_unlock(&rwlock.rwlock_ref)?
            || this.rwlock_writer_unlock(&rwlock.rwlock_ref)?
        {
            interp_ok(())
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
        }
    }

    /// Shim for `pthread_rwlock_destroy`: UB if the lock is held; otherwise
    /// deinitializes its memory.
    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            throw_ub_format!("destroyed a locked rwlock");
        }

        // Mark the memory as uninitialized; since `delete_on_write` is true,
        // this also removes the associated sync object.
        let rwlock_place =
            this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?;
        this.write_uninit(&rwlock_place)?;

        interp_ok(())
    }

    /// Shim for `pthread_condattr_init`. On macOS there is no clock field to
    /// initialize (see `condattr_clock_offset`).
    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        if this.tcx.sess.target.os != Os::MacOs {
            // The default condvar clock is CLOCK_REALTIME.
            let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
            condattr_set_clock_id(this, attr_op, default_clock_id)?;
        }

        interp_ok(())
    }

    /// Shim for `pthread_condattr_setclock`: EINVAL for clocks we do not support.
    fn pthread_condattr_setclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clock_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let clock_id = this.read_scalar(clock_id_op)?;
        if this.parse_clockid(clock_id).is_some() {
            condattr_set_clock_id(this, attr_op, clock_id.to_i32()?)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    /// Shim for `pthread_condattr_getclock`: writes the stored clock id to
    /// the `clockid_t` that `clk_id_op` points to.
    fn pthread_condattr_getclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clk_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let clock_id = condattr_get_clock_id(this, attr_op)?;
        this.write_scalar(
            clock_id,
            &this.deref_pointer_as(clk_id_op, this.libc_ty_layout("clockid_t"))?,
        )?;

        interp_ok(())
    }

    /// Shim for `pthread_condattr_destroy`: deinitializes the attr's memory.
    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the clock id fails on uninitialized memory, catching invalid
        // destroys (no clock field to check on macOS).
        if this.tcx.sess.target.os != Os::MacOs {
            condattr_get_clock_id(this, attr_op)?;
        }

        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
        )?;

        interp_ok(())
    }

    /// Shim for `pthread_cond_init`: creates the condvar with the clock from
    /// `attr_op` (CLOCK_REALTIME if the attr is null, or always on macOS).
    fn pthread_cond_init(
        &mut self,
        cond_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == Os::MacOs {
            this.eval_libc("CLOCK_REALTIME")
        } else {
            condattr_get_clock_id(this, attr_op)?
        };
        let Some(clock) = this.parse_clockid(clock_id) else {
            throw_ub_format!("pthread_cond_init: invalid attributes (unsupported clock)")
        };

        cond_create(this, cond_op, clock)?;

        interp_ok(())
    }

    /// Shim for `pthread_cond_signal`: wakes at most one waiter.
    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
        this.condvar_signal(&condvar)?;
        interp_ok(())
    }

    /// Shim for `pthread_cond_broadcast`: wakes all waiters.
    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
        // Keep signaling until no waiter is left.
        while this.condvar_signal(&condvar)? {}
        interp_ok(())
    }

    /// Shim for `pthread_cond_wait`: releases the mutex and blocks on the
    /// condvar; 0 is written to `dest` on wakeup.
    fn pthread_cond_wait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = cond_get_data(this, cond_op)?.clone();
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        // No timeout. NOTE(review): by analogy with the `condvar_wait` call in
        // `pthread_cond_timedwait` below, the two scalars appear to be the
        // success and timeout return values (both 0 here, as there is no
        // timeout) — confirm against `condvar_wait`'s signature.
        this.condvar_wait(
            data.condvar_ref,
            mutex_ref,
            None, Scalar::from_i32(0),
            Scalar::from_i32(0), dest.clone(),
        )?;

        interp_ok(())
    }

    /// Shim for `pthread_cond_timedwait`: like `pthread_cond_wait` but with a
    /// timeout; ETIMEDOUT is written to `dest` if it expires. With
    /// `macos_relative_np` set, the timeout is relative and measured on the
    /// monotonic clock (macOS's `_relative_np` variant).
    fn pthread_cond_timedwait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        timeout_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
        macos_relative_np: bool,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = cond_get_data(this, cond_op)?.clone();
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        // An invalid timespec yields EINVAL instead of blocking.
        let duration = match this
            .read_timespec(&this.deref_pointer_as(timeout_op, this.libc_ty_layout("timespec"))?)?
        {
            Some(duration) => duration,
            None => {
                let einval = this.eval_libc("EINVAL");
                this.write_scalar(einval, dest)?;
                return interp_ok(());
            }
        };

        let (clock, anchor) = if macos_relative_np {
            (TimeoutClock::Monotonic, TimeoutAnchor::Relative)
        } else {
            // The realtime clock requires host interaction, so it is only
            // available when isolation is disabled.
            if data.clock == TimeoutClock::RealTime {
                this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
            }

            (data.clock, TimeoutAnchor::Absolute)
        };

        // Block; on wakeup 0 is written to `dest`, on timeout ETIMEDOUT.
        this.condvar_wait(
            data.condvar_ref,
            mutex_ref,
            Some((clock, anchor, duration)),
            Scalar::from_i32(0),
            this.eval_libc("ETIMEDOUT"), dest.clone(),
        )?;

        interp_ok(())
    }

    /// Shim for `pthread_cond_destroy`: UB if any thread is still waiting;
    /// otherwise deinitializes the condvar's memory.
    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let condvar = &cond_get_data(this, cond_op)?.condvar_ref;
        if !condvar.queue_is_empty() {
            throw_ub_format!("destroying an awaited conditional variable");
        }

        // Mark the memory as uninitialized; since `delete_on_write` is true,
        // this also removes the associated sync object.
        let cond_place = this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?;
        this.write_uninit(&cond_place)?;

        interp_ok(())
    }
}