use rustc_abi::Size;

use crate::concurrency::sync::LAZY_INIT_COOKIE;
use crate::*;

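/// Do a bytewise comparison of the two places, using relaxed atomic reads.
/// This is used to check whether a synchronization primitive still matches its
/// static initializer value.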
fn bytewise_equal_atomic_relaxed<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    left: &MPlaceTy<'tcx>,
    right: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, bool> {
    let size = left.layout.size;
    assert_eq!(size, right.layout.size);

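    // We do the comparison in chunks of 4 bytes, so that we are okay to race
    // with (sufficiently aligned) 4-byte atomic accesses.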
    assert!(size.bytes() % 4 == 0);
    for i in 0..(size.bytes() / 4) {
        let offset = Size::from_bytes(i.strict_mul(4));
        let load = |place: &MPlaceTy<'tcx>| {
            let byte = place.offset(offset, ecx.machine.layouts.u32, ecx)?;
            ecx.read_scalar_atomic(&byte, AtomicReadOrd::Relaxed)?.to_u32()
        };
        let left = load(left)?;
        let right = load(right)?;
        if left != right {
            return interp_ok(false);
        }
    }

    interp_ok(true)
}

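// # pthread_mutexattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - kind: i32
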
#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "macos" | "freebsd" | "android" => 0,
        os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
    })
}

fn mutexattr_get_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn mutexattr_set_kind<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        Scalar::from_i32(kind),
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )
}

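/// The kind stored by `pthread_mutexattr_init`, indicating that `pthread_mutexattr_settype` was
/// never called on this attribute. The value is chosen to not conflict with any of the libc
/// mutex kinds (see the `assert_ne!` in `pthread_mutexattr_settype`).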
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;

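/// Translates the libc mutex kind into our internal `MutexKind`.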
fn mutexattr_translate_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, MutexKind> {
    interp_ok(if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL") {
        MutexKind::Normal
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
        MutexKind::ErrorCheck
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
        MutexKind::Recursive
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
        || kind == PTHREAD_MUTEX_KIND_UNCHANGED
    {
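        // Checked last because on glibc, `PTHREAD_MUTEX_DEFAULT` has the same value as
        // `PTHREAD_MUTEX_NORMAL`, and the more specific kind should win. An attribute whose
        // kind was never set (`PTHREAD_MUTEX_KIND_UNCHANGED`) also maps to the default kind.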
        MutexKind::Default
    } else {
        throw_unsup_format!("unsupported type of mutex: {kind}");
    })
}

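/// The mutex kinds we support, which determine the behavior of relocking and unlocking.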
#[derive(Debug, Clone, Copy)]
enum MutexKind {
    Normal,
    Default,
    Recursive,
    ErrorCheck,
}

#[derive(Debug, Clone)]
struct PthreadMutex {
    mutex_ref: MutexRef,
    kind: MutexKind,
}

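/// The offset of the `init` field within `pthread_mutex_t`, where we store our lazy-init cookie.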
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

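    // Check that none of the static initializers clash with our lazy-init cookie
    // (only once per execution).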
    if !ecx.machine.pthread_mutex_sanity.replace(true) {
        let check_static_initializer = |name| {
            let static_initializer = ecx.eval_path(&["libc", name]);
            let init_field =
                static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
            let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
            assert_ne!(
                init, LAZY_INIT_COOKIE,
                "{name} is incompatible with our initialization cookie"
            );
        };

        check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
        // Check non-standard initializers.
        match &*ecx.tcx.sess.target.os {
            "linux" => {
                check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
            }
            "illumos" | "solaris" | "macos" | "freebsd" | "android" => {
                // No non-standard initializers.
            }
            os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
        }
    }

    interp_ok(offset)
}

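/// Eagerly create and initialize a new mutex.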
fn mutex_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
    kind: MutexKind,
) -> InterpResult<'tcx, PthreadMutex> {
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    let id = ecx.machine.sync.mutex_create();
    let data = PthreadMutex { mutex_ref: id, kind };
    ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data.clone())?;
    interp_ok(data)
}

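/// Returns the mutex data stored at the address that `mutex_ptr` points to,
/// lazily initializing it from the static initializer if needed.
/// Will raise an error if the mutex has been moved since its first use.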
fn mutex_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
    'tcx: 'a,
{
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    ecx.lazy_sync_get_data(
        &mutex,
        mutex_init_offset(ecx)?,
        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
        |ecx| {
            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
            let id = ecx.machine.sync.mutex_create();
            interp_ok(PthreadMutex { mutex_ref: id, kind })
        },
    )
}

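/// Returns the mutex kind encoded by the static initializer that `mutex` currently holds.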
fn mutex_kind_from_static_initializer<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
    // All the initializers recognized here *must* also be sanity-checked in `mutex_init_offset`.
    let is_initializer =
        |name| bytewise_equal_atomic_relaxed(ecx, mutex, &ecx.eval_path(&["libc", name]));

    // PTHREAD_MUTEX_INITIALIZER is recognized on all targets.
    if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
        return interp_ok(MutexKind::Default);
    }
    // Support additional platform-specific initializers.
    match &*ecx.tcx.sess.target.os {
        "linux" =>
            if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::Recursive);
            } else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::ErrorCheck);
            },
        _ => {}
    }
    throw_unsup_format!("unsupported static initializer used for `pthread_mutex_t`");
}

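// # pthread_rwlock_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32
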
#[derive(Debug, Clone)]
struct PthreadRwLock {
    rwlock_ref: RwLockRef,
}

fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Check that the static initializer does not clash with our lazy-init cookie
    // (only once per execution).
    if !ecx.machine.pthread_rwlock_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

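/// Returns the rwlock data stored at the address that `rwlock_ptr` points to,
/// lazily initializing it from the static initializer if needed.
/// Will raise an error if the rwlock has been moved since its first use.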
fn rwlock_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
    'tcx: 'a,
{
    let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
    ecx.lazy_sync_get_data(
        &rwlock,
        rwlock_init_offset(ecx)?,
        || throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &rwlock,
                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
            }
            let rwlock_ref = ecx.machine.sync.rwlock_create();
            interp_ok(PthreadRwLock { rwlock_ref })
        },
    )
}

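// # pthread_condattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - clock: i32
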
#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS does not have a clock attribute.
        os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
    })
}

fn condattr_get_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn condattr_set_clock_id<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    clock_id: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        Scalar::from_i32(clock_id),
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )
}

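/// Translates the libc clock id into our internal `ClockId`.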
fn condattr_translate_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    raw_id: i32,
) -> InterpResult<'tcx, ClockId> {
    interp_ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
        ClockId::Realtime
    } else if raw_id == ecx.eval_libc_i32("CLOCK_MONOTONIC") {
        ClockId::Monotonic
    } else {
        throw_unsup_format!("unsupported clock id: {raw_id}");
    })
}

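// # pthread_cond_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32
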
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Check that the static initializer does not clash with our lazy-init cookie
    // (only once per execution).
    if !ecx.machine.pthread_condvar_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

#[derive(Debug, Clone, Copy)]
enum ClockId {
    Realtime,
    Monotonic,
}

#[derive(Debug, Copy, Clone)]
struct PthreadCondvar {
    id: CondvarId,
    clock: ClockId,
}

fn cond_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
    clock: ClockId,
) -> InterpResult<'tcx, PthreadCondvar> {
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    let id = ecx.machine.sync.condvar_create();
    let data = PthreadCondvar { id, clock };
    ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data)?;
    interp_ok(data)
}

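/// Returns the condvar data stored at the address that `cond_ptr` points to,
/// lazily initializing it from the static initializer if needed.
/// Will raise an error if the condvar has been moved since its first use.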
fn cond_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
    'tcx: 'a,
{
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    ecx.lazy_sync_get_data(
        &cond,
        cond_init_offset(ecx)?,
        || throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &cond,
                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
            }
            let id = ecx.machine.sync.condvar_create();
            // Statically initialized condvars use the default clock, `CLOCK_REALTIME`.
            interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
        },
    )
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;

        interp_ok(())
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: &OpTy<'tcx>,
        kind_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.to_i32()?;
        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
        {
            // Make sure we do not mix this up with the "kind unchanged" marker value.
            assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_mutexattr is UB, so check to make sure it's not uninit.
        mutexattr_get_kind(this, attr_op)?;

        // To catch double-destroys, we de-initialize the attribute. This is technically not
        // right and might lead to false positives (e.g. for code that destroys an attribute
        // and then calls `assume_init` on the underlying `MaybeUninit`), but it works for the
        // way the pthread APIs are used in practice.
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let kind = if this.ptr_is_null(attr)? {
            MutexKind::Default
        } else {
            mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
        };

        mutex_create(this, mutex_op, kind)?;

        interp_ok(())
    }

    fn pthread_mutex_lock(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        let ret = if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.mutex_enqueue_and_block(
                    mutex.mutex_ref,
                    Some((Scalar::from_i32(0), dest.clone())),
                );
                return interp_ok(());
            } else {
                // Trying to acquire the same mutex again.
                match mutex.kind {
                    MutexKind::Default =>
                        throw_ub_format!(
                            "trying to acquire default mutex already locked by the current thread"
                        ),
                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref);
            0
        };
        this.write_scalar(Scalar::from_i32(ret), dest)?;
        interp_ok(())
    }

    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        interp_ok(Scalar::from_i32(if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.eval_libc_i32("EBUSY")
            } else {
                // Trying to acquire the same mutex again.
                match mutex.kind {
                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
                        this.eval_libc_i32("EBUSY"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref);
            0
        }))
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
            // The mutex was locked by the current thread.
            interp_ok(Scalar::from_i32(0))
        } else {
            // The mutex was locked by another thread or not locked at all. See
            // the "Unlock When Not Owner" column in
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
            match mutex.kind {
                MutexKind::Default =>
                    throw_ub_format!(
                        "unlocked a default mutex that was not locked by the current thread"
                    ),
                MutexKind::Normal =>
                    throw_ub_format!(
                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                    ),
                MutexKind::ErrorCheck | MutexKind::Recursive =>
                    interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
            }
        }
    }

    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if mutex.mutex_ref.owner().is_some() {
            throw_ub_format!("destroyed a locked mutex");
        }

        // This might lead to false positives, see comment in `pthread_mutexattr_destroy`.
        this.write_uninit(
            &this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?,
        )?;
        // FIXME: delete interpreter state associated with this mutex.

        interp_ok(())
    }

    fn pthread_rwlock_rdlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            this.rwlock_enqueue_and_block_reader(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_wrlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            // Note: this will deadlock if the lock is already held by the current thread.
            // POSIX says a thread that tries to acquire a write lock it already holds "may
            // deadlock", so blocking here matches the behavior of common platforms.
            this.rwlock_enqueue_and_block_writer(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if this.rwlock_reader_unlock(&rwlock.rwlock_ref)?
            || this.rwlock_writer_unlock(&rwlock.rwlock_ref)?
        {
            interp_ok(())
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
        }
    }

    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            throw_ub_format!("destroyed a locked rwlock");
        }

        // This might lead to false positives, see comment in `pthread_mutexattr_destroy`.
        this.write_uninit(
            &this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?,
        )?;
        // FIXME: delete interpreter state associated with this rwlock.

        interp_ok(())
    }

    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // There is no clock attribute on macOS.
        if this.tcx.sess.target.os != "macos" {
            // The default value of the clock attribute shall refer to the system clock.
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
            let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
            condattr_set_clock_id(this, attr_op, default_clock_id)?;
        }

        interp_ok(())
    }

    fn pthread_condattr_setclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clock_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let clock_id = this.read_scalar(clock_id_op)?.to_i32()?;
        if clock_id == this.eval_libc_i32("CLOCK_REALTIME")
            || clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")
        {
            condattr_set_clock_id(this, attr_op, clock_id)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_condattr_getclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clk_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let clock_id = condattr_get_clock_id(this, attr_op)?;
        this.write_scalar(
            Scalar::from_i32(clock_id),
            &this.deref_pointer_as(clk_id_op, this.libc_ty_layout("clockid_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_condattr is UB, so check to make sure it's not uninit.
        // (There is no clock attribute on macOS.)
        if this.tcx.sess.target.os != "macos" {
            condattr_get_clock_id(this, attr_op)?;
        }

        // De-initialize the entire thing; this might lead to false positives, see comment in
        // `pthread_mutexattr_destroy`.
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_cond_init(
        &mut self,
        cond_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        // Default clock if `attr` is null, and on macOS where there is no clock attribute.
        let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == "macos" {
            this.eval_libc_i32("CLOCK_REALTIME")
        } else {
            condattr_get_clock_id(this, attr_op)?
        };
        let clock_id = condattr_translate_clock_id(this, clock_id)?;

        cond_create(this, cond_op, clock_id)?;

        interp_ok(())
    }

    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let id = cond_get_data(this, cond_op)?.id;
        this.condvar_signal(id)?;
        interp_ok(())
    }

    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let id = cond_get_data(this, cond_op)?.id;
        // Keep signaling until no waiter is left.
        while this.condvar_signal(id)? {}
        interp_ok(())
    }

    fn pthread_cond_wait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = *cond_get_data(this, cond_op)?;
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        this.condvar_wait(
            data.id,
            mutex_ref,
            None, // no timeout
            Scalar::from_i32(0),
            Scalar::from_i32(0), // retval_timeout -- unused
            dest.clone(),
        )?;

        interp_ok(())
    }

    fn pthread_cond_timedwait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        abstime_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = *cond_get_data(this, cond_op)?;
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        // Extract the timeout.
        let duration = match this
            .read_timespec(&this.deref_pointer_as(abstime_op, this.libc_ty_layout("timespec"))?)?
        {
            Some(duration) => duration,
            None => {
                let einval = this.eval_libc("EINVAL");
                this.write_scalar(einval, dest)?;
                return interp_ok(());
            }
        };
        let timeout_clock = match data.clock {
            ClockId::Realtime => {
                this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
                TimeoutClock::RealTime
            }
            ClockId::Monotonic => TimeoutClock::Monotonic,
        };

        this.condvar_wait(
            data.id,
            mutex_ref,
            Some((timeout_clock, TimeoutAnchor::Absolute, duration)),
            Scalar::from_i32(0),
            this.eval_libc("ETIMEDOUT"), // retval_timeout
            dest.clone(),
        )?;

        interp_ok(())
    }

    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let id = cond_get_data(this, cond_op)?.id;
        if this.condvar_is_awaited(id) {
            throw_ub_format!("destroying an awaited conditional variable");
        }

        // This might lead to false positives, see comment in `pthread_mutexattr_destroy`.
        this.write_uninit(&this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?)?;
        // FIXME: delete interpreter state associated with this condvar.

        interp_ok(())
    }
}