use rustc_abi::Size;

use crate::concurrency::sync::LAZY_INIT_COOKIE;
use crate::*;

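/// Do a bytewise comparison of the two places, using relaxed atomic reads. This is used to
/// check whether a synchronization primitive still matches its static initializer value.
///
/// The reads happen in chunks of 4 bytes, so all racing accesses must also use that access size.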
fn bytewise_equal_atomic_relaxed<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    left: &MPlaceTy<'tcx>,
    right: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, bool> {
    let size = left.layout.size;
    assert_eq!(size, right.layout.size);

    // We compare in chunks of 4 bytes, so that we are okay to race with (sufficiently
    // aligned) 4-byte atomic accesses.
    assert!(size.bytes() % 4 == 0);
    for i in 0..(size.bytes() / 4) {
        let offset = Size::from_bytes(i.strict_mul(4));
        let load = |place: &MPlaceTy<'tcx>| {
            let byte = place.offset(offset, ecx.machine.layouts.u32, ecx)?;
            ecx.read_scalar_atomic(&byte, AtomicReadOrd::Relaxed)?.to_u32()
        };
        let left = load(left)?;
        let right = load(right)?;
        if left != right {
            return interp_ok(false);
        }
    }

    interp_ok(true)
}

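// # pthread_mutexattr_t

/// The offset within `pthread_mutexattr_t` at which we store the mutex kind.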
#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "macos" | "freebsd" | "android" => 0,
        os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
    })
}

fn mutexattr_get_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn mutexattr_set_kind<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        Scalar::from_i32(kind),
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )
}

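/// A sentinel "kind" value written by `pthread_mutexattr_init` to mean "the kind was never
/// changed via `pthread_mutexattr_settype`". It must not collide with any of the real
/// `PTHREAD_MUTEX_*` kind values, which are all small integers.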
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;

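/// Translates a libc mutex kind into our `MutexKind`.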
fn mutexattr_translate_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, MutexKind> {
    interp_ok(if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL") {
        MutexKind::Normal
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
        MutexKind::ErrorCheck
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
        MutexKind::Recursive
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
        || kind == PTHREAD_MUTEX_KIND_UNCHANGED
    {
        // Checked last, since on some platforms `PTHREAD_MUTEX_DEFAULT` has the same value
        // as one of the kinds above (e.g. `PTHREAD_MUTEX_NORMAL` on glibc).
        MutexKind::Default
    } else {
        throw_unsup_format!("unsupported type of mutex: {kind}");
    })
}

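// # pthread_mutex_t

/// The kind of a mutex, as set via the mutex attributes.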
#[derive(Debug, Clone, Copy)]
enum MutexKind {
    Normal,
    Default,
    Recursive,
    ErrorCheck,
}

#[derive(Debug, Clone)]
struct PthreadMutex {
    mutex_ref: MutexRef,
    kind: MutexKind,
}

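/// Returns the offset of the u32 "init cookie" inside `pthread_mutex_t`. The cookie marks
/// the mutex as lazily initialized, and lets us detect a mutex that was moved after its
/// first use.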
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we use offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check the static initializers (but only once per execution): the field we
    // use for the cookie must not already hold `LAZY_INIT_COOKIE` in any of them.
    if !ecx.machine.pthread_mutex_sanity.replace(true) {
        let check_static_initializer = |name| {
            let static_initializer = ecx.eval_path(&["libc", name]);
            let init_field =
                static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
            let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
            assert_ne!(
                init, LAZY_INIT_COOKIE,
                "{name} is incompatible with our initialization cookie"
            );
        };

        check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
        // Check the platform-specific initializers.
        match &*ecx.tcx.sess.target.os {
            "linux" => {
                check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
            }
            "illumos" | "solaris" | "macos" | "freebsd" | "android" => {
                // No non-standard initializers on these targets.
            }
            os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
        }
    }

    interp_ok(offset)
}

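/// Eagerly creates and initializes a new mutex.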
fn mutex_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
    kind: MutexKind,
) -> InterpResult<'tcx, PthreadMutex> {
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    let id = ecx.machine.sync.mutex_create();
    let data = PthreadMutex { mutex_ref: id, kind };
    ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data.clone())?;
    interp_ok(data)
}

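/// Returns the mutex data stored at the address that `mutex_ptr` points to.
/// Raises an error if the mutex has been moved since its first use.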
fn mutex_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
    'tcx: 'a,
{
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    ecx.lazy_sync_get_data(
        &mutex,
        mutex_init_offset(ecx)?,
        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
        |ecx| {
            // On first use, infer the mutex kind from the static initializer bit pattern.
            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
            let id = ecx.machine.sync.mutex_create();
            interp_ok(PthreadMutex { mutex_ref: id, kind })
        },
    )
}

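/// Returns the kind of mutex that a static initializer bit pattern represents.
/// Every initializer recognized here is also sanity-checked in `mutex_init_offset`.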
fn mutex_kind_from_static_initializer<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
    let is_initializer =
        |name| bytewise_equal_atomic_relaxed(ecx, mutex, &ecx.eval_path(&["libc", name]));

    // PTHREAD_MUTEX_INITIALIZER is recognized on all targets.
    if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
        return interp_ok(MutexKind::Default);
    }
    // Support platform-specific initializers.
    match &*ecx.tcx.sess.target.os {
        "linux" =>
            if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::Recursive);
            } else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::ErrorCheck);
            },
        _ => {}
    }
    throw_unsup_format!("unsupported static initializer used for `pthread_mutex_t`");
}

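// # pthread_rwlock_t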
#[derive(Debug, Copy, Clone)]
struct PthreadRwLock {
    id: RwLockId,
}

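/// Returns the offset of the u32 "init cookie" inside `pthread_rwlock_t`.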
fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we use offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check the static initializer (but only once per execution): the field we
    // use for the cookie must not already hold `LAZY_INIT_COOKIE`.
    if !ecx.machine.pthread_rwlock_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

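/// Returns the rwlock data stored at the address that `rwlock_ptr` points to.
/// Raises an error if the rwlock has been moved since its first use.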
fn rwlock_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
    'tcx: 'a,
{
    let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
    ecx.lazy_sync_get_data(
        &rwlock,
        rwlock_init_offset(ecx)?,
        || throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
        |ecx| {
            // On first use, check that the rwlock is in its static-initializer state.
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &rwlock,
                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
            }
            let id = ecx.machine.sync.rwlock_create();
            interp_ok(PthreadRwLock { id })
        },
    )
}

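// # pthread_condattr_t

/// The offset within `pthread_condattr_t` at which we store the clock id.
/// macOS has no clock attribute, so callers skip the clock handling there.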
#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
    })
}

fn condattr_get_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn condattr_set_clock_id<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    clock_id: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        Scalar::from_i32(clock_id),
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )
}

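/// Translates a libc clock id into our `ClockId`.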
fn condattr_translate_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    raw_id: i32,
) -> InterpResult<'tcx, ClockId> {
    interp_ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
        ClockId::Realtime
    } else if raw_id == ecx.eval_libc_i32("CLOCK_MONOTONIC") {
        ClockId::Monotonic
    } else {
        throw_unsup_format!("unsupported clock id: {raw_id}");
    })
}

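// # pthread_cond_t

/// Returns the offset of the u32 "init cookie" inside `pthread_cond_t`.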
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we use offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check the static initializer (but only once per execution): the field we
    // use for the cookie must not already hold `LAZY_INIT_COOKIE`.
    if !ecx.machine.pthread_condvar_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

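/// The clock that a condvar uses for its timed waits.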
#[derive(Debug, Clone, Copy)]
enum ClockId {
    Realtime,
    Monotonic,
}

#[derive(Debug, Copy, Clone)]
struct PthreadCondvar {
    id: CondvarId,
    clock: ClockId,
}

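/// Eagerly creates and initializes a new condvar.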
fn cond_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
    clock: ClockId,
) -> InterpResult<'tcx, PthreadCondvar> {
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    let id = ecx.machine.sync.condvar_create();
    let data = PthreadCondvar { id, clock };
    ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data)?;
    interp_ok(data)
}

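/// Returns the condvar data stored at the address that `cond_ptr` points to.
/// Raises an error if the condvar has been moved since its first use.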
fn cond_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
    'tcx: 'a,
{
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    ecx.lazy_sync_get_data(
        &cond,
        cond_init_offset(ecx)?,
        || throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &cond,
                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
            }
            let id = ecx.machine.sync.condvar_create();
            // The static initializer uses the default clock, CLOCK_REALTIME.
            interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
        },
    )
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;

        interp_ok(())
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: &OpTy<'tcx>,
        kind_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.to_i32()?;
        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
        {
            // Make sure a real kind can never be confused with the "unchanged" sentinel.
            assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_mutexattr is UB, so check that it is initialized.
        mutexattr_get_kind(this, attr_op)?;

        // De-initialize the attribute so that double-destroy and use-after-destroy are detected.
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let kind = if this.ptr_is_null(attr)? {
            MutexKind::Default
        } else {
            mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
        };

        mutex_create(this, mutex_op, kind)?;

        interp_ok(())
    }

    fn pthread_mutex_lock(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        let ret = if this.mutex_is_locked(&mutex.mutex_ref) {
            let owner_thread = this.mutex_get_owner(&mutex.mutex_ref);
            if owner_thread != this.active_thread() {
                // Block until the mutex becomes available, then write 0 to `dest`.
                this.mutex_enqueue_and_block(
                    &mutex.mutex_ref,
                    Some((Scalar::from_i32(0), dest.clone())),
                );
                return interp_ok(());
            } else {
                // Trying to acquire the same mutex again.
                match mutex.kind {
                    MutexKind::Default =>
                        throw_ub_format!(
                            "trying to acquire default mutex already locked by the current thread"
                        ),
                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref);
            0
        };
        this.write_scalar(Scalar::from_i32(ret), dest)?;
        interp_ok(())
    }

    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        interp_ok(Scalar::from_i32(if this.mutex_is_locked(&mutex.mutex_ref) {
            let owner_thread = this.mutex_get_owner(&mutex.mutex_ref);
            if owner_thread != this.active_thread() {
                this.eval_libc_i32("EBUSY")
            } else {
                // Trying to acquire the same mutex again.
                match mutex.kind {
                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
                        this.eval_libc_i32("EBUSY"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref);
            0
        }))
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
            // The mutex was locked by the current thread.
            interp_ok(Scalar::from_i32(0))
        } else {
            // The mutex was locked by another thread or not locked at all. See the
            // "Unlock When Not Owner" column in
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
            match mutex.kind {
                MutexKind::Default =>
                    throw_ub_format!(
                        "unlocked a default mutex that was not locked by the current thread"
                    ),
                MutexKind::Normal =>
                    throw_ub_format!(
                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                    ),
                MutexKind::ErrorCheck | MutexKind::Recursive =>
                    interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
            }
        }
    }

    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the mutex data also detects double-destroy, since we mark the memory
        // uninit below.
        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if this.mutex_is_locked(&mutex.mutex_ref) {
            throw_ub_format!("destroyed a locked mutex");
        }

        // De-initialize the memory so that use-after-destroy is detected.
        this.write_uninit(
            &this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?,
        )?;
        interp_ok(())
    }

    fn pthread_rwlock_rdlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let id = rwlock_get_data(this, rwlock_op)?.id;

        if this.rwlock_is_write_locked(id) {
            this.rwlock_enqueue_and_block_reader(id, Scalar::from_i32(0), dest.clone());
        } else {
            this.rwlock_reader_lock(id);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let id = rwlock_get_data(this, rwlock_op)?.id;

        if this.rwlock_is_write_locked(id) {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_reader_lock(id);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_wrlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let id = rwlock_get_data(this, rwlock_op)?.id;

        if this.rwlock_is_locked(id) {
            // Note that this deadlocks if the current thread already holds the lock: POSIX
            // says the results are undefined in that case, so blocking forever is allowed.
            this.rwlock_enqueue_and_block_writer(id, Scalar::from_i32(0), dest.clone());
        } else {
            this.rwlock_writer_lock(id);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let id = rwlock_get_data(this, rwlock_op)?.id;

        if this.rwlock_is_locked(id) {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_writer_lock(id);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let id = rwlock_get_data(this, rwlock_op)?.id;

        if this.rwlock_reader_unlock(id)? || this.rwlock_writer_unlock(id)? {
            interp_ok(())
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
        }
    }

    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the rwlock data also detects double-destroy, since we mark the memory
        // uninit below.
        let id = rwlock_get_data(this, rwlock_op)?.id;

        if this.rwlock_is_locked(id) {
            throw_ub_format!("destroyed a locked rwlock");
        }

        // De-initialize the memory so that use-after-destroy is detected.
        this.write_uninit(
            &this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?,
        )?;
        interp_ok(())
    }

    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // macOS does not have a clock attribute.
        if this.tcx.sess.target.os != "macos" {
            // POSIX says the default value of the clock attribute shall refer to the
            // system clock, i.e. CLOCK_REALTIME.
            let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
            condattr_set_clock_id(this, attr_op, default_clock_id)?;
        }

        interp_ok(())
    }

    fn pthread_condattr_setclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clock_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let clock_id = this.read_scalar(clock_id_op)?.to_i32()?;
        if clock_id == this.eval_libc_i32("CLOCK_REALTIME")
            || clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")
        {
            condattr_set_clock_id(this, attr_op, clock_id)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_condattr_getclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clk_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let clock_id = condattr_get_clock_id(this, attr_op)?;
        this.write_scalar(
            Scalar::from_i32(clock_id),
            &this.deref_pointer_as(clk_id_op, this.libc_ty_layout("clockid_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_condattr is UB, so check that it is initialized.
        // (macOS does not have a clock attribute that we could check.)
        if this.tcx.sess.target.os != "macos" {
            condattr_get_clock_id(this, attr_op)?;
        }

        // De-initialize the attribute so that double-destroy and use-after-destroy are detected.
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_cond_init(
        &mut self,
        cond_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        // macOS does not have a clock attribute, so we always use the default clock there.
        let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == "macos" {
            this.eval_libc_i32("CLOCK_REALTIME")
        } else {
            condattr_get_clock_id(this, attr_op)?
        };
        let clock_id = condattr_translate_clock_id(this, clock_id)?;

        cond_create(this, cond_op, clock_id)?;

        interp_ok(())
    }

    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let id = cond_get_data(this, cond_op)?.id;
        this.condvar_signal(id)?;
        interp_ok(())
    }

    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let id = cond_get_data(this, cond_op)?.id;
        // Keep signaling until there is no waiter left to wake.
        while this.condvar_signal(id)? {}
        interp_ok(())
    }

    fn pthread_cond_wait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = *cond_get_data(this, cond_op)?;
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        this.condvar_wait(
            data.id,
            mutex_ref,
            None, // no timeout
            Scalar::from_i32(0), // retval on success
            Scalar::from_i32(0), // retval on timeout -- unused, as there is no timeout
            dest.clone(),
        )?;

        interp_ok(())
    }

    fn pthread_cond_timedwait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        abstime_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = *cond_get_data(this, cond_op)?;
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        // Read the absolute timeout; an invalid timespec means EINVAL.
        let duration = match this
            .read_timespec(&this.deref_pointer_as(abstime_op, this.libc_ty_layout("timespec"))?)?
        {
            Some(duration) => duration,
            None => {
                let einval = this.eval_libc("EINVAL");
                this.write_scalar(einval, dest)?;
                return interp_ok(());
            }
        };
        let timeout_clock = match data.clock {
            ClockId::Realtime => {
                this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
                TimeoutClock::RealTime
            }
            ClockId::Monotonic => TimeoutClock::Monotonic,
        };

        this.condvar_wait(
            data.id,
            mutex_ref,
            Some((timeout_clock, TimeoutAnchor::Absolute, duration)),
            Scalar::from_i32(0), // retval on success
            this.eval_libc("ETIMEDOUT"), // retval on timeout
            dest.clone(),
        )?;

        interp_ok(())
    }

    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the condvar data also detects double-destroy, since we mark the memory
        // uninit below.
        let id = cond_get_data(this, cond_op)?.id;
        if this.condvar_is_awaited(id) {
            throw_ub_format!("destroying an awaited conditional variable");
        }

        // De-initialize the memory so that use-after-destroy is detected.
        this.write_uninit(&this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?)?;

        interp_ok(())
    }
}