use std::sync::atomic::{AtomicBool, Ordering};
use rustc_abi::Size;
use crate::concurrency::sync::LAZY_INIT_COOKIE;
use crate::*;
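
// The pthread synchronization primitives (mutex, rwlock, condvar) are lazily
// initialized: on first use we store `LAZY_INIT_COOKIE` at a fixed offset
// inside the user-visible struct and associate that address with our own sync
// object. Seeing the cookie on a later access means "already initialized by
// us"; any other bit pattern is compared against the libc static initializers.

/// Do a bytewise comparison of the two places, using relaxed atomic reads so
/// that this check does not race with concurrent accesses to the primitive.
/// Both places must have the same size, which must be a multiple of 4 bytes,
/// since we read in 4-byte chunks.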
fn bytewise_equal_atomic_relaxed<'tcx>(
ecx: &MiriInterpCx<'tcx>,
left: &MPlaceTy<'tcx>,
right: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, bool> {
let size = left.layout.size;
assert_eq!(size, right.layout.size);
assert!(size.bytes() % 4 == 0);
for i in 0..(size.bytes() / 4) {
let offset = Size::from_bytes(i.strict_mul(4));
let load = |place: &MPlaceTy<'tcx>| {
let byte = place.offset(offset, ecx.machine.layouts.u32, ecx)?;
ecx.read_scalar_atomic(&byte, AtomicReadOrd::Relaxed)?.to_u32()
};
let left = load(left)?;
let right = load(right)?;
if left != right {
return interp_ok(false);
}
}
interp_ok(true)
}
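
/// Offset of the `kind` field that we use inside `pthread_mutexattr_t`.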
#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
interp_ok(match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" | "macos" | "freebsd" | "android" => 0,
os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
})
}
fn mutexattr_get_kind<'tcx>(
ecx: &MiriInterpCx<'tcx>,
attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
ecx.deref_pointer_and_read(
attr_ptr,
mutexattr_kind_offset(ecx)?,
ecx.libc_ty_layout("pthread_mutexattr_t"),
ecx.machine.layouts.i32,
)?
.to_i32()
}
fn mutexattr_set_kind<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
attr_ptr: &OpTy<'tcx>,
kind: i32,
) -> InterpResult<'tcx, ()> {
ecx.deref_pointer_and_write(
attr_ptr,
mutexattr_kind_offset(ecx)?,
Scalar::from_i32(kind),
ecx.libc_ty_layout("pthread_mutexattr_t"),
ecx.machine.layouts.i32,
)
}
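
/// Sentinel value stored by `pthread_mutexattr_init` to mean "the kind was
/// never set". We assume no libc uses this value for a real mutex kind;
/// `pthread_mutexattr_settype` asserts that for the kinds it accepts.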
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;
fn mutexattr_translate_kind<'tcx>(
ecx: &MiriInterpCx<'tcx>,
kind: i32,
) -> InterpResult<'tcx, MutexKind> {
    interp_ok(if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL") {
MutexKind::Normal
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
MutexKind::ErrorCheck
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
MutexKind::Recursive
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
|| kind == PTHREAD_MUTEX_KIND_UNCHANGED
{
MutexKind::Default
} else {
throw_unsup_format!("unsupported type of mutex: {kind}");
})
}
#[derive(Debug, Clone, Copy)]
enum MutexKind {
Normal,
Default,
Recursive,
ErrorCheck,
}
#[derive(Debug, Clone)]
struct PthreadMutex {
mutex_ref: MutexRef,
kind: MutexKind,
}
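
/// Returns the offset within `pthread_mutex_t` at which we store the
/// lazy-init cookie. On macOS the leading bytes hold the implementation's
/// signature field, so we use an offset past them.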
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
let offset = match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
"macos" => 4,
os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
};
let offset = Size::from_bytes(offset);
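    // Sanity-check (once) that the static initializers do not look like our
    // cookie at this offset, since otherwise we could not tell a lazily
    // initialized primitive apart from a statically initialized one.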
static SANITY: AtomicBool = AtomicBool::new(false);
if !SANITY.swap(true, Ordering::Relaxed) {
let check_static_initializer = |name| {
let static_initializer = ecx.eval_path(&["libc", name]);
let init_field =
static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
assert_ne!(
init, LAZY_INIT_COOKIE,
"{name} is incompatible with our initialization cookie"
);
};
check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
match &*ecx.tcx.sess.target.os {
"linux" => {
check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
}
"illumos" | "solaris" | "macos" | "freebsd" | "android" => {
}
os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
}
}
interp_ok(offset)
}
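
/// Eagerly creates and initializes a new mutex, as `pthread_mutex_init` does.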
fn mutex_create<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
mutex_ptr: &OpTy<'tcx>,
kind: MutexKind,
) -> InterpResult<'tcx, PthreadMutex> {
let mutex = ecx.deref_pointer(mutex_ptr)?;
let id = ecx.machine.sync.mutex_create();
let data = PthreadMutex { mutex_ref: id, kind };
ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data.clone())?;
interp_ok(data)
}
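
/// Returns the mutex data stored at the address that `mutex_ptr` points to.
/// Lazily initializes the mutex on first use, deriving its kind from the
/// static initializer bit pattern.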
fn mutex_get_data<'tcx, 'a>(
ecx: &'a mut MiriInterpCx<'tcx>,
mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
'tcx: 'a,
{
let mutex = ecx.deref_pointer(mutex_ptr)?;
ecx.lazy_sync_get_data(
&mutex,
mutex_init_offset(ecx)?,
|| throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
|ecx| {
let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
let id = ecx.machine.sync.mutex_create();
interp_ok(PthreadMutex { mutex_ref: id, kind })
},
)
}
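
/// Determines the kind of a statically initialized mutex by comparing its
/// bytes against the known initializer patterns.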
fn mutex_kind_from_static_initializer<'tcx>(
ecx: &MiriInterpCx<'tcx>,
mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
let is_initializer =
|name| bytewise_equal_atomic_relaxed(ecx, mutex, &ecx.eval_path(&["libc", name]));
if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
return interp_ok(MutexKind::Default);
}
match &*ecx.tcx.sess.target.os {
"linux" =>
if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
return interp_ok(MutexKind::Recursive);
} else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
return interp_ok(MutexKind::ErrorCheck);
},
_ => {}
}
throw_unsup_format!("unsupported static initializer used for `pthread_mutex_t`");
}
#[derive(Debug, Copy, Clone)]
struct PthreadRwLock {
id: RwLockId,
}
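
/// Returns the offset within `pthread_rwlock_t` at which we store the
/// lazy-init cookie; see `mutex_init_offset` for the reasoning.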
fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
let offset = match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
"macos" => 4,
os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
};
let offset = Size::from_bytes(offset);
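    // Check (once) that the static initializer does not collide with our cookie.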
static SANITY: AtomicBool = AtomicBool::new(false);
if !SANITY.swap(true, Ordering::Relaxed) {
let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
assert_ne!(
init, LAZY_INIT_COOKIE,
"PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
);
}
interp_ok(offset)
}
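
/// Returns the rwlock data stored at the address that `rwlock_ptr` points to,
/// lazily initializing it on first use. Only the standard static initializer
/// is supported.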
fn rwlock_get_data<'tcx, 'a>(
ecx: &'a mut MiriInterpCx<'tcx>,
rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
'tcx: 'a,
{
let rwlock = ecx.deref_pointer(rwlock_ptr)?;
ecx.lazy_sync_get_data(
&rwlock,
rwlock_init_offset(ecx)?,
|| throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
|ecx| {
if !bytewise_equal_atomic_relaxed(
ecx,
&rwlock,
&ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
)? {
throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
}
let id = ecx.machine.sync.rwlock_create();
interp_ok(PthreadRwLock { id })
},
)
}
#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
interp_ok(match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
})
}
fn condattr_get_clock_id<'tcx>(
ecx: &MiriInterpCx<'tcx>,
attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
ecx.deref_pointer_and_read(
attr_ptr,
condattr_clock_offset(ecx)?,
ecx.libc_ty_layout("pthread_condattr_t"),
ecx.machine.layouts.i32,
)?
.to_i32()
}
fn condattr_set_clock_id<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
attr_ptr: &OpTy<'tcx>,
clock_id: i32,
) -> InterpResult<'tcx, ()> {
ecx.deref_pointer_and_write(
attr_ptr,
condattr_clock_offset(ecx)?,
Scalar::from_i32(clock_id),
ecx.libc_ty_layout("pthread_condattr_t"),
ecx.machine.layouts.i32,
)
}
fn condattr_translate_clock_id<'tcx>(
ecx: &MiriInterpCx<'tcx>,
raw_id: i32,
) -> InterpResult<'tcx, ClockId> {
interp_ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
ClockId::Realtime
} else if raw_id == ecx.eval_libc_i32("CLOCK_MONOTONIC") {
ClockId::Monotonic
} else {
throw_unsup_format!("unsupported clock id: {raw_id}");
})
}
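
/// Returns the offset within `pthread_cond_t` at which we store the lazy-init
/// cookie; see `mutex_init_offset` for the reasoning.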
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
let offset = match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
"macos" => 4,
os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
};
let offset = Size::from_bytes(offset);
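    // Check (once) that the static initializer does not collide with our cookie.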
static SANITY: AtomicBool = AtomicBool::new(false);
if !SANITY.swap(true, Ordering::Relaxed) {
let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
assert_ne!(
init, LAZY_INIT_COOKIE,
"PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
);
}
interp_ok(offset)
}
#[derive(Debug, Clone, Copy)]
enum ClockId {
Realtime,
Monotonic,
}
#[derive(Debug, Copy, Clone)]
struct PthreadCondvar {
id: CondvarId,
clock: ClockId,
}
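
/// Eagerly creates and initializes a new condvar, as `pthread_cond_init` does.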
fn cond_create<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
cond_ptr: &OpTy<'tcx>,
clock: ClockId,
) -> InterpResult<'tcx, PthreadCondvar> {
let cond = ecx.deref_pointer(cond_ptr)?;
let id = ecx.machine.sync.condvar_create();
let data = PthreadCondvar { id, clock };
ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data)?;
interp_ok(data)
}
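
/// Returns the condvar data stored at the address that `cond_ptr` points to,
/// lazily initializing it on first use.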
fn cond_get_data<'tcx, 'a>(
ecx: &'a mut MiriInterpCx<'tcx>,
cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
'tcx: 'a,
{
let cond = ecx.deref_pointer(cond_ptr)?;
ecx.lazy_sync_get_data(
&cond,
cond_init_offset(ecx)?,
|| throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
|ecx| {
if !bytewise_equal_atomic_relaxed(
ecx,
&cond,
&ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
)? {
throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
}
let id = ecx.machine.sync.condvar_create();
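            // POSIX makes CLOCK_REALTIME the default clock attribute, so that
            // is what a statically initialized condvar uses.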
interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
},
)
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;
interp_ok(())
}
fn pthread_mutexattr_settype(
&mut self,
attr_op: &OpTy<'tcx>,
kind_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let kind = this.read_scalar(kind_op)?.to_i32()?;
if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
|| kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
|| kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
|| kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
{
assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
mutexattr_set_kind(this, attr_op, kind)?;
} else {
let einval = this.eval_libc_i32("EINVAL");
return interp_ok(Scalar::from_i32(einval));
}
interp_ok(Scalar::from_i32(0))
}
fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
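        // Destroying an uninit pthread_mutexattr is UB; reading the kind
        // field reports that error if the attribute was never initialized.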
mutexattr_get_kind(this, attr_op)?;
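        // De-initialize the attribute so that double-destroys are caught.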
this.write_uninit(
&this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
)?;
interp_ok(())
}
fn pthread_mutex_init(
&mut self,
mutex_op: &OpTy<'tcx>,
attr_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let attr = this.read_pointer(attr_op)?;
let kind = if this.ptr_is_null(attr)? {
MutexKind::Default
} else {
mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
};
mutex_create(this, mutex_op, kind)?;
interp_ok(())
}
fn pthread_mutex_lock(
&mut self,
mutex_op: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let mutex = mutex_get_data(this, mutex_op)?.clone();
let ret = if this.mutex_is_locked(&mutex.mutex_ref) {
let owner_thread = this.mutex_get_owner(&mutex.mutex_ref);
if owner_thread != this.active_thread() {
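                // Another thread holds the mutex: block, and have 0 written
                // to `dest` once we acquire it.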
this.mutex_enqueue_and_block(
&mutex.mutex_ref,
Some((Scalar::from_i32(0), dest.clone())),
);
return interp_ok(());
} else {
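                // The active thread already holds this mutex; what happens
                // next depends on the mutex kind.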
match mutex.kind {
MutexKind::Default =>
throw_ub_format!(
"trying to acquire default mutex already locked by the current thread"
),
MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
MutexKind::Recursive => {
this.mutex_lock(&mutex.mutex_ref);
0
}
}
}
} else {
this.mutex_lock(&mutex.mutex_ref);
0
};
this.write_scalar(Scalar::from_i32(ret), dest)?;
interp_ok(())
}
fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let mutex = mutex_get_data(this, mutex_op)?.clone();
interp_ok(Scalar::from_i32(if this.mutex_is_locked(&mutex.mutex_ref) {
let owner_thread = this.mutex_get_owner(&mutex.mutex_ref);
if owner_thread != this.active_thread() {
this.eval_libc_i32("EBUSY")
} else {
match mutex.kind {
MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
this.eval_libc_i32("EBUSY"),
MutexKind::Recursive => {
this.mutex_lock(&mutex.mutex_ref);
0
}
}
}
} else {
this.mutex_lock(&mutex.mutex_ref);
0
}))
}
fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let mutex = mutex_get_data(this, mutex_op)?.clone();
if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
interp_ok(Scalar::from_i32(0))
} else {
match mutex.kind {
MutexKind::Default =>
throw_ub_format!(
"unlocked a default mutex that was not locked by the current thread"
),
MutexKind::Normal =>
throw_ub_format!(
"unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
),
MutexKind::ErrorCheck | MutexKind::Recursive =>
interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
}
}
}
fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let mutex = mutex_get_data(this, mutex_op)?.clone();
if this.mutex_is_locked(&mutex.mutex_ref) {
throw_ub_format!("destroyed a locked mutex");
}
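        // De-initialize the mutex so that use-after-destroy is caught.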
this.write_uninit(
&this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?,
)?;
interp_ok(())
}
fn pthread_rwlock_rdlock(
&mut self,
rwlock_op: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = rwlock_get_data(this, rwlock_op)?.id;
if this.rwlock_is_write_locked(id) {
this.rwlock_enqueue_and_block_reader(id, Scalar::from_i32(0), dest.clone());
} else {
this.rwlock_reader_lock(id);
this.write_null(dest)?;
}
interp_ok(())
}
fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let id = rwlock_get_data(this, rwlock_op)?.id;
if this.rwlock_is_write_locked(id) {
interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
} else {
this.rwlock_reader_lock(id);
interp_ok(Scalar::from_i32(0))
}
}
fn pthread_rwlock_wrlock(
&mut self,
rwlock_op: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = rwlock_get_data(this, rwlock_op)?.id;
if this.rwlock_is_locked(id) {
this.rwlock_enqueue_and_block_writer(id, Scalar::from_i32(0), dest.clone());
} else {
this.rwlock_writer_lock(id);
this.write_null(dest)?;
}
interp_ok(())
}
fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let id = rwlock_get_data(this, rwlock_op)?.id;
if this.rwlock_is_locked(id) {
interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
} else {
this.rwlock_writer_lock(id);
interp_ok(Scalar::from_i32(0))
}
}
fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let id = rwlock_get_data(this, rwlock_op)?.id;
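        // Try unlocking as a reader first, then as a writer; each call
        // returns `false` if the active thread does not hold the lock in
        // that mode.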
if this.rwlock_reader_unlock(id)? || this.rwlock_writer_unlock(id)? {
interp_ok(())
} else {
throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
}
}
fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let id = rwlock_get_data(this, rwlock_op)?.id;
if this.rwlock_is_locked(id) {
throw_ub_format!("destroyed a locked rwlock");
}
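        // De-initialize the rwlock so that use-after-destroy is caught.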
this.write_uninit(
&this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?,
)?;
interp_ok(())
}
fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
if this.tcx.sess.target.os != "macos" {
let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
condattr_set_clock_id(this, attr_op, default_clock_id)?;
}
interp_ok(())
}
fn pthread_condattr_setclock(
&mut self,
attr_op: &OpTy<'tcx>,
clock_id_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let clock_id = this.read_scalar(clock_id_op)?.to_i32()?;
if clock_id == this.eval_libc_i32("CLOCK_REALTIME")
|| clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")
{
condattr_set_clock_id(this, attr_op, clock_id)?;
} else {
let einval = this.eval_libc_i32("EINVAL");
return interp_ok(Scalar::from_i32(einval));
}
interp_ok(Scalar::from_i32(0))
}
fn pthread_condattr_getclock(
&mut self,
attr_op: &OpTy<'tcx>,
clk_id_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let clock_id = condattr_get_clock_id(this, attr_op)?;
this.write_scalar(Scalar::from_i32(clock_id), &this.deref_pointer(clk_id_op)?)?;
interp_ok(())
}
fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
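        // On everything but macOS, reading the clock field checks that the
        // attribute is initialized (macOS has no clock field to read).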
if this.tcx.sess.target.os != "macos" {
condattr_get_clock_id(this, attr_op)?;
}
this.write_uninit(
&this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
)?;
interp_ok(())
}
fn pthread_cond_init(
&mut self,
cond_op: &OpTy<'tcx>,
attr_op: &OpTy<'tcx>,
) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let attr = this.read_pointer(attr_op)?;
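        // A null attribute means the default attributes. On macOS the clock
        // is not configurable, so it is always CLOCK_REALTIME there.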
let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == "macos" {
this.eval_libc_i32("CLOCK_REALTIME")
} else {
condattr_get_clock_id(this, attr_op)?
};
let clock_id = condattr_translate_clock_id(this, clock_id)?;
cond_create(this, cond_op, clock_id)?;
interp_ok(())
}
fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let id = cond_get_data(this, cond_op)?.id;
this.condvar_signal(id)?;
interp_ok(())
}
fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let id = cond_get_data(this, cond_op)?.id;
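        // Keep signaling until no waiters remain.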
while this.condvar_signal(id)? {}
interp_ok(())
}
fn pthread_cond_wait(
&mut self,
cond_op: &OpTy<'tcx>,
mutex_op: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let data = *cond_get_data(this, cond_op)?;
let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();
this.condvar_wait(
data.id,
mutex_ref,
            None, // no timeout for a plain `pthread_cond_wait`
            Scalar::from_i32(0), // retval_succ: 0 on success
            Scalar::from_i32(0), // retval_timeout: unused, there is no timeout
            dest.clone(),
)?;
interp_ok(())
}
fn pthread_cond_timedwait(
&mut self,
cond_op: &OpTy<'tcx>,
mutex_op: &OpTy<'tcx>,
abstime_op: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let data = *cond_get_data(this, cond_op)?;
let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();
let duration = match this
.read_timespec(&this.deref_pointer_as(abstime_op, this.libc_ty_layout("timespec"))?)?
{
Some(duration) => duration,
None => {
let einval = this.eval_libc("EINVAL");
this.write_scalar(einval, dest)?;
return interp_ok(());
}
};
let timeout_clock = match data.clock {
ClockId::Realtime => {
this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
TimeoutClock::RealTime
}
ClockId::Monotonic => TimeoutClock::Monotonic,
};
this.condvar_wait(
data.id,
mutex_ref,
Some((timeout_clock, TimeoutAnchor::Absolute, duration)),
            Scalar::from_i32(0), // retval_succ: 0 on success
            this.eval_libc("ETIMEDOUT"), // retval_timeout: ETIMEDOUT on timeout
            dest.clone(),
)?;
interp_ok(())
}
fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let id = cond_get_data(this, cond_op)?.id;
if this.condvar_is_awaited(id) {
throw_ub_format!("destroying an awaited conditional variable");
}
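        // De-initialize the condvar so that use-after-destroy is caught.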
this.write_uninit(&this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?)?;
interp_ok(())
}
}