pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;
A rustc InterpCx for Miri.
Aliased Type
struct MiriInterpCx<'tcx> {
pub machine: MiriMachine<'tcx>,
pub tcx: TyCtxtAt<'tcx>,
pub memory: Memory<'tcx, MiriMachine<'tcx>>,
pub recursion_limit: Limit,
/* private fields */
}
Fields
machine: MiriMachine<'tcx>
Stores the Machine instance.
Note: the stack is provided by the machine.
tcx: TyCtxtAt<'tcx>
The results of the type checker, from rustc. The span in this is the “root” of the evaluation, i.e., the const we are evaluating (if this is CTFE).
memory: Memory<'tcx, MiriMachine<'tcx>>
The virtual memory system.
recursion_limit: Limit
The recursion limit (cached from tcx.recursion_limit(())).
Trait Implementations
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn expose_ptr(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx>
fn ptr_from_addr_cast(&self, addr: u64) -> InterpResult<'tcx, Pointer>
fn adjust_alloc_root_pointer( &self, ptr: Pointer<CtfeProvenance>, tag: BorTag, kind: MemoryKind, ) -> InterpResult<'tcx, Pointer<Provenance>>
fn get_global_alloc_bytes( &self, id: AllocId, kind: MemoryKind, bytes: &[u8], align: Align, ) -> InterpResult<'tcx, MiriAllocBytes>
fn ptr_get_alloc( &self, ptr: Pointer<Provenance>, size: i64, ) -> Option<(AllocId, Size)>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn sb_retag_ptr_value( &mut self, kind: RetagKind, val: &ImmTy<'tcx>, ) -> InterpResult<'tcx, ImmTy<'tcx>>
fn sb_retag_place_contents( &mut self, kind: RetagKind, place: &PlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn sb_protect_place( &mut self, place: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx>>
fn sb_expose_tag(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx>
fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn have_module(&self, path: &[&str]) -> bool
fn eval_path(&self, path: &[&str]) -> MPlaceTy<'tcx>
fn eval_path_scalar(&self, path: &[&str]) -> Scalar
fn eval_libc_i32(&self, name: &str) -> i32
Helper function to get a libc constant as an i32.
fn eval_libc_u32(&self, name: &str) -> u32
Helper function to get a libc constant as a u32.
fn eval_windows(&self, module: &str, name: &str) -> Scalar
Helper function to get a windows constant as a Scalar.
fn eval_windows_u32(&self, module: &str, name: &str) -> u32
Helper function to get a windows constant as a u32.
fn eval_windows_u64(&self, module: &str, name: &str) -> u64
Helper function to get a windows constant as a u64.
fn libc_ty_layout(&self, name: &str) -> TyAndLayout<'tcx>
Helper function to get the TyAndLayout of a libc type.
fn windows_ty_layout(&self, name: &str) -> TyAndLayout<'tcx>
Helper function to get the TyAndLayout of a windows type.
fn libc_array_ty_layout(&self, name: &str, size: u64) -> TyAndLayout<'tcx>
Helper function to get the TyAndLayout of an array of the given libc type.
fn try_project_field_named<P: Projectable<'tcx, Provenance>>( &self, base: &P, name: &str, ) -> InterpResult<'tcx, Option<P>>
fn project_field_named<P: Projectable<'tcx, Provenance>>( &self, base: &P, name: &str, ) -> InterpResult<'tcx, P>
fn projectable_has_field<P: Projectable<'tcx, Provenance>>( &self, base: &P, name: &str, ) -> bool
Checks whether base (which must be a struct or union type) contains the name field.
fn write_int( &mut self, i: impl Into<i128>, dest: &impl Writeable<'tcx, Provenance>, ) -> InterpResult<'tcx>
Writes an integer to dest. The target type may be signed or unsigned; we try to do the right thing anyway. i128 can fit all integer types except for u128, so this method is fine for almost all integer types.
fn write_int_fields( &mut self, values: &[i128], dest: &impl Writeable<'tcx, Provenance>, ) -> InterpResult<'tcx>
fn write_int_fields_named( &mut self, values: &[(&str, i128)], dest: &impl Writeable<'tcx, Provenance>, ) -> InterpResult<'tcx>
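A hypothetical sketch of how a shim might combine the helpers above: look up a libc constant, fetch a libc struct layout, and fill integer fields by name. It assumes a &mut MiriInterpCx<'tcx> named this with the relevant EvalContextExt trait in scope, and a writeable place dest of type timespec obtained elsewhere.

```rust
fn fill_timespec<'tcx>(
    this: &mut MiriInterpCx<'tcx>,
    dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
    // Constant and layout lookups are infallible helpers.
    let _einval = this.eval_libc_i32("EINVAL");
    let _timespec_layout = this.libc_ty_layout("timespec");
    // Write named integer fields; write_int picks the right size/signedness.
    this.write_int_fields_named(&[("tv_sec", 0), ("tv_nsec", 0)], dest)
}
```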
fn write_null( &mut self, dest: &impl Writeable<'tcx, Provenance>, ) -> InterpResult<'tcx>
Writes a null (zero) value to dest.
fn ptr_is_null(&self, ptr: Pointer) -> InterpResult<'tcx, bool>
fn gen_random(&mut self, ptr: Pointer, len: u64) -> InterpResult<'tcx>
Generates some random bytes and writes them to the given destination.
fn call_function( &mut self, f: Instance<'tcx>, caller_abi: ExternAbi, args: &[ImmTy<'tcx>], dest: Option<&MPlaceTy<'tcx>>, stack_pop: StackPopCleanup, ) -> InterpResult<'tcx>
fn visit_freeze_sensitive( &self, place: &MPlaceTy<'tcx>, size: Size, action: impl FnMut(AllocRange, bool) -> InterpResult<'tcx>, ) -> InterpResult<'tcx>
Visits the memory covered by place, sensitive to freezing: the 2nd parameter of action will be true if this is frozen, false if this is in an UnsafeCell. The range is relative to place.
fn check_no_isolation(&self, name: &str) -> InterpResult<'tcx>
Checks that isolation is disabled, raising an error that mentions the name of the foreign function if this is not the case.
fn reject_in_isolation( &mut self, op_name: &str, reject_with: RejectOpWith, ) -> InterpResult<'tcx>
fn assert_target_os(&self, target_os: &str, name: &str)
Asserts that the target OS is target_os; panics with a message showing the name of the foreign function if this is not the case.
fn assert_target_os_is_unix(&self, name: &str)
Asserts that the target OS is a Unix; panics with a message showing the name of the foreign function if this is not the case.
fn target_os_is_unix(&self) -> bool
fn deref_pointer_as( &self, op: &impl Projectable<'tcx, Provenance>, layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx>>
Dereferences the pointer using the given layout instead of the pointer's declared type.
fn deref_pointer_and_offset( &self, op: &impl Projectable<'tcx, Provenance>, offset: u64, base_layout: TyAndLayout<'tcx>, value_layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx>>
fn deref_pointer_and_read( &self, op: &impl Projectable<'tcx, Provenance>, offset: u64, base_layout: TyAndLayout<'tcx>, value_layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn deref_pointer_and_write( &mut self, op: &impl Projectable<'tcx, Provenance>, offset: u64, value: impl Into<Scalar>, base_layout: TyAndLayout<'tcx>, value_layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, ()>
fn read_timespec( &mut self, tp: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, Option<Duration>>
Parses a timespec struct and returns it as a std::time::Duration. It returns None if the value in the timespec struct is invalid. Some libc functions will return EINVAL in this case.
fn read_byte_slice<'a>( &'a self, slice: &ImmTy<'tcx>, ) -> InterpResult<'tcx, &'a [u8]> where 'tcx: 'a
fn read_c_str<'a>(&'a self, ptr: Pointer) -> InterpResult<'tcx, &'a [u8]> where 'tcx: 'a
fn write_c_str( &mut self, c_str: &[u8], ptr: Pointer, size: u64, ) -> InterpResult<'tcx, (bool, u64)>
Returns Ok((false, length)) without trying to write if size is not large enough to fit the contents of c_str plus a null terminator. It returns Ok((true, length)) if the writing process was successful. The string length returned does include the null terminator.
fn read_c_str_with_char_size<T>( &self, ptr: Pointer, size: Size, align: Align, ) -> InterpResult<'tcx, Vec<T>>
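A hypothetical sketch combining the C-string helpers above: copy a null-terminated string from one guest pointer into a guest buffer of a given size. It assumes a &mut MiriInterpCx<'tcx> named this with the relevant EvalContextExt trait in scope.

```rust
fn copy_c_str<'tcx>(
    this: &mut MiriInterpCx<'tcx>,
    src: Pointer,
    dst: Pointer,
    dst_size: u64,
) -> InterpResult<'tcx, (bool, u64)> {
    // Read the source string (without its null terminator) into an owned buffer,
    // ending the shared borrow of `this`.
    let bytes = this.read_c_str(src)?.to_owned();
    // Write it back with a null terminator. The bool is false (and nothing is
    // written) if dst_size is too small; the u64 includes the null terminator.
    this.write_c_str(&bytes, dst, dst_size)
}
```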
fn read_wide_str(&self, ptr: Pointer) -> InterpResult<'tcx, Vec<u16>>
fn write_wide_str( &mut self, wide_str: &[u16], ptr: Pointer, size: u64, ) -> InterpResult<'tcx, (bool, u64)>
Returns Ok((false, length)) without trying to write if size is not large enough to fit the contents of os_string plus a null terminator. It returns Ok((true, length)) if the writing process was successful. The string length returned does include the null terminator and is measured in units of u16.
fn read_wchar_t_str(&self, ptr: Pointer) -> InterpResult<'tcx, Vec<u32>>
Always returns a Vec<u32>, no matter the size of wchar_t.
fn check_abi<'a>( &self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, exp_abi: Conv, ) -> InterpResult<'a, ()>
fn frame_in_std(&self) -> bool
fn check_abi_and_shim_symbol_clash( &mut self, abi: &FnAbi<'tcx, Ty<'tcx>>, exp_abi: Conv, link_name: Symbol, ) -> InterpResult<'tcx, ()>
fn check_shim<'a, const N: usize>( &mut self, abi: &FnAbi<'tcx, Ty<'tcx>>, exp_abi: Conv, link_name: Symbol, args: &'a [OpTy<'tcx>], ) -> InterpResult<'tcx, &'a [OpTy<'tcx>; N]>
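A hypothetical sketch of the usual shim pattern built on check_shim: verify the ABI and arity of a foreign call, then write a null return value. The names this, abi, link_name, args, and dest are assumed to come from the surrounding foreign-item dispatch; Conv::C is the expected calling convention.

```rust
fn shim_returning_null<'tcx>(
    this: &mut MiriInterpCx<'tcx>,
    abi: &FnAbi<'tcx, Ty<'tcx>>,
    link_name: Symbol,
    args: &[OpTy<'tcx>],
    dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
    // Expect exactly two arguments with the C calling convention.
    let [_arg0, _arg1] = this.check_shim(abi, Conv::C, link_name, args)?;
    // Write a null (zero) value to the return place.
    this.write_null(dest)
}
```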
fn mark_immutable(&mut self, mplace: &MPlaceTy<'tcx>)
fn float_to_int_checked( &self, src: &ImmTy<'tcx>, cast_to: TyAndLayout<'tcx>, round: Round, ) -> InterpResult<'tcx, Option<ImmTy<'tcx>>>
Converts src from floating point to the integer type dest_ty after rounding with mode round. Returns None if f is NaN or out of range.
fn get_twice_wide_int_ty(&self, ty: Ty<'tcx>) -> Ty<'tcx>
Returns an integer type that is twice as wide as ty.
fn expect_target_feature_for_intrinsic( &self, intrinsic: Symbol, target_feature: &str, ) -> InterpResult<'tcx, ()>
Checks that the target feature target_feature is enabled.
fn lookup_link_section( &mut self, name: &str, ) -> InterpResult<'tcx, Vec<ImmTy<'tcx>>>
Returns the immediates stored in the linker section with the given name.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_atomic_intrinsic( &mut self, intrinsic_name: &str, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
Calls the atomic intrinsic intrinsic; the atomic_ prefix has already been removed. Returns Ok(true) if the intrinsic was handled.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_simd_intrinsic( &mut self, intrinsic_name: &str, generic_args: GenericArgsRef<'tcx>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
Calls the simd intrinsic intrinsic; the simd_ prefix has already been removed. Returns Ok(true) if the intrinsic was handled.
fn fminmax_op( &self, op: MinMax, left: &ImmTy<'tcx>, right: &ImmTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn call_intrinsic( &mut self, instance: Instance<'tcx>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ret: Option<BasicBlock>, unwind: UnwindAction, ) -> InterpResult<'tcx, Option<Instance<'tcx>>>
fn emulate_intrinsic_by_name( &mut self, intrinsic_name: &str, generic_args: GenericArgsRef<'tcx>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ret: Option<BasicBlock>, ) -> InterpResult<'tcx, EmulateItemResult>
Returns Ok(true) if the intrinsic was handled.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn binary_ptr_op( &self, bin_op: BinOp, left: &ImmTy<'tcx>, right: &ImmTy<'tcx>, ) -> InterpResult<'tcx, ImmTy<'tcx>>
fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>( &self, inputs: &[F1], ) -> F2
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn run_provenance_gc(&mut self)
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn malloc_align(&self, size: u64) -> Align
Returns the alignment that malloc would guarantee for requests of the given size.
fn emulate_allocator( &mut self, default: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
fn malloc(&mut self, size: u64, zero_init: bool) -> InterpResult<'tcx, Pointer>
fn posix_memalign( &mut self, memptr: &OpTy<'tcx>, align: &OpTy<'tcx>, size: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn free(&mut self, ptr: Pointer) -> InterpResult<'tcx>
fn realloc( &mut self, old_ptr: Pointer, new_size: u64, ) -> InterpResult<'tcx, Pointer>
fn aligned_alloc( &mut self, align: &OpTy<'tcx>, size: &OpTy<'tcx>, ) -> InterpResult<'tcx, Pointer>
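A hypothetical sketch using the allocator shims above: allocate a zero-initialized block in the emulated program, grow it, then free it. It assumes a &mut MiriInterpCx<'tcx> named this with the relevant EvalContextExt trait in scope.

```rust
fn roundtrip_malloc<'tcx>(this: &mut MiriInterpCx<'tcx>) -> InterpResult<'tcx> {
    // 16 zero-initialized bytes, aligned as malloc_align(16) promises.
    let ptr = this.malloc(16, /* zero_init */ true)?;
    // realloc returns a (possibly moved) pointer to the resized block.
    let ptr = this.realloc(ptr, 32)?;
    this.free(ptr)
}
```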
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn handle_miri_backtrace_size( &mut self, abi: &FnAbi<'tcx, Ty<'tcx>>, link_name: Symbol, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn handle_miri_get_backtrace( &mut self, abi: &FnAbi<'tcx, Ty<'tcx>>, link_name: Symbol, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn resolve_frame_pointer( &mut self, ptr: &OpTy<'tcx>, ) -> InterpResult<'tcx, (Instance<'tcx>, Loc, String, String)>
fn handle_miri_resolve_frame( &mut self, abi: &FnAbi<'tcx, Ty<'tcx>>, link_name: Symbol, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn handle_miri_resolve_frame_names( &mut self, abi: &FnAbi<'tcx, Ty<'tcx>>, link_name: Symbol, args: &[OpTy<'tcx>], ) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn return_read_success( &mut self, buf: Pointer, bytes: &[u8], actual_read_size: usize, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
Helper for implementing FileDescription::read: this is only used when read is successful. actual_read_size should be the return value of some underlying read call that used bytes as its output buffer. The length of bytes must not exceed either the host's or the target's isize. bytes is written to buf and the size is written to dest.
fn return_write_success( &mut self, actual_write_size: usize, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
Helper for implementing FileDescription::write: this is only used when write is successful, and writes actual_write_size to dest.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn call_native_fn( &mut self, link_name: Symbol, dest: &MPlaceTy<'tcx>, args: &[OpTy<'tcx>], ) -> InterpResult<'tcx, bool>
Calls the native host function (via a libffi call), then converts the return value from the native form into something that can be stored in Miri's internal memory.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn tb_retag_ptr_value( &mut self, kind: RetagKind, val: &ImmTy<'tcx>, ) -> InterpResult<'tcx, ImmTy<'tcx>>
Retags a pointer value; references are handled via from_ref_ty, and raw pointers are never reborrowed.
fn tb_retag_place_contents( &mut self, kind: RetagKind, place: &PlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn tb_protect_place( &mut self, place: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx>>
fn tb_expose_tag(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx>
fn print_tree( &mut self, alloc_id: AllocId, show_unnamed: bool, ) -> InterpResult<'tcx>
fn tb_give_pointer_debug_name( &mut self, ptr: Pointer, nth_parent: u8, name: &str, ) -> InterpResult<'tcx>
Gives a pointer a debug name: the name is name, and the pointer that receives it is the nth_parent of ptr (with 0 representing ptr itself).
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn sysconf(&mut self, val: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn strerror_r( &mut self, errnum: &OpTy<'tcx>, buf: &OpTy<'tcx>, buflen: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn getenv(&mut self, name_op: &OpTy<'tcx>) -> InterpResult<'tcx, Pointer>
fn setenv( &mut self, name_op: &OpTy<'tcx>, value_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn unsetenv(&mut self, name_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn getcwd( &mut self, buf_op: &OpTy<'tcx>, size_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Pointer>
fn chdir(&mut self, path_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn update_environ(&mut self) -> InterpResult<'tcx>
Updates the environ static.
fn getpid(&mut self) -> InterpResult<'tcx, Scalar>
fn linux_gettid(&mut self) -> InterpResult<'tcx, Scalar>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn dup(&mut self, old_fd_num: i32) -> InterpResult<'tcx, Scalar>
fn dup2( &mut self, old_fd_num: i32, new_fd_num: i32, ) -> InterpResult<'tcx, Scalar>
fn flock(&mut self, fd_num: i32, op: i32) -> InterpResult<'tcx, Scalar>
fn fcntl(&mut self, args: &[OpTy<'tcx>]) -> InterpResult<'tcx, Scalar>
fn close(&mut self, fd_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn read( &mut self, fd_num: i32, buf: Pointer, count: u64, offset: Option<i128>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn write( &mut self, fd_num: i32, buf: Pointer, count: u64, offset: Option<i128>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn open(&mut self, args: &[OpTy<'tcx>]) -> InterpResult<'tcx, Scalar>
fn lseek64( &mut self, fd_num: i32, offset: i128, whence: i32, ) -> InterpResult<'tcx, Scalar>
fn unlink(&mut self, path_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn symlink( &mut self, target_op: &OpTy<'tcx>, linkpath_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn macos_fbsd_solaris_stat( &mut self, path_op: &OpTy<'tcx>, buf_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn macos_fbsd_solaris_lstat( &mut self, path_op: &OpTy<'tcx>, buf_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn macos_fbsd_solaris_fstat( &mut self, fd_op: &OpTy<'tcx>, buf_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn linux_statx( &mut self, dirfd_op: &OpTy<'tcx>, pathname_op: &OpTy<'tcx>, flags_op: &OpTy<'tcx>, mask_op: &OpTy<'tcx>, statxbuf_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn rename( &mut self, oldpath_op: &OpTy<'tcx>, newpath_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn mkdir( &mut self, path_op: &OpTy<'tcx>, mode_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn rmdir(&mut self, path_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn opendir(&mut self, name_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn linux_solarish_readdir64( &mut self, dirent_type: &str, dirp_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn macos_fbsd_readdir_r( &mut self, dirp_op: &OpTy<'tcx>, entry_op: &OpTy<'tcx>, result_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn closedir(&mut self, dirp_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn ftruncate64( &mut self, fd_num: i32, length: i128, ) -> InterpResult<'tcx, Scalar>
fn fsync(&mut self, fd_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn ffullsync_fd(&mut self, fd_num: i32) -> InterpResult<'tcx, Scalar>
fn fdatasync(&mut self, fd_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn sync_file_range( &mut self, fd_op: &OpTy<'tcx>, offset_op: &OpTy<'tcx>, nbytes_op: &OpTy<'tcx>, flags_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn readlink( &mut self, pathname_op: &OpTy<'tcx>, buf_op: &OpTy<'tcx>, bufsize_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, i64>
fn isatty(&mut self, miri_fd: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn realpath( &mut self, path_op: &OpTy<'tcx>, processed_path_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn mkstemp(&mut self, template_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn pthread_mutexattr_init( &mut self, attr_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_mutexattr_settype( &mut self, attr_op: &OpTy<'tcx>, kind_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn pthread_mutexattr_destroy( &mut self, attr_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_mutex_init( &mut self, mutex_op: &OpTy<'tcx>, attr_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_mutex_lock( &mut self, mutex_op: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn pthread_mutex_trylock( &mut self, mutex_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn pthread_mutex_unlock( &mut self, mutex_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn pthread_mutex_destroy( &mut self, mutex_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_rwlock_rdlock( &mut self, rwlock_op: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn pthread_rwlock_tryrdlock( &mut self, rwlock_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn pthread_rwlock_wrlock( &mut self, rwlock_op: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn pthread_rwlock_trywrlock( &mut self, rwlock_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn pthread_rwlock_unlock( &mut self, rwlock_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_rwlock_destroy( &mut self, rwlock_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_condattr_init( &mut self, attr_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_condattr_setclock( &mut self, attr_op: &OpTy<'tcx>, clock_id_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn pthread_condattr_getclock( &mut self, attr_op: &OpTy<'tcx>, clk_id_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_condattr_destroy( &mut self, attr_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_cond_init( &mut self, cond_op: &OpTy<'tcx>, attr_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_cond_signal( &mut self, cond_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_cond_broadcast( &mut self, cond_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_cond_wait( &mut self, cond_op: &OpTy<'tcx>, mutex_op: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn pthread_cond_timedwait( &mut self, cond_op: &OpTy<'tcx>, mutex_op: &OpTy<'tcx>, abstime_op: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn pthread_cond_destroy( &mut self, cond_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn pthread_create( &mut self, thread: &OpTy<'tcx>, _attr: &OpTy<'tcx>, start_routine: &OpTy<'tcx>, arg: &OpTy<'tcx>, ) -> InterpResult<'tcx, ()>
fn pthread_join( &mut self, thread: &OpTy<'tcx>, retval: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn pthread_detach(&mut self, thread: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
fn pthread_self(&mut self) -> InterpResult<'tcx, Scalar>
fn pthread_setname_np( &mut self, thread: Scalar, name: Scalar, name_max_len: usize, truncate: bool, ) -> InterpResult<'tcx, ThreadNameResult>
If the name is longer than name_max_len, then if truncate is set the truncated name is used as the thread name; otherwise ThreadNameResult::NameTooLong is returned. If the specified thread wasn't found, ThreadNameResult::ThreadNotFound is returned.
fn pthread_getname_np( &mut self, thread: Scalar, name_out: Scalar, len: Scalar, truncate: bool, ) -> InterpResult<'tcx, ThreadNameResult>
If the name does not fit in the buffer, then if truncate is set the truncated name is written out; otherwise ThreadNameResult::NameTooLong is returned. If the specified thread wasn't found, ThreadNameResult::ThreadNotFound is returned.
fn sched_yield(&mut self) -> InterpResult<'tcx, ()>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn socketpair( &mut self, domain: &OpTy<'tcx>, type_: &OpTy<'tcx>, protocol: &OpTy<'tcx>, sv: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn pipe2( &mut self, pipefd: &OpTy<'tcx>, flags: Option<&OpTy<'tcx>>, ) -> InterpResult<'tcx, Scalar>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn retag_ptr_value( &mut self, kind: RetagKind, val: &ImmTy<'tcx>, ) -> InterpResult<'tcx, ImmTy<'tcx>>
fn retag_place_contents( &mut self, kind: RetagKind, place: &PlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn protect_place( &mut self, place: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx>>
fn expose_tag(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx>
fn give_pointer_debug_name( &mut self, ptr: Pointer, nth_parent: u8, name: &str, ) -> InterpResult<'tcx>
fn print_borrow_state( &mut self, alloc_id: AllocId, show_unnamed: bool, ) -> InterpResult<'tcx>
fn on_stack_pop( &self, frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>, ) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn epoll_create1(&mut self, flags: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
Returns a file descriptor referring to the new Epoll instance. This file descriptor is used for all subsequent calls to the epoll interface. If the flags argument is 0, then this function is the same as epoll_create().
fn epoll_ctl( &mut self, epfd: &OpTy<'tcx>, op: &OpTy<'tcx>, fd: &OpTy<'tcx>, event: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
Performs control operations on the Epoll instance referred to by the file descriptor epfd. It requests that the operation op be performed for the target file descriptor, fd.
fn epoll_wait( &mut self, epfd: &OpTy<'tcx>, events_op: &OpTy<'tcx>, maxevents: &OpTy<'tcx>, timeout: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
The epoll_wait() system call waits for events on the Epoll instance referred to by the file descriptor epfd. The buffer pointed to by events is used to return information from the ready list about file descriptors in the interest list that have some events available. Up to maxevents are returned by epoll_wait(). The maxevents argument must be greater than zero.
fn check_and_update_readiness( &mut self, fd_ref: &FileDescriptionRef, ) -> InterpResult<'tcx, ()>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn eventfd( &mut self, val: &OpTy<'tcx>, flags: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
Creates an Event that is used as an event wait/notify mechanism by user-space applications, and by the kernel to notify user-space applications of events. The Event contains a u64 counter maintained by the kernel. The counter is initialized with the value specified in the initval argument.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx>
fn os_unfair_lock_trylock( &mut self, lock_op: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx>
fn os_unfair_lock_assert_owner( &mut self, lock_op: &OpTy<'tcx>, ) -> InterpResult<'tcx>
fn os_unfair_lock_assert_not_owner( &mut self, lock_op: &OpTy<'tcx>, ) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn GetEnvironmentVariableW( &mut self, name_op: &OpTy<'tcx>, buf_op: &OpTy<'tcx>, size_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn GetEnvironmentStringsW(&mut self) -> InterpResult<'tcx, Pointer>
fn FreeEnvironmentStringsW( &mut self, env_block_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn SetEnvironmentVariableW( &mut self, name_op: &OpTy<'tcx>, value_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn GetCurrentDirectoryW( &mut self, size_op: &OpTy<'tcx>, buf_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn SetCurrentDirectoryW( &mut self, path_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn GetCurrentProcessId(&mut self) -> InterpResult<'tcx, Scalar>
fn GetUserProfileDirectoryW( &mut self, token: &OpTy<'tcx>, buf: &OpTy<'tcx>, size: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
Evaluation context extensions.
fn read_scalar_atomic( &self, place: &MPlaceTy<'tcx>, atomic: AtomicReadOrd, ) -> InterpResult<'tcx, Scalar>
fn write_scalar_atomic( &mut self, val: Scalar, dest: &MPlaceTy<'tcx>, atomic: AtomicWriteOrd, ) -> InterpResult<'tcx>
fn atomic_rmw_op_immediate( &mut self, place: &MPlaceTy<'tcx>, rhs: &ImmTy<'tcx>, op: BinOp, not: bool, atomic: AtomicRwOrd, ) -> InterpResult<'tcx, ImmTy<'tcx>>
fn atomic_exchange_scalar( &mut self, place: &MPlaceTy<'tcx>, new: Scalar, atomic: AtomicRwOrd, ) -> InterpResult<'tcx, Scalar>
fn atomic_min_max_scalar( &mut self, place: &MPlaceTy<'tcx>, rhs: ImmTy<'tcx>, min: bool, atomic: AtomicRwOrd, ) -> InterpResult<'tcx, ImmTy<'tcx>>
fn atomic_compare_exchange_scalar( &mut self, place: &MPlaceTy<'tcx>, expect_old: &ImmTy<'tcx>, new: Scalar, success: AtomicRwOrd, fail: AtomicReadOrd, can_fail_spuriously: bool, ) -> InterpResult<'tcx, Immediate<Provenance>>
If can_fail_spuriously is true, then we treat it as a "compare_exchange_weak" operation, and some portion of the time fail even when the values are actually identical.
fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx>
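A hypothetical sketch of the atomic-memory helpers above: an acquire load followed by a release store on the same place. It assumes a &mut MiriInterpCx<'tcx> named this with this EvalContextExt trait in scope, an integer-typed place, and that AtomicReadOrd/AtomicWriteOrd carry the usual Acquire/Release variants.

```rust
fn reload_store_atomic<'tcx>(
    this: &mut MiriInterpCx<'tcx>,
    place: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
    // Acquire-load the current value (data-race checked).
    let old = this.read_scalar_atomic(place, AtomicReadOrd::Acquire)?;
    // Release-store it back unchanged; a real shim would compute a new value.
    this.write_scalar_atomic(old, place, AtomicWriteOrd::Release)
}
```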
fn allow_data_races_all_threads_done(&mut self)
fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> Option<R>
fn acquire_clock(&self, clock: &VClock)
Acquires a clock previously released via release_clock.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn invalid_handle(&mut self, function_name: &str) -> InterpResult<'tcx, !>
fn CloseHandle(&mut self, handle_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn InitOnceBeginInitialize( &mut self, init_once_op: &OpTy<'tcx>, flags_op: &OpTy<'tcx>, pending_op: &OpTy<'tcx>, context_op: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn InitOnceComplete( &mut self, init_once_op: &OpTy<'tcx>, flags_op: &OpTy<'tcx>, context_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn WaitOnAddress( &mut self, ptr_op: &OpTy<'tcx>, compare_op: &OpTy<'tcx>, size_op: &OpTy<'tcx>, timeout_op: &OpTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn WakeByAddressSingle(&mut self, ptr_op: &OpTy<'tcx>) -> InterpResult<'tcx>
fn WakeByAddressAll(&mut self, ptr_op: &OpTy<'tcx>) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn CreateThread( &mut self, security_op: &OpTy<'tcx>, stacksize_op: &OpTy<'tcx>, start_op: &OpTy<'tcx>, arg_op: &OpTy<'tcx>, flags_op: &OpTy<'tcx>, thread_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, ThreadId>
fn WaitForSingleObject( &mut self, handle_op: &OpTy<'tcx>, timeout_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_aesni_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_avx_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_avx2_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_bmi_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_gfni_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_sha_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_sse_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus
fn init_once_enqueue_and_block( &mut self, id: InitOnceId, callback: Box<dyn UnblockCallback<'tcx> + 'tcx>, )
fn init_once_begin(&mut self, id: InitOnceId)
fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx>
fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx>
fn init_once_observe_completed(&mut self, id: InitOnceId)
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_sse2_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_sse3_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_sse41_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_sse42_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_ssse3_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_x86_intrinsic( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emulate_foreign_item( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ret: Option<BasicBlock>, unwind: UnwindAction, ) -> InterpResult<'tcx, Option<(&'tcx Body<'tcx>, Instance<'tcx>)>>
Emulates a call to a foreign item, handling goto_block if needed. Returns Ok(None) if the foreign item was completely handled by this function. Returns Ok(Some(body)) if processing the foreign item is delegated to another function.
fn is_dyn_sym(&self, name: &str) -> bool
fn emulate_dyn_sym( &mut self, sym: DynSym, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ret: Option<BasicBlock>, unwind: UnwindAction, ) -> InterpResult<'tcx>
Emulates a call to a DynSym.
fn lookup_exported_symbol( &mut self, link_name: Symbol, ) -> InterpResult<'tcx, Option<(&'tcx Body<'tcx>, Instance<'tcx>)>>
Looks up the body of a function that has link_name as the symbol name.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn last_error_place(&mut self) -> InterpResult<'tcx, MPlaceTy<'tcx>>
fn set_last_error(&mut self, err: impl Into<IoError>) -> InterpResult<'tcx>
fn set_last_error_and_return( &mut self, err: impl Into<IoError>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
fn set_last_error_and_return_i32( &mut self, err: impl Into<IoError>, ) -> InterpResult<'tcx, Scalar>
Sets the last OS error and returns -1 as an i32-typed Scalar.
fn set_last_error_and_return_i64( &mut self, err: impl Into<IoError>, ) -> InterpResult<'tcx, Scalar>
Sets the last OS error and returns -1 as an i64-typed Scalar.
fn get_last_error(&mut self) -> InterpResult<'tcx, Scalar>
fn io_error_to_errnum(&self, err: Error) -> InterpResult<'tcx, Scalar>
Converts a std::io::ErrorKind into a platform-specific errnum.
fn try_errnum_to_io_error( &self, errnum: Scalar, ) -> InterpResult<'tcx, Option<ErrorKind>>
The inverse of io_error_to_errnum.
fn try_unwrap_io_result<T: From<i32>>( &mut self, result: Result<T>, ) -> InterpResult<'tcx, T>
Helper function that consumes a std::io::Result<T> and returns an InterpResult<'tcx, T>::Ok instead. If the result is an error, this function returns Ok(-1) and sets the last OS error accordingly.
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn read_os_str_from_c_str<'a>( &'a self, ptr: Pointer, ) -> InterpResult<'tcx, &'a OsStr> where 'tcx: 'a
fn read_os_str_from_wide_str<'a>( &'a self, ptr: Pointer, ) -> InterpResult<'tcx, OsString> where 'tcx: 'a
fn write_os_str_to_c_str( &mut self, os_str: &OsStr, ptr: Pointer, size: u64, ) -> InterpResult<'tcx, (bool, u64)>
Returns (success, full_len), where the length includes the null terminator. On failure, nothing is written.
fn write_os_str_to_wide_str_helper( &mut self, os_str: &OsStr, ptr: Pointer, size: u64, truncate: bool, ) -> InterpResult<'tcx, (bool, u64)>
Common helper for write_os_str_to_wide_str and write_os_str_to_wide_str_truncated.
fn write_os_str_to_wide_str( &mut self, os_str: &OsStr, ptr: Pointer, size: u64, ) -> InterpResult<'tcx, (bool, u64)>
Returns (success, full_len), where the length is measured in units of u16 and includes the null terminator. On failure, nothing is written.
fn write_os_str_to_wide_str_truncated( &mut self, os_str: &OsStr, ptr: Pointer, size: u64, ) -> InterpResult<'tcx, (bool, u64)>
Like write_os_str_to_wide_str, but on failure as much as possible is written into the buffer (always with a null terminator).
fn alloc_os_str_as_c_str( &mut self, os_str: &OsStr, memkind: MemoryKind, ) -> InterpResult<'tcx, Pointer>
Allocates an OsStr as a null-terminated sequence of bytes.
fn alloc_os_str_as_wide_str( &mut self, os_str: &OsStr, memkind: MemoryKind, ) -> InterpResult<'tcx, Pointer>
Allocates an OsStr as a null-terminated sequence of u16.
fn read_path_from_c_str<'a>( &'a self, ptr: Pointer, ) -> InterpResult<'tcx, Cow<'a, Path>> where 'tcx: 'a
fn read_path_from_wide_str(&self, ptr: Pointer) -> InterpResult<'tcx, PathBuf>
Reads a path from a null-terminated sequence of u16s, performing path separator conversion if needed.
fn write_path_to_c_str( &mut self, path: &Path, ptr: Pointer, size: u64, ) -> InterpResult<'tcx, (bool, u64)>
fn write_path_to_wide_str( &mut self, path: &Path, ptr: Pointer, size: u64, ) -> InterpResult<'tcx, (bool, u64)>
Writes a path (as a null-terminated sequence of u16s), adjusting path separators if needed.
fn write_path_to_wide_str_truncated( &mut self, path: &Path, ptr: Pointer, size: u64, ) -> InterpResult<'tcx, (bool, u64)>
Truncating variant: writes a path (as a null-terminated sequence of u16s), adjusting path separators if needed.
fn alloc_path_as_c_str( &mut self, path: &Path, memkind: MemoryKind, ) -> InterpResult<'tcx, Pointer>
fn alloc_path_as_wide_str( &mut self, path: &Path, memkind: MemoryKind, ) -> InterpResult<'tcx, Pointer>
Allocates a path as a null-terminated sequence of u16s, adjusting path separators if needed.
fn convert_path<'a>( &self, os_str: Cow<'a, OsStr>, direction: PathConversion, ) -> Cow<'a, OsStr>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn lazy_sync_init<'a, T: 'static>( &'a mut self, primitive: &MPlaceTy<'tcx>, init_offset: Size, data: T, ) -> InterpResult<'tcx, &'a T> where 'tcx: 'a
Helper for lazily initialized alloc_extra.sync data: this forces an immediate init. Returns a reference to the data in the machine state.
fn lazy_sync_get_data<'a, T: 'static>( &'a mut self, primitive: &MPlaceTy<'tcx>, init_offset: Size, missing_data: impl FnOnce() -> InterpResult<'tcx, T>, new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>, ) -> InterpResult<'tcx, &'a T> where 'tcx: 'a
Helper for lazily initialized alloc_extra.sync data: checks if the primitive is initialized.
fn get_sync_or_init<'a, T: 'static>( &'a mut self, ptr: Pointer, new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> T, ) -> Option<&'a T> where 'tcx: 'a
fn mutex_get_owner(&self, mutex_ref: &MutexRef) -> ThreadId
fn mutex_is_locked(&self, mutex_ref: &MutexRef) -> bool
fn mutex_lock(&mut self, mutex_ref: &MutexRef)
fn mutex_unlock( &mut self, mutex_ref: &MutexRef, ) -> InterpResult<'tcx, Option<usize>>
Returns None if the mutex was not locked by the active thread.
fn mutex_enqueue_and_block( &mut self, mutex_ref: &MutexRef, retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>, )
fn rwlock_is_locked(&self, id: RwLockId) -> bool
fn rwlock_is_write_locked(&self, id: RwLockId) -> bool
fn rwlock_reader_lock(&mut self, id: RwLockId)
Read-locks the lock by adding the reader to the list of threads that own this lock.
fn rwlock_reader_unlock(&mut self, id: RwLockId) -> InterpResult<'tcx, bool>
Returns true if succeeded, false if this reader did not hold the lock.
fn rwlock_enqueue_and_block_reader( &mut self, id: RwLockId, retval: Scalar, dest: MPlaceTy<'tcx>, )
Once the lock is acquired, retval will be written to dest.
fn rwlock_writer_lock(&mut self, id: RwLockId)
fn rwlock_writer_unlock(&mut self, id: RwLockId) -> InterpResult<'tcx, bool>
Returns false if the lock is held by another thread.
fn rwlock_enqueue_and_block_writer( &mut self, id: RwLockId, retval: Scalar, dest: MPlaceTy<'tcx>, )
Once the lock is acquired, retval will be written to dest.
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool
fn condvar_wait( &mut self, condvar: CondvarId, mutex_ref: MutexRef, timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>, retval_succ: Scalar, retval_timeout: Scalar, dest: MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
On success, retval_succ will be written to dest. If the timeout happens first, retval_timeout will be written to dest.
fn condvar_signal(&mut self, id: CondvarId) -> InterpResult<'tcx, bool>
Returns true iff any thread was woken up.
fn futex_wait( &mut self, futex_ref: FutexRef, bitset: u32, timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>, retval_succ: Scalar, retval_timeout: Scalar, dest: MPlaceTy<'tcx>, errno_timeout: IoError, )
On success, retval_succ is written to dest. On a timeout, retval_timeout is written to dest and errno_timeout is set as the last error.
fn futex_wake( &mut self, futex_ref: &FutexRef, bitset: u32, ) -> InterpResult<'tcx, bool>
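These condvar and futex shims are what back the standard library's blocking primitives when a program runs under Miri. A small program such as the following (run with cargo miri run) exercises the wait/signal paths described above; exactly which shims are hit depends on the target's std implementation.

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    let worker = thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true;
        // Wakes one waiter; under Miri this ends up in the condvar/futex
        // machinery listed above.
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    while !*ready {
        // Atomically releases the mutex and blocks until notified.
        ready = cvar.wait(ready).unwrap();
    }
    drop(ready);
    worker.join().unwrap();
}
```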
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn handle_miri_start_unwind( &mut self, payload: &OpTy<'tcx>, ) -> InterpResult<'tcx>
Handles the miri_start_unwind intrinsic, which is called by libpanic_unwind to delegate the actual unwinding process to Miri.
fn handle_catch_unwind( &mut self, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ret: Option<BasicBlock>, ) -> InterpResult<'tcx>
Handles the try intrinsic, the underlying implementation of std::panicking::try.
fn handle_stack_pop_unwind( &mut self, extra: FrameExtra<'tcx>, unwinding: bool, ) -> InterpResult<'tcx, ReturnAction>
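On the user side, these hooks are exercised by ordinary catch_unwind code. For example, running the following under Miri routes the panic through miri_start_unwind and the catch-unwind handling described above:

```rust
use std::panic;

fn main() {
    // Under Miri, this panic is started via the miri_start_unwind intrinsic
    // and intercepted by the catch-unwind handling described above.
    let result = panic::catch_unwind(|| {
        panic!("boom");
    });
    assert!(result.is_err());
    println!("caught the panic and kept going");
}
```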
fn start_panic(&mut self, msg: &str, unwind: UnwindAction) -> InterpResult<'tcx>
fn start_panic_nounwind(&mut self, msg: &str) -> InterpResult<'tcx>
fn assert_panic( &mut self, msg: &AssertMessage<'tcx>, unwind: UnwindAction, ) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn clock_gettime( &mut self, clk_id_op: &OpTy<'tcx>, tp_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn gettimeofday( &mut self, tv_op: &OpTy<'tcx>, tz_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn localtime_r( &mut self, timep: &OpTy<'tcx>, result_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Pointer>
fn GetSystemTimeAsFileTime( &mut self, shim_name: &str, LPFILETIME_op: &OpTy<'tcx>, ) -> InterpResult<'tcx>
fn QueryPerformanceCounter( &mut self, lpPerformanceCount_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn QueryPerformanceFrequency( &mut self, lpFrequency_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn mach_absolute_time(&self) -> InterpResult<'tcx, Scalar>
fn mach_timebase_info( &mut self, info_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn nanosleep( &mut self, req_op: &OpTy<'tcx>, _rem: &OpTy<'tcx>, ) -> InterpResult<'tcx, Scalar>
fn Sleep(&mut self, timeout: &OpTy<'tcx>) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn thread_id_try_from( &self, id: impl TryInto<u32>, ) -> Result<ThreadId, ThreadNotFound>
fn get_or_create_thread_local_alloc( &mut self, def_id: DefId, ) -> InterpResult<'tcx, StrictPointer>
fn start_regular_thread( &mut self, thread: Option<MPlaceTy<'tcx>>, start_routine: Pointer, start_abi: ExternAbi, func_arg: ImmTy<'tcx>, ret_layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, ThreadId>
fn terminate_active_thread( &mut self, tls_alloc_action: TlsAllocAction, ) -> InterpResult<'tcx>
The thread's TLS allocations are handled according to tls_alloc_action. Read more
fn block_thread( &mut self, reason: BlockReason, timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>, callback: Box<dyn UnblockCallback<'tcx> + 'tcx>, )
fn unblock_thread( &mut self, thread: ThreadId, reason: BlockReason, ) -> InterpResult<'tcx>
fn detach_thread( &mut self, thread_id: ThreadId, allow_terminated_joined: bool, ) -> InterpResult<'tcx>
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx>
fn join_thread_exclusive( &mut self, joined_thread_id: ThreadId, ) -> InterpResult<'tcx>
fn active_thread(&self) -> ThreadId
fn active_thread_mut(&mut self) -> &mut Thread<'tcx>
fn active_thread_ref(&self) -> &Thread<'tcx>
fn get_total_thread_count(&self) -> usize
fn have_all_terminated(&self) -> bool
fn enable_thread(&mut self, thread_id: ThreadId)
fn active_thread_stack<'a>( &'a self, ) -> &'a [Frame<'tcx, Provenance, FrameExtra<'tcx>>]
fn active_thread_stack_mut<'a>( &'a mut self, ) -> &'a mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>>
fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>)
fn get_thread_name<'c>(&'c self, thread: ThreadId) -> Option<&'c [u8]> where 'tcx: 'c,
fn yield_active_thread(&mut self)
fn maybe_preempt_active_thread(&mut self)
fn run_threads(&mut self) -> InterpResult<'tcx, !>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn buffered_atomic_rmw( &mut self, new_val: Scalar, place: &MPlaceTy<'tcx>, atomic: AtomicRwOrd, init: Scalar, ) -> InterpResult<'tcx>
fn buffered_atomic_read( &self, place: &MPlaceTy<'tcx>, atomic: AtomicReadOrd, latest_in_mo: Scalar, validate: impl FnOnce() -> InterpResult<'tcx>, ) -> InterpResult<'tcx, Option<Scalar>>
fn buffered_atomic_write( &mut self, val: Scalar, dest: &MPlaceTy<'tcx>, atomic: AtomicWriteOrd, init: Option<Scalar>, ) -> InterpResult<'tcx>
fn perform_read_on_buffered_latest( &self, place: &MPlaceTy<'tcx>, atomic: AtomicReadOrd, ) -> InterpResult<'tcx>
impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx>
fn emit_diagnostic(&self, e: NonHaltingDiagnostic)
fn handle_ice(&self)
impl<'tcx> EvalContextExtPriv<'tcx> for MiriInterpCx<'tcx>
fn alloc_id_from_addr(&self, addr: u64, size: i64) -> Option<AllocId>
fn addr_from_alloc_id_uncached( &self, global_state: &mut GlobalStateInner, alloc_id: AllocId, memory_kind: MemoryKind, ) -> InterpResult<'tcx, u64>
fn addr_from_alloc_id( &self, alloc_id: AllocId, memory_kind: MemoryKind, ) -> InterpResult<'tcx, u64>
impl<'tcx> EvalContextExtPriv<'tcx> for MiriInterpCx<'tcx>
fn condvar_reacquire_mutex( &mut self, mutex_ref: &MutexRef, retval: Scalar, dest: MPlaceTy<'tcx>, ) -> InterpResult<'tcx>
impl<'tcx> EvalContextExtPriv<'tcx> for MiriInterpCx<'tcx>
fn call_native_with_args<'a>( &mut self, link_name: Symbol, dest: &MPlaceTy<'tcx>, ptr: CodePtr, libffi_args: Vec<Arg<'a>>, ) -> InterpResult<'tcx, ImmTy<'tcx>>
fn get_func_ptr_explicitly_from_lib( &mut self, link_name: Symbol, ) -> Option<CodePtr>
impl<'tcx> EvalContextExtPriv<'tcx> for MiriInterpCx<'tcx>
fn os_unfair_lock_get_data<'a>( &'a mut self, lock_ptr: &OpTy<'tcx>, ) -> InterpResult<'tcx, &'a MacOsUnfairLock> where 'tcx: 'a,
impl<'tcx> EvalContextExtPriv<'tcx> for MiriInterpCx<'tcx>
fn init_once_get_data<'a>( &'a mut self, init_once_ptr: &OpTy<'tcx>, ) -> InterpResult<'tcx, &'a WindowsInitOnce> where 'tcx: 'a,
fn init_once_try_begin( &mut self, id: InitOnceId, pending_place: &MPlaceTy<'tcx>, dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, bool>
Returns true if we were successful, false if we would block.
impl<'tcx> EvalContextExtPriv<'tcx> for MiriInterpCx<'tcx>
fn check_rustc_alloc_request(&self, size: u64, align: u64) -> InterpResult<'tcx>
fn emulate_foreign_item_inner( &mut self, link_name: Symbol, abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, ) -> InterpResult<'tcx, EmulateItemResult>
impl<'tcx> EvalContextExtPrivate<'tcx> for MiriInterpCx<'tcx>
fn macos_fbsd_solaris_write_buf( &mut self, metadata: FileMetadata, buf_op: &OpTy<'tcx>, ) -> InterpResult<'tcx, i32>
fn file_type_to_d_type( &mut self, file_type: Result<FileType>, ) -> InterpResult<'tcx, i32>
impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx>
Retagging/reborrowing. Policy on which permission to grant to each pointer should be left to the implementation of NewPermission.
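For orientation, the retagging described here instruments ordinary reference creation in the interpreted program. A user-level example follows (run with MIRIFLAGS="-Zmiri-tree-borrows" cargo miri run to use the Tree Borrows variant of retagging); this is an illustration of what gets retagged, not Miri-internal code.

```rust
// Run with: MIRIFLAGS="-Zmiri-tree-borrows" cargo miri run
fn main() {
    let mut x = 0u32;
    let r1 = &mut x; // retagged: r1 receives a fresh tag
    let r2 = &mut *r1; // reborrow: r2 receives a fresh tag derived from r1
    *r2 += 1; // fine: access through the most recent reborrow
    *r1 += 1; // fine, as long as r2 is not used afterwards
    assert_eq!(x, 2);
}
```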
fn tb_reborrow( &mut self, place: &MPlaceTy<'tcx>, ptr_size: Size, new_perm: NewPermission, new_tag: BorTag, ) -> InterpResult<'tcx, Option<Provenance>>
fn tb_retag_place( &mut self, place: &MPlaceTy<'tcx>, new_perm: NewPermission, ) -> InterpResult<'tcx, MPlaceTy<'tcx>>
fn tb_retag_reference( &mut self, val: &ImmTy<'tcx>, new_perm: NewPermission, ) -> InterpResult<'tcx, ImmTy<'tcx>>
impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx>
fn allow_data_races_ref<R>( &self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R, ) -> R
fn allow_data_races_mut<R>( &mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R, ) -> R
Same as allow_data_races_ref, this temporarily disables any data-race detection and so should only be used for atomic operations or internal state that the program cannot access.
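A minimal sketch of the intended usage pattern, based only on the signature above (hypothetical code, assuming it sits inside the data-race module where this private extension trait is visible):

```rust
// Hypothetical sketch based on the signature above; `peek_thread_count` is
// invented. Reads performed inside the closure are exempt from data-race
// detection, so this must only touch state the interpreted program itself
// cannot observe.
fn peek_thread_count<'tcx>(this: &MiriInterpCx<'tcx>) -> usize {
    this.allow_data_races_ref(|ecx| ecx.get_total_thread_count())
}
```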
fn atomic_access_check( &self, place: &MPlaceTy<'tcx>, access_type: AtomicAccessType, ) -> InterpResult<'tcx>
fn validate_atomic_load( &self, place: &MPlaceTy<'tcx>, atomic: AtomicReadOrd, ) -> InterpResult<'tcx>
fn validate_atomic_store( &mut self, place: &MPlaceTy<'tcx>, atomic: AtomicWriteOrd, ) -> InterpResult<'tcx>
fn validate_atomic_rmw( &mut self, place: &MPlaceTy<'tcx>, atomic: AtomicRwOrd, ) -> InterpResult<'tcx>
fn validate_atomic_op<A: Debug + Copy>( &self, place: &MPlaceTy<'tcx>, atomic: A, access: AccessType, op: impl FnMut(&mut MemoryCellClocks, &mut ThreadClockSet, VectorIdx, A) -> Result<(), DataRace>, ) -> InterpResult<'tcx>
impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx>
fn run_timeout_callback(&mut self) -> InterpResult<'tcx>
fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>>
impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx>
fn atomic_load( &mut self, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, atomic: AtomicReadOrd, ) -> InterpResult<'tcx>
fn atomic_store( &mut self, args: &[OpTy<'tcx>], atomic: AtomicWriteOrd, ) -> InterpResult<'tcx>
fn compiler_fence_intrinsic( &mut self, args: &[OpTy<'tcx>], atomic: AtomicFenceOrd, ) -> InterpResult<'tcx>
fn atomic_fence_intrinsic( &mut self, args: &[OpTy<'tcx>], atomic: AtomicFenceOrd, ) -> InterpResult<'tcx>
fn atomic_rmw_op( &mut self, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, atomic_op: AtomicOp, atomic: AtomicRwOrd, ) -> InterpResult<'tcx>
fn atomic_exchange( &mut self, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, atomic: AtomicRwOrd, ) -> InterpResult<'tcx>
fn atomic_compare_exchange_impl( &mut self, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, success: AtomicRwOrd, fail: AtomicReadOrd, can_fail_spuriously: bool, ) -> InterpResult<'tcx>
fn atomic_compare_exchange( &mut self, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, success: AtomicRwOrd, fail: AtomicReadOrd, ) -> InterpResult<'tcx>
fn atomic_compare_exchange_weak( &mut self, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx>, success: AtomicRwOrd, fail: AtomicReadOrd, ) -> InterpResult<'tcx>
impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx>
fn lookup_windows_tls_dtors(&mut self) -> InterpResult<'tcx, Vec<ImmTy<'tcx>>>
fn schedule_windows_tls_dtor(&mut self, dtor: ImmTy<'tcx>) -> InterpResult<'tcx>
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, Poll<()>>
fn schedule_next_pthread_tls_dtor( &mut self, state: &mut RunningDtorState, ) -> InterpResult<'tcx, Poll<()>>
Returns true if it found a destructor to schedule, and false otherwise.
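On the user side, these schedulers run the destructors of thread-local values. A small program that exercises them under cargo miri run follows; which scheduling path is taken (Windows, macOS, or pthread-style) depends on the target.

```rust
// Run with: cargo miri run
// The value stored in the thread-local below has a destructor; when the
// spawned thread exits, that destructor is run by the TLS-destructor
// scheduling listed above (the exact path depends on the target).
use std::cell::Cell;
use std::thread;

struct NoisyDrop(&'static str);
impl Drop for NoisyDrop {
    fn drop(&mut self) {
        println!("TLS destructor ran for the {}", self.0);
    }
}

thread_local! {
    static SLOT: Cell<Option<NoisyDrop>> = Cell::new(None);
}

fn main() {
    let worker = thread::spawn(|| {
        SLOT.with(|slot| slot.set(Some(NoisyDrop("worker thread"))));
    });
    worker.join().unwrap();
}
```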
impl<'tcx, 'ecx> EvalContextPrivExt<'tcx, 'ecx> for MiriInterpCx<'tcx>
Retagging/reborrowing. There is some policy in here, such as which permissions to grant for which references, and when to add protectors.
fn sb_reborrow( &mut self, place: &MPlaceTy<'tcx>, size: Size, new_perm: NewPermission, new_tag: BorTag, retag_info: RetagInfo, ) -> InterpResult<'tcx, Option<Provenance>>
fn sb_retag_place( &mut self, place: &MPlaceTy<'tcx>, new_perm: NewPermission, info: RetagInfo, ) -> InterpResult<'tcx, MPlaceTy<'tcx>>
fn sb_retag_reference( &mut self, val: &ImmTy<'tcx>, new_perm: NewPermission, info: RetagInfo, ) -> InterpResult<'tcx, ImmTy<'tcx>>
kind indicates what kind of reference is being created.
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx>
fn eval_context_ref(&self) -> &MiriInterpCx<'tcx>
fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx>
impl VisitProvenance for MiriInterpCx<'_>
fn visit_provenance(&self, visit: &mut VisitWith<'_>)
Layout§
Note: Most layout information is completely unstable and may even differ between compilations. The only exception is types with certain repr(...) attributes. Please see the Rust Reference's “Type Layout” chapter for details on type layout guarantees.
Size: 2608 bytes