miri/shims/native_lib/mod.rs
1//! Implements calling functions from a native library.
2
3use std::cell::Cell;
4use std::marker::PhantomData;
5use std::ops::Deref;
6use std::os::raw::c_void;
7use std::ptr;
8use std::sync::atomic::AtomicBool;
9
10use libffi::low::CodePtr;
11use libffi::middle::Type as FfiType;
12use rustc_abi::{HasDataLayout, Size};
13use rustc_data_structures::either;
14use rustc_middle::ty::layout::TyAndLayout;
15use rustc_middle::ty::{self, Ty};
16use rustc_span::Symbol;
17use serde::{Deserialize, Serialize};
18
19use crate::*;
20
// The real (ptrace-based) tracing supervisor only works on x86/x86_64 Linux
// with glibc; on every other target, substitute the stub implementation so the
// rest of this module can use `trace::Supervisor` unconditionally.
#[cfg_attr(
    not(all(
        target_os = "linux",
        target_env = "gnu",
        any(target_arch = "x86", target_arch = "x86_64")
    )),
    path = "trace/stub.rs"
)]
pub mod trace;
30
/// An argument for an FFI call.
#[derive(Debug, Clone)]
pub struct OwnedArg {
    /// The type descriptor for this argument.
    /// This is `Some` until the descriptor is moved out (`take`n) to build the
    /// libffi CIF in `call_native_raw`, so an `OwnedArg` is good for one call.
    ty: Option<FfiType>,
    /// Corresponding bytes for the value.
    bytes: Box<[u8]>,
}
39
40impl OwnedArg {
41 /// Instantiates an argument from a type descriptor and bytes.
42 pub fn new(ty: FfiType, bytes: Box<[u8]>) -> Self {
43 Self { ty: Some(ty), bytes }
44 }
45}
46
/// The final results of an FFI trace, containing every relevant event detected
/// by the tracer.
#[derive(Serialize, Deserialize, Debug)]
pub struct MemEvents {
    /// A list of memory accesses that occurred, in the order they occurred in.
    pub acc_events: Vec<AccessEvent>,
}
54
/// A single memory access, as observed by the tracer.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum AccessEvent {
    /// A read occurred on this memory range.
    Read(AccessRange),
    /// A write may have occurred on this memory range.
    /// Some instructions *may* write memory without *always* doing that,
    /// so this can be an over-approximation.
    /// The range info, however, is reliable if the access did happen.
    /// If the second field is true, the access definitely happened.
    Write(AccessRange, bool),
}
67
68impl AccessEvent {
69 fn get_range(&self) -> AccessRange {
70 match self {
71 AccessEvent::Read(access_range) => access_range.clone(),
72 AccessEvent::Write(access_range, _) => access_range.clone(),
73 }
74 }
75}
76
/// The memory touched by a given access, as a base address plus a length.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AccessRange {
    /// The base address in memory where an access occurred.
    pub addr: usize,
    /// The number of bytes affected from the base.
    pub size: usize,
}
85
86impl AccessRange {
87 fn end(&self) -> usize {
88 self.addr.strict_add(self.size)
89 }
90}
91
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Call native host function and return the output and the memory accesses
    /// that occurred during the call.
    ///
    /// Note: the type descriptors inside `args` are consumed (`take`n) to build
    /// the libffi CIF, so each `OwnedArg` can only be used for a single call.
    fn call_native_raw(
        &mut self,
        fun: CodePtr,
        args: &mut [OwnedArg],
        ret: (FfiType, Size),
    ) -> InterpResult<'tcx, (Box<[u8]>, Option<MemEvents>)> {
        let this = self.eval_context_mut();
        #[cfg(target_os = "linux")]
        let alloc = this.machine.allocator.as_ref().unwrap().clone();
        #[cfg(not(target_os = "linux"))]
        // Placeholder value.
        let alloc = ();

        // Expose InterpCx for use by closure callbacks.
        // (`libffi_closure_callback` reconstructs the `&mut InterpCx` from this
        // exposed address while the FFI call is in progress.)
        this.machine.native_lib_ecx_interchange.set(ptr::from_mut(this).expose_provenance());

        let res = trace::Supervisor::do_ffi(&alloc, || {
            use libffi::middle::{Arg, Cif, Ret};

            let cif = Cif::new(args.iter_mut().map(|arg| arg.ty.take().unwrap()), ret.0);
            let arg_ptrs: Vec<_> = args.iter().map(|arg| Arg::new(&*arg.bytes)).collect();
            // Buffer for the return value, sized per its target layout.
            let mut ret = vec![0u8; ret.1.bytes_usize()];

            unsafe { cif.call_return_into(fun, &arg_ptrs, Ret::new::<[u8]>(&mut *ret)) };
            ret.into()
        });

        // Clear the interchange again: no FFI call is active any more, so a
        // (leaked) closure must not be able to obtain a stale `InterpCx` pointer.
        this.machine.native_lib_ecx_interchange.set(0);

        res
    }

    /// Get the pointer to the function of the specified name in the shared object file,
    /// if it exists. The function must be in one of the shared object files specified:
    /// we do *not* return pointers to functions in dependencies of libraries.
    fn get_func_ptr_explicitly_from_lib(&mut self, link_name: Symbol) -> Option<CodePtr> {
        let this = self.eval_context_mut();
        // Try getting the function from one of the shared libraries.
        for (lib, lib_path) in &this.machine.native_lib {
            let Ok(func): Result<libloading::Symbol<'_, unsafe extern "C" fn()>, _> =
                (unsafe { lib.get(link_name.as_str().as_bytes()) })
            else {
                continue;
            };
            #[expect(clippy::as_conversions)] // fn-ptr to raw-ptr cast needs `as`.
            let fn_ptr = *func.deref() as *mut std::ffi::c_void;

            // FIXME: this is a hack!
            // The `libloading` crate will automatically load system libraries like `libc`.
            // On linux `libloading` is based on `dlsym`: https://docs.rs/libloading/0.7.3/src/libloading/os/unix/mod.rs.html#202
            // and `dlsym`(https://linux.die.net/man/3/dlsym) looks through the dependency tree of the
            // library if it can't find the symbol in the library itself.
            // So, in order to check if the function was actually found in the specified
            // `machine.external_so_lib` we need to check its `dli_fname` and compare it to
            // the specified SO file path.
            // This code is a reimplementation of the mechanism for getting `dli_fname` in `libloading`,
            // from: https://docs.rs/libloading/0.7.3/src/libloading/os/unix/mod.rs.html#411
            // using the `libc` crate where this interface is public.
            let mut info = std::mem::MaybeUninit::<libc::Dl_info>::zeroed();
            unsafe {
                let res = libc::dladdr(fn_ptr, info.as_mut_ptr());
                assert!(res != 0, "failed to load info about function we already loaded");
                let info = info.assume_init();
                // On Cygwin, `dli_fname` is a fixed-size char array rather than a pointer.
                #[cfg(target_os = "cygwin")]
                let fname_ptr = info.dli_fname.as_ptr();
                #[cfg(not(target_os = "cygwin"))]
                let fname_ptr = info.dli_fname;
                assert!(!fname_ptr.is_null());
                if std::ffi::CStr::from_ptr(fname_ptr).to_str().unwrap()
                    != lib_path.to_str().unwrap()
                {
                    // The function is not actually in this .so, check the next one.
                    continue;
                }
            }

            // Return a pointer to the function.
            return Some(CodePtr(fn_ptr));
        }
        None
    }

    /// Applies the `events` to Miri's internal state. The event vector must be
    /// ordered sequentially by when the accesses happened, and the sizes are
    /// assumed to be exact.
    fn tracing_apply_accesses(&mut self, events: MemEvents) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        for evt in events.acc_events {
            let evt_rg = evt.get_range();
            // LLVM at least permits vectorising accesses to adjacent allocations,
            // so we cannot assume 1 access = 1 allocation. :(
            let mut rg = evt_rg.addr..evt_rg.end();
            while let Some(curr) = rg.next() {
                let Some(alloc_id) =
                    this.alloc_id_from_addr(curr.to_u64(), rg.len().try_into().unwrap())
                else {
                    throw_ub_format!("Foreign code did an out-of-bounds access!")
                };
                let alloc = this.get_alloc_raw(alloc_id)?;
                // The logical and physical address of the allocation coincide, so we can use
                // this instead of `addr_from_alloc_id`.
                let alloc_addr = alloc.get_bytes_unchecked_raw().addr();

                // Determine the range inside the allocation that this access covers. This range is
                // in terms of offsets from the start of `alloc`. The start of the overlap range
                // will be `curr`; the end will be the minimum of the end of the allocation and the
                // end of the access' range.
                let overlap = curr.strict_sub(alloc_addr)
                    ..std::cmp::min(alloc.len(), rg.end.strict_sub(alloc_addr));
                // Skip forward however many bytes of the access are contained in the current
                // allocation, subtracting 1 since the overlap range includes the current addr
                // that was already popped off of the range.
                rg.advance_by(overlap.len().strict_sub(1)).unwrap();

                match evt {
                    AccessEvent::Read(_) => {
                        // If a provenance was read by the foreign code, expose it.
                        for (_prov_range, prov) in
                            alloc.provenance().get_range(overlap.into(), this)
                        {
                            this.expose_provenance(prov)?;
                        }
                    }
                    AccessEvent::Write(_, certain) => {
                        // Sometimes we aren't certain if a write happened, in which case we
                        // only initialise that data if the allocation is mutable.
                        if certain || alloc.mutability.is_mut() {
                            let (alloc, cx) = this.get_alloc_raw_mut(alloc_id)?;
                            alloc.process_native_write(
                                &cx.tcx,
                                Some(AllocRange {
                                    start: Size::from_bytes(overlap.start),
                                    size: Size::from_bytes(overlap.len()),
                                }),
                            )
                        }
                    }
                }
            }
        }

        interp_ok(())
    }

    /// Extract the value from the result of reading an operand from the machine
    /// and convert it to a `OwnedArg`.
    ///
    /// Any pointers (provenances) contained in the argument bytes are exposed,
    /// since the native code may do arbitrary int-to-ptr casts on them.
    fn op_to_ffi_arg(&self, v: &OpTy<'tcx>, tracing: bool) -> InterpResult<'tcx, OwnedArg> {
        let this = self.eval_context_ref();

        // This should go first so that we emit unsupported before doing a bunch
        // of extra work for types that aren't supported yet.
        let ty = this
            .ty_to_ffitype(v.layout)
            .map_err(|ty| err_unsup_format!("unsupported argument type for native call: {ty}"))?;

        // Helper to print a warning when a pointer is shared with the native code.
        let expose = |prov: Provenance| -> InterpResult<'tcx> {
            // Process-global dedup flag: the diagnostic is only emitted once per execution.
            static DEDUP: AtomicBool = AtomicBool::new(false);
            if !DEDUP.swap(true, std::sync::atomic::Ordering::Relaxed) {
                // Newly set, so first time we get here.
                this.emit_diagnostic(NonHaltingDiagnostic::NativeCallSharedMem { tracing });
            }

            this.expose_provenance(prov)?;
            interp_ok(())
        };

        // Compute the byte-level representation of the argument. If there's a pointer in there, we
        // expose it inside the AM. Later in `visit_reachable_allocs`, the "meta"-level provenance
        // for accessing the pointee gets exposed; this is crucial to justify the C code effectively
        // casting the integer in `byte` to a pointer and using that.
        let bytes = match v.as_mplace_or_imm() {
            either::Either::Left(mplace) => {
                // Get the alloc id corresponding to this mplace, alongside
                // a pointer that's offset to point to this particular
                // mplace (not one at the base addr of the allocation).
                let sz = mplace.layout.size.bytes_usize();
                if sz == 0 {
                    throw_unsup_format!("attempting to pass a ZST over FFI");
                }
                let (id, ofs, _) = this.ptr_get_alloc_id(mplace.ptr(), sz.try_into().unwrap())?;
                let ofs = ofs.bytes_usize();
                let range = ofs..ofs.strict_add(sz);
                // Expose all provenances in the allocation within the byte range of the struct, if
                // any. These pointers are being directly passed to native code by-value.
                let alloc = this.get_alloc_raw(id)?;
                for (_prov_range, prov) in alloc.provenance().get_range(range.clone().into(), this)
                {
                    expose(prov)?;
                }
                // Read the bytes that make up this argument. We cannot use the normal getter as
                // those would fail if any part of the argument is uninitialized. Native code
                // is kind of outside the interpreter, after all...
                Box::from(alloc.inspect_with_uninit_and_ptr_outside_interpreter(range))
            }
            either::Either::Right(imm) => {
                let mut bytes: Box<[u8]> = vec![0; imm.layout.size.bytes_usize()].into();

                // A little helper to write scalars to our byte array.
                let mut write_scalar = |this: &MiriInterpCx<'tcx>, sc: Scalar, pos: usize| {
                    // If a scalar is a pointer, then expose its provenance.
                    if let interpret::Scalar::Ptr(p, _) = sc {
                        expose(p.provenance)?;
                    }
                    write_target_uint(
                        this.data_layout().endian,
                        &mut bytes[pos..][..sc.size().bytes_usize()],
                        sc.to_scalar_int()?.to_bits_unchecked(),
                    )
                    .unwrap();
                    interp_ok(())
                };

                // Write the scalar into the `bytes` buffer.
                match *imm {
                    Immediate::Scalar(sc) => write_scalar(this, sc, 0)?,
                    Immediate::ScalarPair(sc_first, sc_second) => {
                        // The first scalar has an offset of zero; compute the offset of the 2nd.
                        let ofs_second = {
                            let rustc_abi::BackendRepr::ScalarPair(a, b) = imm.layout.backend_repr
                            else {
                                span_bug!(
                                    this.cur_span(),
                                    "op_to_ffi_arg: invalid scalar pair layout: {:#?}",
                                    imm.layout
                                )
                            };
                            a.size(this).align_to(b.align(this).abi).bytes_usize()
                        };

                        write_scalar(this, sc_first, 0)?;
                        write_scalar(this, sc_second, ofs_second)?;
                    }
                    Immediate::Uninit => {
                        // Nothing to write.
                    }
                }

                bytes
            }
        };
        interp_ok(OwnedArg::new(ty, bytes))
    }

    /// Writes the raw return bytes `v` of an FFI call into `dest`, resetting
    /// provenance to wildcard (we cannot know what the native code produced)
    /// and re-running return-place validation.
    fn ffi_ret_to_mem(&mut self, v: Box<[u8]>, dest: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let len = v.len();
        this.write_bytes_ptr(dest.ptr(), v)?;
        if len == 0 {
            return interp_ok(());
        }
        // We have no idea which provenance these bytes have, so we reset it to wildcard.
        let tcx = this.tcx;
        let (alloc_id, offset, _) = this.ptr_try_get_alloc_id(dest.ptr(), 0).unwrap();
        let alloc = this.get_alloc_raw_mut(alloc_id)?.0;
        alloc.process_native_write(&tcx, Some(alloc_range(offset, dest.layout.size)));
        // Run the validation that would usually be part of `return`, also to reset
        // any provenance and padding that would not survive the return.
        if MiriMachine::enforce_validity(this, dest.layout) {
            this.validate_operand(
                &dest.clone().into(),
                MiriMachine::enforce_validity_recursively(this, dest.layout),
                /*reset_provenance_and_padding*/ true,
            )?;
        }
        interp_ok(())
    }

    /// Parses an ADT to construct the matching libffi type.
    /// Returns `Err` with the original type if the ADT is not a `repr(C)` struct.
    fn adt_to_ffitype(
        &self,
        orig_ty: Ty<'tcx>,
        adt_def: ty::AdtDef<'tcx>,
        args: &'tcx ty::List<ty::GenericArg<'tcx>>,
    ) -> Result<FfiType, Ty<'tcx>> {
        let this = self.eval_context_ref();
        // TODO: unions, etc.
        if !adt_def.is_struct() {
            return Err(orig_ty);
        }
        // TODO: Certain non-C reprs should be okay also.
        if !adt_def.repr().c() {
            return Err(orig_ty);
        }

        // Recursively translate every field; bail out on the first unsupported one.
        let mut fields = vec![];
        for field in &adt_def.non_enum_variant().fields {
            let layout = this.layout_of(field.ty(*this.tcx, args)).map_err(|_err| orig_ty)?;
            fields.push(this.ty_to_ffitype(layout)?);
        }

        Ok(FfiType::structure(fields))
    }

    /// Gets the matching libffi type for a given Ty.
    /// Returns `Err` with the type itself if it cannot be passed over FFI.
    fn ty_to_ffitype(&self, layout: TyAndLayout<'tcx>) -> Result<FfiType, Ty<'tcx>> {
        use rustc_abi::{AddressSpace, BackendRepr, Float, Integer, Primitive};

        // `BackendRepr::Scalar` is also a signal to pass this type as a scalar in the ABI. This
        // matches what codegen does. This does mean that we support some types whose ABI is not
        // stable, but that's fine -- we are anyway quite conservative in native-lib mode.
        if let BackendRepr::Scalar(s) = layout.backend_repr {
            // Simple sanity-check: this cannot be a `repr(C)` struct or union. (It could be a
            // repr(C) enum. Those indeed behave like integers in the ABI.)
            assert!(!layout.ty.ty_adt_def().is_some_and(|adt| !adt.is_enum() && adt.repr().c()));
            return Ok(match s.primitive() {
                Primitive::Int(Integer::I8, /* signed */ true) => FfiType::i8(),
                Primitive::Int(Integer::I16, /* signed */ true) => FfiType::i16(),
                Primitive::Int(Integer::I32, /* signed */ true) => FfiType::i32(),
                Primitive::Int(Integer::I64, /* signed */ true) => FfiType::i64(),
                Primitive::Int(Integer::I8, /* signed */ false) => FfiType::u8(),
                Primitive::Int(Integer::I16, /* signed */ false) => FfiType::u16(),
                Primitive::Int(Integer::I32, /* signed */ false) => FfiType::u32(),
                Primitive::Int(Integer::I64, /* signed */ false) => FfiType::u64(),
                Primitive::Float(Float::F32) => FfiType::f32(),
                Primitive::Float(Float::F64) => FfiType::f64(),
                Primitive::Pointer(AddressSpace::ZERO) => FfiType::pointer(),
                _ => return Err(layout.ty),
            });
        }
        Ok(match layout.ty.kind() {
            // Scalar types have already been handled above.
            ty::Adt(adt_def, args) => self.adt_to_ffitype(layout.ty, *adt_def, args)?,
            // Rust uses `()` as return type for `void` function, which becomes `Tuple([])`.
            ty::Tuple(t_list) if t_list.len() == 0 => FfiType::void(),
            _ => return Err(layout.ty),
        })
    }
}
426
/// The data passed to the closure shim function used to intercept function pointer calls from
/// native code.
struct LibffiClosureData<'tcx> {
    /// Shared cell through which `call_native_raw` publishes the exposed
    /// address of the active `MiriInterpCx` (0 while no FFI call is active).
    ecx_interchange: &'static Cell<usize>,
    /// Whether the function pointer's signature could not be expressed as libffi types,
    /// so the closure was built without a proper signature.
    unsupported: bool,
    /// Ties the `'tcx` lifetime to the interpreter context this data refers to.
    marker: PhantomData<MiriInterpCx<'tcx>>,
}
434
/// This function sets up a new libffi closure to intercept
/// calls to rust code via function pointers passed to native code.
///
/// Calling this function leaks the data passed into the libffi closure as
/// these need to be available until the execution terminates as the native
/// code side could store a function pointer and only call it at a later point.
pub fn build_libffi_closure<'tcx, 'this>(
    this: &'this MiriInterpCx<'tcx>,
    fn_sig: rustc_middle::ty::FnSig<'tcx>,
) -> InterpResult<'tcx, unsafe extern "C" fn()> {
    // Compute argument and return types in libffi representation.
    // The `try` block yields `Err` (carrying the offending `Ty`) on the first
    // argument or return type that cannot be translated.
    let closure_builder = try {
        let mut closure_builder = libffi::middle::Builder::new();
        for &input in fn_sig.inputs().iter() {
            let layout = this.layout_of(input).map_err(|_| input)?;
            let ty = this.ty_to_ffitype(layout)?;
            closure_builder = closure_builder.arg(ty);
        }
        let res_type = fn_sig.output();
        let res_type = {
            let layout = this.layout_of(res_type).map_err(|_| res_type)?;
            this.ty_to_ffitype(layout)?
        };
        closure_builder.res(res_type)
    };
    let mut unsupported = false;
    let closure_builder = closure_builder.unwrap_or_else(|_| {
        unsupported = true;
        // We hope that a closure which aborts execution works correctly even if we don't
        // set its signature.
        libffi::middle::Builder::new()
    });

    // Build the actual closure.
    // Both the closure data and the closure itself are deliberately leaked (see
    // the doc comment above): native code may stash the pointer and call it later.
    let data = LibffiClosureData {
        ecx_interchange: this.machine.native_lib_ecx_interchange,
        unsupported,
        marker: PhantomData,
    };
    let data = Box::leak(Box::new(data));
    let closure = closure_builder.into_closure(libffi_closure_callback, data);
    let closure = Box::leak(Box::new(closure));

    // The actual argument/return type doesn't matter.
    let fn_ptr = unsafe { closure.instantiate_code_ptr::<unsafe extern "C" fn()>() };
    // Libffi returns a **reference** to a function ptr here.
    // Therefore we need to dereference the reference to get the actual function pointer.
    interp_ok(*fn_ptr)
}
484
/// A shim function to intercept calls back from native code into the interpreter
/// via function pointers passed to the native code.
///
/// For now this shim only reports that such constructs are not supported by miri.
/// As future improvement we might continue execution in the interpreter here.
unsafe extern "C" fn libffi_closure_callback<'tcx>(
    _cif: &libffi::low::ffi_cif,
    _result: &mut c_void,
    _args: *const *const c_void,
    data: &LibffiClosureData<'tcx>,
) {
    // SAFETY: `call_native_raw` stores the exposed address of a live
    // `&mut MiriInterpCx` in the interchange for the duration of the FFI call
    // and resets it to 0 afterwards, so a non-zero value here points to a valid,
    // currently-suspended interpreter context.
    let ecx = unsafe {
        ptr::with_exposed_provenance_mut::<MiriInterpCx<'tcx>>(data.ecx_interchange.get())
            .as_mut()
            .expect("libffi closure called while no FFI call is active")
    };
    let err = if data.unsupported {
        err_unsup_format!(
            "calling a function pointer with unsupported argument/return type through the FFI boundary"
        )
    } else {
        err_unsup_format!("calling a function pointer through the FFI boundary")
    };

    crate::diagnostics::report_result(ecx, err.into());
    // We abort the execution at this point as we cannot return the
    // expected value here.
    std::process::exit(1);
}
514
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Call the native host function, with supplied arguments.
    /// Needs to convert all the arguments from their Miri representations to
    /// a native form (through `libffi` call).
    /// Then, convert the return value from the native form into something that
    /// can be stored in Miri's internal memory.
    ///
    /// Returns `true` if a call has been made, `false` if no functions of this name was found.
    fn call_native_fn(
        &mut self,
        link_name: Symbol,
        dest: &MPlaceTy<'tcx>,
        args: &[OpTy<'tcx>],
    ) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        // Get the pointer to the function in the shared object file if it exists.
        let Some(code_ptr) = this.get_func_ptr_explicitly_from_lib(link_name) else {
            // Shared object file does not export this function -- try the shims next.
            return interp_ok(false);
        };

        // Do we have ptrace? With tracing we can be more precise about what
        // memory the native code actually touched (see below).
        let tracing = trace::Supervisor::is_enabled();

        // Get the function arguments, copy them, and prepare the type descriptions.
        let mut libffi_args = Vec::<OwnedArg>::with_capacity(args.len());
        for arg in args.iter() {
            libffi_args.push(this.op_to_ffi_arg(arg, tracing)?);
        }
        let ret_ty = this
            .ty_to_ffitype(dest.layout)
            .map_err(|ty| err_unsup_format!("unsupported return type for native call: {ty}"))?;

        // Prepare all exposed memory (both previously exposed, and just newly exposed since a
        // pointer was passed as argument). Uninitialised memory is left as-is, but any data
        // exposed this way is garbage anyway.
        this.visit_reachable_allocs(this.exposed_allocs(), |this, alloc_id, info| {
            // If there is no data behind this pointer, skip this.
            if !matches!(info.kind, AllocKind::LiveData) {
                return interp_ok(());
            }
            // It's okay to get raw access, what we do does not correspond to any actual
            // AM operation, it just approximates the state to account for the native call.
            let alloc = this.get_alloc_raw(alloc_id)?;
            // Also expose the provenance of the interpreter-level allocation, so it can
            // be read by FFI. The `black_box` is defensive programming as LLVM likes
            // to (incorrectly) optimize away ptr2int casts whose result is unused.
            std::hint::black_box(alloc.get_bytes_unchecked_raw().expose_provenance());

            if !tracing {
                // Expose all provenances in this allocation, since the native code can do
                // $whatever. Can be skipped when tracing; in that case we'll expose just the
                // actually-read parts later.
                for prov in alloc.provenance().provenances() {
                    this.expose_provenance(prov)?;
                }
            }

            // Prepare for possible write from native code if mutable.
            if info.mutbl.is_mut() {
                let (alloc, cx) = this.get_alloc_raw_mut(alloc_id)?;
                // These writes could initialize everything and wreck havoc with the pointers.
                // We can skip that when tracing; in that case we'll later do that only for the
                // memory that got actually written.
                if !tracing {
                    alloc.process_native_write(&cx.tcx, None);
                }
                // Also expose *mutable* provenance for the interpreter-level allocation.
                std::hint::black_box(alloc.get_bytes_unchecked_raw_mut().expose_provenance());
            }

            interp_ok(())
        })?;

        // Call the function and store its output.
        let (ret, maybe_memevents) =
            this.call_native_raw(code_ptr, &mut libffi_args, (ret_ty, dest.layout.size))?;
        if tracing {
            // When tracing, the supervisor always returns the recorded events.
            this.tracing_apply_accesses(maybe_memevents.unwrap())?;
        }
        this.ffi_ret_to_mem(ret, dest)?;
        interp_ok(true)
    }
}
599}