// rustc_const_eval/interpret/call.rs

//! Manages calling a concrete function (with known MIR body) with argument passing,
//! and returning the return value to the caller.
use std::assert_matches::assert_matches;
use std::borrow::Cow;

use either::{Left, Right};
use rustc_abi::{self as abi, ExternAbi, FieldIdx, Integer, VariantIdx};
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, AdtDef, Instance, Ty, VariantDef};
use rustc_middle::{bug, mir, span_bug};
use rustc_span::sym;
use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
use tracing::{info, instrument, trace};

use super::{
    CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
    Projectable, Provenance, ReturnAction, Scalar, StackPopCleanup, StackPopInfo, interp_ok,
    throw_ub, throw_ub_custom, throw_unsup_format,
};
use crate::fluent_generated as fluent;

/// An argument passed to a function.
#[derive(Clone, Debug)]
pub enum FnArg<'tcx, Prov: Provenance = CtfeProvenance> {
    /// Pass a copy of the given operand.
    Copy(OpTy<'tcx, Prov>),
    /// Allow for the argument to be passed in-place: destroy the value originally stored at that place and
    /// make the place inaccessible for the duration of the function call.
    InPlace(MPlaceTy<'tcx, Prov>),
}

impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
    pub fn layout(&self) -> &TyAndLayout<'tcx> {
        match self {
            FnArg::Copy(op) => &op.layout,
            FnArg::InPlace(mplace) => &mplace.layout,
        }
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Make a copy of the given `fn_arg`. Any `InPlace` is turned into a plain copy; no protection
    /// of the original memory occurs.
    pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> {
        match arg {
            FnArg::Copy(op) => op.clone(),
            FnArg::InPlace(mplace) => mplace.clone().into(),
        }
    }

    /// Make a copy of the given `fn_args`. Any `InPlace` is turned into a plain copy; no protection
    /// of the original memory occurs.
    pub fn copy_fn_args(
        &self,
        args: &[FnArg<'tcx, M::Provenance>],
    ) -> Vec<OpTy<'tcx, M::Provenance>> {
        args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
    }

    /// Helper function for argument untupling.
    pub(super) fn fn_arg_field(
        &self,
        arg: &FnArg<'tcx, M::Provenance>,
        field: usize,
    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
        interp_ok(match arg {
            FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
            FnArg::InPlace(mplace) => FnArg::InPlace(self.project_field(mplace, field)?),
        })
    }

    /// Find the wrapped inner type of a transparent wrapper.
    /// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field").
    ///
    /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
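    ///
    /// For example (an illustrative doctest; `Meters` is a made-up type):
    /// ```
    /// #[repr(transparent)]
    /// struct Meters(f64);
    /// // A transparent wrapper is guaranteed the same layout as its only
    /// // non-1-ZST field, so `Meters` unfolds to `f64`.
    /// assert_eq!(std::mem::size_of::<Meters>(), std::mem::size_of::<f64>());
    /// ```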
    fn unfold_transparent(
        &self,
        layout: TyAndLayout<'tcx>,
        may_unfold: impl Fn(AdtDef<'tcx>) -> bool,
    ) -> TyAndLayout<'tcx> {
        match layout.ty.kind() {
            ty::Adt(adt_def, _) if adt_def.repr().transparent() && may_unfold(*adt_def) => {
                assert!(!adt_def.is_enum());
                // Find the non-1-ZST field, and recurse.
                let (_, field) = layout.non_1zst_field(self).unwrap();
                self.unfold_transparent(field, may_unfold)
            }
            // Not a transparent type, no further unfolding.
            _ => layout,
        }
    }

    /// Unwrap types that are guaranteed the null-pointer optimization (NPO).
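    ///
    /// For example (an illustrative doctest of the documented guarantee):
    /// ```
    /// use std::mem::size_of;
    /// // `Option<&T>` and `Option<fn()>` are guaranteed the null-pointer
    /// // optimization: `None` is represented as the null value.
    /// assert_eq!(size_of::<Option<&u32>>(), size_of::<&u32>());
    /// assert_eq!(size_of::<Option<fn()>>(), size_of::<fn()>());
    /// ```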
    fn unfold_npo(&self, layout: TyAndLayout<'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
        // Check if this is an option-like type wrapping some type.
        let ty::Adt(def, args) = layout.ty.kind() else {
            // Not an ADT, so definitely no NPO.
            return interp_ok(layout);
        };
        if def.variants().len() != 2 {
            // Not a 2-variant enum, so no NPO.
            return interp_ok(layout);
        }
        assert!(def.is_enum());

        let all_fields_1zst = |variant: &VariantDef| -> InterpResult<'tcx, _> {
            for field in &variant.fields {
                let ty = field.ty(*self.tcx, args);
                let layout = self.layout_of(ty)?;
                if !layout.is_1zst() {
                    return interp_ok(false);
                }
            }
            interp_ok(true)
        };

        // If one variant consists entirely of 1-ZST, then the other variant
        // is the only "relevant" one for this check.
        let var0 = VariantIdx::from_u32(0);
        let var1 = VariantIdx::from_u32(1);
        let relevant_variant = if all_fields_1zst(def.variant(var0))? {
            def.variant(var1)
        } else if all_fields_1zst(def.variant(var1))? {
            def.variant(var0)
        } else {
            // No variant is all-1-ZST, so no NPO.
            return interp_ok(layout);
        };
        // The "relevant" variant must have exactly one field, and its type is the "inner" type.
        if relevant_variant.fields.len() != 1 {
            return interp_ok(layout);
        }
        let inner = relevant_variant.fields[FieldIdx::from_u32(0)].ty(*self.tcx, args);
        let inner = self.layout_of(inner)?;

        // Check if the inner type is one of the NPO-guaranteed ones.
        // For that we first unpeel transparent *structs* (but not unions).
        let is_npo = |def: AdtDef<'tcx>| {
            self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
        };
        let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
            // Stop at NPO types so that we don't miss that attribute in the check below!
            def.is_struct() && !is_npo(def)
        });
        interp_ok(match inner.ty.kind() {
            ty::Ref(..) | ty::FnPtr(..) => {
                // Option<&T> behaves like &T, and same for fn()
                inner
            }
            ty::Adt(def, _) if is_npo(*def) => {
                // Once we found a `nonnull_optimization_guaranteed` type, further strip off
                // newtype structs from it to find the underlying ABI type.
                self.unfold_transparent(inner, /* may_unfold */ |def| def.is_struct())
            }
            _ => {
                // Everything else we do not unfold.
                layout
            }
        })
    }

    /// Check if these two layouts look like they are fn-ABI-compatible.
    /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
    /// that only checking the `PassMode` is insufficient.)
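    ///
    /// For example (an illustrative doctest of one documented guarantee; not exhaustive):
    /// `&T` and `*const T` are ABI-compatible for sized `T`, so this check accepts a call
    /// through a transmuted function pointer such as:
    /// ```
    /// fn takes_ref(_: &u8) {}
    /// // `&u8` and `*const u8` are ABI-compatible argument types.
    /// let f: fn(*const u8) = unsafe { std::mem::transmute(takes_ref as fn(&u8)) };
    /// f(&0u8);
    /// ```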
    fn layout_compat(
        &self,
        caller: TyAndLayout<'tcx>,
        callee: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, bool> {
        // Fast path: equal types are definitely compatible.
        if caller.ty == callee.ty {
            return interp_ok(true);
        }
        // 1-ZST are compatible with all 1-ZST (and with nothing else).
        if caller.is_1zst() || callee.is_1zst() {
            return interp_ok(caller.is_1zst() && callee.is_1zst());
        }
        // Unfold newtypes and NPO optimizations.
        let unfold = |layout: TyAndLayout<'tcx>| {
            self.unfold_npo(self.unfold_transparent(layout, /* may_unfold */ |_def| true))
        };
        let caller = unfold(caller)?;
        let callee = unfold(callee)?;
        // Now see if these inner types are compatible.

        // Compatible pointer types. For thin pointers, we have to accept even non-`repr(transparent)`
        // things as compatible due to `DispatchFromDyn`. For instance, `Rc<i32>` and `*mut i32`
        // must be compatible. So we just accept everything with Pointer ABI as compatible,
        // even if this will accept some code that is not stably guaranteed to work.
        // This also handles function pointers.
        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
            abi::BackendRepr::Scalar(s) => match s.primitive() {
                abi::Primitive::Pointer(addr_space) => Some(addr_space),
                _ => None,
            },
            _ => None,
        };
        if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
            return interp_ok(caller == callee);
        }
        // For wide pointers we have to get the pointee type.
        let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
            // We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
            interp_ok(Some(match ty.kind() {
                ty::Ref(_, ty, _) => *ty,
                ty::RawPtr(ty, _) => *ty,
                // We only accept `Box` with the default allocator.
                _ if ty.is_box_global(*self.tcx) => ty.expect_boxed_ty(),
                _ => return interp_ok(None),
            }))
        };
        if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
            // This is okay if they have the same metadata type.
            let meta_ty = |ty: Ty<'tcx>| {
                // Even if `ty` is normalized, the search for the unsized tail will project
                // to fields, which can yield non-normalized types. So we need to provide a
                // normalization function.
                let normalize = |ty| self.tcx.normalize_erasing_regions(self.typing_env, ty);
                ty.ptr_metadata_ty(*self.tcx, normalize)
            };
            return interp_ok(meta_ty(caller) == meta_ty(callee));
        }

        // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
        // `char` counts as `u32`.
        let int_ty = |ty: Ty<'tcx>| {
            Some(match ty.kind() {
                ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
                ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
                ty::Char => (Integer::I32, /* signed */ false),
                _ => return None,
            })
        };
        if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
            // This is okay if they are the same integer type.
            return interp_ok(caller == callee);
        }

        // Fall back to exact equality.
        interp_ok(caller == callee)
    }

    /// Returns a `bool` saying whether the two arguments are ABI-compatible.
    pub fn check_argument_compat(
        &self,
        caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
    ) -> InterpResult<'tcx, bool> {
        // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target,
        // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
        if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
            // Ensure that our checks imply actual ABI compatibility for this concrete call.
            // (This can fail e.g. if `#[rustc_nonnull_optimization_guaranteed]` is used incorrectly.)
            assert!(caller_abi.eq_abi(callee_abi));
            interp_ok(true)
        } else {
            trace!(
                "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
                caller_abi, callee_abi
            );
            interp_ok(false)
        }
    }

    /// Initialize a single callee argument, checking the types for compatibility.
    fn pass_argument<'x, 'y>(
        &mut self,
        caller_args: &mut impl Iterator<
            Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
        >,
        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        callee_arg: &mir::Place<'tcx>,
        callee_ty: Ty<'tcx>,
        already_live: bool,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'x,
        'tcx: 'y,
    {
        assert_eq!(callee_ty, callee_abi.layout.ty);
        if matches!(callee_abi.mode, PassMode::Ignore) {
            // This one is skipped. Still must be made live though!
            if !already_live {
                self.storage_live(callee_arg.as_local().unwrap())?;
            }
            return interp_ok(());
        }
        // Find next caller arg.
        let Some((caller_arg, caller_abi)) = caller_args.next() else {
            throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
        };
        assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout);
        // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are
        // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the
        // right type to print to the user.

        // Check compatibility
        if !self.check_argument_compat(caller_abi, callee_abi)? {
            throw_ub!(AbiMismatchArgument {
                caller_ty: caller_abi.layout.ty,
                callee_ty: callee_abi.layout.ty
            });
        }
        // We work with a copy of the argument for now; if this is in-place argument passing, we
        // will later protect the source it comes from. This means the callee cannot observe if we
        // did in-place or by-copy argument passing, except for pointer equality tests.
        let caller_arg_copy = self.copy_fn_arg(caller_arg);
        if !already_live {
            let local = callee_arg.as_local().unwrap();
            let meta = caller_arg_copy.meta();
            // `check_argument_compat` ensures that if metadata is needed, both have the same type,
            // so we know they will use the metadata the same way.
            assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty);

            self.storage_live_dyn(local, meta)?;
        }
        // Now we can finally actually evaluate the callee place.
        let callee_arg = self.eval_place(*callee_arg)?;
        // We allow some transmutes here.
        // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
        // is true for all `copy_op`, but there are a lot of special cases for argument passing
        // specifically.)
        self.copy_op_allow_transmute(&caller_arg_copy, &callee_arg)?;
        // If this was an in-place pass, protect the place it comes from for the duration of the call.
        if let FnArg::InPlace(mplace) = caller_arg {
            M::protect_in_place_function_argument(self, mplace)?;
        }
        interp_ok(())
    }

    /// The main entry point for creating a new stack frame: performs ABI checks and initializes
    /// arguments.
    #[instrument(skip(self), level = "trace")]
    pub fn init_stack_frame(
        &mut self,
        instance: Instance<'tcx>,
        body: &'tcx mir::Body<'tcx>,
        caller_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, M::Provenance>],
        with_caller_location: bool,
        destination: &MPlaceTy<'tcx, M::Provenance>,
        mut stack_pop: StackPopCleanup,
    ) -> InterpResult<'tcx> {
        // Compute callee information.
        // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
        let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

        if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
            throw_unsup_format!("calling a c-variadic function is not supported");
        }

        if caller_fn_abi.conv != callee_fn_abi.conv {
            throw_ub_custom!(
                fluent::const_eval_incompatible_calling_conventions,
                callee_conv = format!("{:?}", callee_fn_abi.conv),
                caller_conv = format!("{:?}", caller_fn_abi.conv),
            )
        }

        // Check that all target features required by the callee (i.e., from
        // the attribute `#[target_feature(enable = ...)]`) are enabled at
        // compile time.
        M::check_fn_target_features(self, instance)?;

        if !callee_fn_abi.can_unwind {
            // The callee cannot unwind, so force the `Unreachable` unwind handling.
            match &mut stack_pop {
                StackPopCleanup::Root { .. } => {}
                StackPopCleanup::Goto { unwind, .. } => {
                    *unwind = mir::UnwindAction::Unreachable;
                }
            }
        }

        self.push_stack_frame_raw(instance, body, destination, stack_pop)?;

        // If an error is raised here, pop the frame again to get an accurate backtrace.
        // To this end, we wrap it all in a `try` block.
        let res: InterpResult<'tcx> = try {
            trace!(
                "caller ABI: {:#?}, args: {:#?}",
                caller_fn_abi,
                args.iter()
                    .map(|arg| (
                        arg.layout().ty,
                        match arg {
                            FnArg::Copy(op) => format!("copy({op:?})"),
                            FnArg::InPlace(mplace) => format!("in-place({mplace:?})"),
                        }
                    ))
                    .collect::<Vec<_>>()
            );
            trace!(
                "spread_arg: {:?}, locals: {:#?}",
                body.spread_arg,
                body.args_iter()
                    .map(|local| (
                        local,
                        self.layout_of_local(self.frame(), local, None).unwrap().ty,
                    ))
                    .collect::<Vec<_>>()
            );

            // In principle, we have two iterators: where the arguments come from, and where
            // they go to.

            // The "where they come from" part is easy, we expect the caller to do any special handling
            // that might be required here (e.g. for untupling).
            // If `with_caller_location` is set we pretend there is an extra argument (that
            // we will not pass; our `caller_location` intrinsic implementation walks the stack instead).
            assert_eq!(
                args.len() + if with_caller_location { 1 } else { 0 },
                caller_fn_abi.args.len(),
                "mismatch between caller ABI and caller arguments",
            );
            let mut caller_args = args
                .iter()
                .zip(caller_fn_abi.args.iter())
                .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));

            // Now we have to spread them out across the callee's locals,
            // taking into account the `spread_arg`. If we could write
            // this as a single iterator (that handles `spread_arg`), then
            // `pass_argument` would be the loop body. It takes care to
            // not advance `caller_iter` for ignored arguments.
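            //
            // E.g. for `<F as FnOnce<(A, B)>>::call_once(f, (a, b))`, the callee ABI
            // lists `f`, `A`, `B` separately, while the callee body has a single tuple
            // local marked as `spread_arg` that we fill in field by field below.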
            let mut callee_args_abis = callee_fn_abi.args.iter();
            for local in body.args_iter() {
                // Construct the destination place for this argument. At this point all
                // locals are still dead, so we cannot construct a `PlaceTy`.
                let dest = mir::Place::from(local);
                // `layout_of_local` does more than just the instantiation we need to get the
                // type, but the result gets cached so this avoids calling the instantiation
                // query *again* the next time this local is accessed.
                let ty = self.layout_of_local(self.frame(), local, None)?.ty;
                if Some(local) == body.spread_arg {
                    // Make the local live once, then fill in the value field by field.
                    self.storage_live(local)?;
                    // Must be a tuple
                    let ty::Tuple(fields) = ty.kind() else {
                        span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
                    };
                    for (i, field_ty) in fields.iter().enumerate() {
                        let dest = dest.project_deeper(
                            &[mir::ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)],
                            *self.tcx,
                        );
                        let callee_abi = callee_args_abis.next().unwrap();
                        self.pass_argument(
                            &mut caller_args,
                            callee_abi,
                            &dest,
                            field_ty,
                            /* already_live */ true,
                        )?;
                    }
                } else {
                    // Normal argument. Cannot mark it as live yet, it might be unsized!
                    let callee_abi = callee_args_abis.next().unwrap();
                    self.pass_argument(
                        &mut caller_args,
                        callee_abi,
                        &dest,
                        ty,
                        /* already_live */ false,
                    )?;
                }
            }
            // If the callee needs a caller location, pretend we consume one more argument from the ABI.
            if instance.def.requires_caller_location(*self.tcx) {
                callee_args_abis.next().unwrap();
            }
            // Now we should have no more caller args or callee arg ABIs
            assert!(
                callee_args_abis.next().is_none(),
                "mismatch between callee ABI and callee body arguments"
            );
            if caller_args.next().is_some() {
                throw_ub_custom!(fluent::const_eval_too_many_caller_args);
            }
            // Don't forget to check the return type!
            if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
                throw_ub!(AbiMismatchReturn {
                    caller_ty: caller_fn_abi.ret.layout.ty,
                    callee_ty: callee_fn_abi.ret.layout.ty
                });
            }

            // Protect return place for in-place return value passing.
            M::protect_in_place_function_argument(self, &destination)?;

            // Don't forget to mark "initially live" locals as live.
            self.storage_live_for_always_live_locals()?;
        };
        res.inspect_err_kind(|_| {
            // Don't show the incomplete stack frame in the error stacktrace.
            self.stack_mut().pop();
        })
    }

    /// Initiate a call to this function -- pushing the stack frame and initializing the arguments.
    ///
    /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
    /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
    ///
    /// `with_caller_location` indicates whether the caller passed a caller location. Miri
    /// implements caller locations without argument passing, but to match `FnAbi` we need to know
    /// when those arguments are present.
    pub(super) fn init_fn_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        (caller_abi, caller_fn_abi): (ExternAbi, &FnAbi<'tcx, Ty<'tcx>>),
        args: &[FnArg<'tcx, M::Provenance>],
        with_caller_location: bool,
        destination: &MPlaceTy<'tcx, M::Provenance>,
        target: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        trace!("init_fn_call: {:#?}", fn_val);

        let instance = match fn_val {
            FnVal::Instance(instance) => instance,
            FnVal::Other(extra) => {
                return M::call_extra_fn(
                    self,
                    extra,
                    caller_fn_abi,
                    args,
                    destination,
                    target,
                    unwind,
                );
            }
        };

        match instance.def {
            ty::InstanceKind::Intrinsic(def_id) => {
                assert!(self.tcx.intrinsic(def_id).is_some());
                // FIXME: Should `InPlace` arguments be reset to uninit?
                if let Some(fallback) = M::call_intrinsic(
                    self,
                    instance,
                    &self.copy_fn_args(args),
                    destination,
                    target,
                    unwind,
                )? {
                    assert!(!self.tcx.intrinsic(fallback.def_id()).unwrap().must_be_overridden);
                    assert_matches!(fallback.def, ty::InstanceKind::Item(_));
                    return self.init_fn_call(
                        FnVal::Instance(fallback),
                        (caller_abi, caller_fn_abi),
                        args,
                        with_caller_location,
                        destination,
                        target,
                        unwind,
                    );
                } else {
                    interp_ok(())
                }
            }
            ty::InstanceKind::VTableShim(..)
            | ty::InstanceKind::ReifyShim(..)
            | ty::InstanceKind::ClosureOnceShim { .. }
            | ty::InstanceKind::ConstructCoroutineInClosureShim { .. }
            | ty::InstanceKind::FnPtrShim(..)
            | ty::InstanceKind::DropGlue(..)
            | ty::InstanceKind::CloneShim(..)
            | ty::InstanceKind::FnPtrAddrShim(..)
            | ty::InstanceKind::ThreadLocalShim(..)
            | ty::InstanceKind::AsyncDropGlueCtorShim(..)
            | ty::InstanceKind::Item(_) => {
                // We need MIR for this fn.
                // Note that this can be an intrinsic, if we are executing its fallback body.
                let Some((body, instance)) = M::find_mir_or_eval_fn(
                    self,
                    instance,
                    caller_fn_abi,
                    args,
                    destination,
                    target,
                    unwind,
                )?
                else {
                    return interp_ok(());
                };

                // Special handling for the closure ABI: untuple the last argument.
                let args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
                    if caller_abi == ExternAbi::RustCall && !args.is_empty() {
                        // Untuple
                        let (untuple_arg, args) = args.split_last().unwrap();
                        trace!("init_fn_call: Will pass last argument by untupling");
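                        // E.g. `Fn::call(&f, (a, b))` reaches us with args `[&f, (a, b)]`;
                        // expanding the trailing tuple yields `[&f, a, b]`, matching the
                        // callee's untupled ABI.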
                        Cow::from(
                            args.iter()
                                .map(|a| interp_ok(a.clone()))
                                .chain(
                                    (0..untuple_arg.layout().fields.count())
                                        .map(|i| self.fn_arg_field(untuple_arg, i)),
                                )
                                .collect::<InterpResult<'_, Vec<_>>>()?,
                        )
                    } else {
                        // Plain arg passing
                        Cow::from(args)
                    };

                self.init_stack_frame(
                    instance,
                    body,
                    caller_fn_abi,
                    &args,
                    with_caller_location,
                    destination,
                    StackPopCleanup::Goto { ret: target, unwind },
                )
            }
            // `InstanceKind::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
            // codegen'd / interpreted as virtual calls through the vtable.
            ty::InstanceKind::Virtual(def_id, idx) => {
                let mut args = args.to_vec();
                // We have to implement all "dyn-compatible receivers". So we have to go search for a
                // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
                // unwrap those newtypes until we are there.
                // An `InPlace` does nothing here, we keep the original receiver intact. We can't
                // really pass the argument in-place anyway, and we are constructing a new
                // `Immediate` receiver.
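                // E.g. a `Pin<&mut dyn Trait>` receiver is a newtype around `&mut dyn Trait`;
                // projecting to its only non-1-ZST field yields the wide pointer we need.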
                let mut receiver = self.copy_fn_arg(&args[0]);
                let receiver_place = loop {
                    match receiver.layout.ty.kind() {
                        ty::Ref(..) | ty::RawPtr(..) => {
                            // We do *not* use `deref_pointer` here: we don't want to conceptually
                            // create a place that must be dereferenceable, since the receiver might
                            // be a raw pointer and (for `*const dyn Trait`) we don't need to
                            // actually access memory to resolve this method.
                            // Also see <https://github.com/rust-lang/miri/issues/2786>.
                            let val = self.read_immediate(&receiver)?;
                            break self.ref_to_mplace(&val)?;
                        }
                        ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values
                        ty::Dynamic(.., ty::DynStar) => {
                            // Not clear how to handle this, so far we assume the receiver is always a pointer.
                            span_bug!(
                                self.cur_span(),
                                "by-value calls on a `dyn*`... are those a thing?"
                            );
                        }
                        _ => {
                            // Not there yet, search for the only non-ZST field.
                            // (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
                            let (idx, _) = receiver.layout.non_1zst_field(self).expect(
                                "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
                            );
                            receiver = self.project_field(&receiver, idx)?;
                        }
                    }
                };

                // Obtain the underlying trait we are working on, and the adjusted receiver argument.
                let (trait_, dyn_ty, adjusted_recv) = if let ty::Dynamic(data, _, ty::DynStar) =
                    receiver_place.layout.ty.kind()
                {
                    let recv = self.unpack_dyn_star(&receiver_place, data)?;

                    (data.principal(), recv.layout.ty, recv.ptr())
                } else {
                    // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
                    // (For that reason we also cannot use `unpack_dyn_trait`.)
                    let receiver_tail =
                        self.tcx.struct_tail_for_codegen(receiver_place.layout.ty, self.typing_env);
                    let ty::Dynamic(receiver_trait, _, ty::Dyn) = receiver_tail.kind() else {
                        span_bug!(
                            self.cur_span(),
                            "dynamic call on non-`dyn` type {}",
                            receiver_tail
                        )
                    };
                    assert!(receiver_place.layout.is_unsized());

                    // Get the required information from the vtable.
                    let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
                    let dyn_ty = self.get_ptr_vtable_ty(vptr, Some(receiver_trait))?;

                    // It might be surprising that we use a pointer as the receiver even if this
                    // is a by-val case; this works because by-val passing of an unsized `dyn
                    // Trait` to a function is actually desugared to a pointer.
                    (receiver_trait.principal(), dyn_ty, receiver_place.ptr())
                };

                // Now determine the actual method to call. Usually we use the easy way of just
                // looking up the method at index `idx`.
                let vtable_entries = self.vtable_entries(trait_, dyn_ty);
                let Some(ty::VtblEntry::Method(fn_inst)) = vtable_entries.get(idx).copied() else {
                    // FIXME(fee1-dead) these could be variants of the UB info enum instead of this
                    throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method);
                };
                trace!("Virtual call dispatches to {fn_inst:#?}");
                // We can also do the lookup based on `def_id` and `dyn_ty`, and check that that
                // produces the same result.
                self.assert_virtual_instance_matches_concrete(dyn_ty, def_id, instance, fn_inst);

                // Adjust receiver argument. Layout can be any (thin) ptr.
                let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty);
                args[0] = FnArg::Copy(
                    ImmTy::from_immediate(
                        Scalar::from_maybe_pointer(adjusted_recv, self).into(),
                        self.layout_of(receiver_ty)?,
                    )
                    .into(),
                );
                trace!("Patched receiver operand to {:#?}", args[0]);
                // Need to also adjust the type in the ABI. Strangely, the layout there is actually
                // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr`
                // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus
                // type, so we just patch this up locally.
                let mut caller_fn_abi = caller_fn_abi.clone();
                caller_fn_abi.args[0].layout.ty = receiver_ty;

                // recurse with concrete function
                self.init_fn_call(
                    FnVal::Instance(fn_inst),
                    (caller_abi, &caller_fn_abi),
                    &args,
                    with_caller_location,
                    destination,
                    target,
                    unwind,
                )
            }
        }
    }

    fn assert_virtual_instance_matches_concrete(
        &self,
        dyn_ty: Ty<'tcx>,
        def_id: DefId,
        virtual_instance: ty::Instance<'tcx>,
        concrete_instance: ty::Instance<'tcx>,
    ) {
        let tcx = *self.tcx;

        let trait_def_id = tcx.trait_of_item(def_id).unwrap();
        let virtual_trait_ref = ty::TraitRef::from_method(tcx, trait_def_id, virtual_instance.args);
        let existential_trait_ref = ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
        let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);

        let concrete_method = Instance::expect_resolve_for_vtable(
            tcx,
            self.typing_env,
            def_id,
            virtual_instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args),
            self.cur_span(),
        );
        assert_eq!(concrete_instance, concrete_method);
    }

    /// Initiate a tail call to this function -- popping the current stack frame, pushing the new
    /// stack frame and initializing the arguments.
    pub(super) fn init_fn_tail_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        (caller_abi, caller_fn_abi): (ExternAbi, &FnAbi<'tcx, Ty<'tcx>>),
        args: &[FnArg<'tcx, M::Provenance>],
        with_caller_location: bool,
    ) -> InterpResult<'tcx> {
        trace!("init_fn_tail_call: {:#?}", fn_val);

        // This is the "canonical" implementation of tail calls,
        // a pop of the current stack frame, followed by a normal call
        // which pushes a new stack frame, with the return address from
        // the popped stack frame.
        //
        // Note that we are using `pop_stack_frame_raw` and not `return_from_current_stack_frame`,
        // as the latter "executes" the goto to the return block, but we don't want to,
        // only the tail called function should return to the current return block.
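        //
        // E.g. with the (unstable) `become f(x)` syntax: if `a` called `b` and `b`
        // tail-calls `f`, then `b`'s frame is popped here, so `f` will return
        // directly to `a` via `b`'s saved return target.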
        M::before_stack_pop(self, self.frame())?;

        let StackPopInfo { return_action, return_to_block, return_place } =
            self.pop_stack_frame_raw(false)?;

        assert_eq!(return_action, ReturnAction::Normal);

        // Take the "stack pop cleanup" info, and use that to initiate the next call.
        let StackPopCleanup::Goto { ret, unwind } = return_to_block else {
            bug!("can't tailcall as root");
        };

        // FIXME(explicit_tail_calls):
        //   we should check if both caller&callee can/n't unwind,
        //   see <https://github.com/rust-lang/rust/pull/113128#issuecomment-1614979803>

        self.init_fn_call(
            fn_val,
            (caller_abi, caller_fn_abi),
            args,
            with_caller_location,
            &return_place,
            ret,
            unwind,
        )
    }

    pub(super) fn init_drop_in_place_call(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
        instance: ty::Instance<'tcx>,
        target: mir::BasicBlock,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        trace!("init_drop_in_place_call: {:?},\n  instance={:?}", place, instance);
        // We take the address of the object. This may well be unaligned, which is fine
        // for us here. However, unaligned accesses will probably make the actual drop
        // implementation fail -- a problem shared by rustc.
        let place = self.force_allocation(place)?;

        // We behave a bit differently from codegen here.
        // Codegen creates an `InstanceKind::Virtual` with index 0 (the slot of the drop method) and
        // then dispatches that to the normal call machinery. However, our call machinery currently
        // only supports calling `VtblEntry::Method`; it would choke on a `MetadataDropInPlace`. So
        // instead we do the virtual call stuff ourselves. It's easier here than in `eval_fn_call`
        // since we can just get a place of the underlying type and use `mplace_to_ref`.
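        //
        // E.g. when dropping the contents of a `Box<dyn Any>`, `place` has type `dyn Any`
        // here; below we unpack it to the concrete type and resolve that type's drop glue.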
        let place = match place.layout.ty.kind() {
            ty::Dynamic(data, _, ty::Dyn) => {
                // Dropping a trait object. Need to find actual drop fn.
                self.unpack_dyn_trait(&place, data)?
            }
            ty::Dynamic(data, _, ty::DynStar) => {
                // Dropping a `dyn*`. Need to find actual drop fn.
                self.unpack_dyn_star(&place, data)?
            }
            _ => {
                debug_assert_eq!(
                    instance,
                    ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty)
                );
                place
            }
        };
        let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

        let arg = self.mplace_to_ref(&place)?;
        let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);

        self.init_fn_call(
            FnVal::Instance(instance),
            (ExternAbi::Rust, fn_abi),
            &[FnArg::Copy(arg.into())],
            false,
            &ret,
            Some(target),
            unwind,
        )
    }

    /// Pops the current frame from the stack, copies the return value to the caller, deallocates
    /// the memory for allocated locals, and jumps to an appropriate place.
    ///
    /// If `unwinding` is `false`, then we are performing a normal return
    /// from a function. In this case, we jump back into the frame of the caller,
    /// and continue execution as normal.
    ///
    /// If `unwinding` is `true`, then we are in the middle of a panic,
    /// and need to unwind this frame. In this case, we jump to the
    /// `cleanup` block for the function, which is responsible for running
    /// `Drop` impls for any locals that have been initialized at this point.
    /// The cleanup block ends with a special `Resume` terminator, which will
    /// cause us to continue unwinding.
    #[instrument(skip(self), level = "trace")]
    pub(super) fn return_from_current_stack_frame(
        &mut self,
        unwinding: bool,
    ) -> InterpResult<'tcx> {
        info!(
            "popping stack frame ({})",
            if unwinding { "during unwinding" } else { "returning from function" }
        );

        // Check `unwinding`.
        assert_eq!(
            unwinding,
            match self.frame().loc {
                Left(loc) => self.body().basic_blocks[loc.block].is_cleanup,
                Right(_) => true,
            }
        );
        if unwinding && self.frame_idx() == 0 {
            throw_ub_custom!(fluent::const_eval_unwind_past_top);
        }

        M::before_stack_pop(self, self.frame())?;

        // Copy return value. Must of course happen *before* we deallocate the locals.
        // Must be *after* `before_stack_pop` as otherwise the return place might still be protected.
        let copy_ret_result = if !unwinding {
            let op = self
                .local_to_op(mir::RETURN_PLACE, None)
                .expect("return place should always be live");
            let dest = self.frame().return_place.clone();
            let res = self.copy_op_allow_transmute(&op, &dest);
            trace!("return value: {:?}", self.dump_place(&dest.into()));
            // We delay actually short-circuiting on this error until *after* the stack frame is
            // popped, since we want this error to be attributed to the caller, whose type defines
            // this transmute.
            res
        } else {
            interp_ok(())
        };

        // All right, now it is time to actually pop the frame.
        // An error here takes precedence over the copy error.
        let (stack_pop_info, ()) = self.pop_stack_frame_raw(unwinding).and(copy_ret_result)?;

        match stack_pop_info.return_action {
            ReturnAction::Normal => {}
            ReturnAction::NoJump => {
                // The hook already did everything.
                return interp_ok(());
            }
            ReturnAction::NoCleanup => {
                // If we are not doing cleanup, also skip everything else.
                assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
                assert!(!unwinding, "tried to skip cleanup during unwinding");
                // Skip machine hook.
                return interp_ok(());
            }
        }

        // Normal return, figure out where to jump.
        if unwinding {
            // Follow the unwind edge.
            match stack_pop_info.return_to_block {
                StackPopCleanup::Goto { unwind, .. } => {
                    // This must be the very last thing that happens, since it can in fact push a new stack frame.
                    self.unwind_to_block(unwind)
                }
                StackPopCleanup::Root { .. } => {
                    panic!("encountered StackPopCleanup::Root when unwinding!")
                }
            }
        } else {
            // Follow the normal return edge.
            match stack_pop_info.return_to_block {
                StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
                StackPopCleanup::Root { .. } => {
                    assert!(
                        self.stack().is_empty(),
                        "only the bottommost frame can have StackPopCleanup::Root"
                    );
                    interp_ok(())
                }
            }
        }
956}