rustc_const_eval/interpret/step.rs

//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

use std::iter;

use either::Either;
use rustc_abi::{FIRST_VARIANT, FieldIdx};
use rustc_data_structures::fx::FxHashSet;
use rustc_index::IndexSlice;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, mir, span_bug};
use rustc_span::source_map::Spanned;
use rustc_target::callconv::FnAbi;
use tracing::field::Empty;
use tracing::{info, instrument, trace};

use super::{
    FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy,
    Projectable, interp_ok, throw_ub, throw_unsup_format,
};
use crate::interpret::EnteredTraceSpan;
use crate::{enter_trace_span, util};

struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> {
    callee: FnVal<'tcx, M::ExtraFnVal>,
    args: Vec<FnArg<'tcx, M::Provenance>>,
    fn_sig: ty::FnSig<'tcx>,
    fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
    /// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`])
    with_caller_location: bool,
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`
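    ///
    /// A minimal driver loop (a sketch; `interp_cx` is an illustrative name, not the actual
    /// const-eval entry point) would simply call this until it reports that nothing is left to do:
    /// `while interp_cx.step()? {}`.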
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return interp_ok(false);
        }

        let Either::Left(loc) = self.frame().loc else {
            // We are unwinding and this fn has no cleanup code.
            // Just go on unwinding.
            trace!("unwinding: skipping frame");
            self.return_from_current_stack_frame(/* unwinding */ true)?;
            return interp_ok(true);
        };
        let basic_block = &self.body().basic_blocks[loc.block];

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            let old_frames = self.frame_idx();
            self.eval_statement(stmt)?;
            // Make sure we are not updating `statement_index` of the wrong frame.
            assert_eq!(old_frames, self.frame_idx());
            // Advance the program counter.
            self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
            return interp_ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Either::Left(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        interp_ok(true)
    }

    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter.
    ///
    /// This does NOT move the statement counter forward, the caller has to do that!
    pub fn eval_statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        let _trace = enter_trace_span!(
            M,
            step::eval_statement,
            stmt = ?stmt.kind,
            span = ?stmt.source_info.span,
            tracing_separate_thread = Empty,
        )
        .or_if_tracing_disabled(|| info!(stmt = ?stmt.kind));

        use rustc_middle::mir::StatementKind::*;

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, &dest)?;
            }

            Deinit(place) => {
                let dest = self.eval_place(**place)?;
                self.write_uninit(&dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                self.storage_live(*local)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                self.storage_dead(*local)?;
            }

            // No dynamic semantics attached to `FakeRead`; MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag_place_contents(self, *kind, &dest)?;
            }

            Intrinsic(box intrinsic) => self.eval_nondiverging_intrinsic(intrinsic)?,

            // Evaluate the place expression, without reading from it.
            PlaceMention(box place) => {
                let _ = self.eval_place(*place)?;
            }

            // This exists purely to guide borrowck lifetime inference, and does not have
            // an operational effect.
            AscribeUserType(..) => {}

            // Currently, Miri discards Coverage statements. Coverage statements are only injected
            // via an optional compile time MIR pass and have no side effects. Since Coverage
            // statements don't exist at the source level, it is safe for Miri to ignore them, even
            // for undefined behavior (UB) checks.
            //
            // A coverage counter inside a const expression (for example, a counter injected in a
            // const function) is discarded when the const is evaluated at compile time. Whether
            // this should change, and/or how to implement a const eval counter, is a subject of the
            // following issue:
            //
            // FIXME(#73156): Handle source code coverage in const eval
            Coverage(..) => {}

            ConstEvalCounter => {
                M::increment_const_eval_counter(self)?;
            }

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            // Only used for temporary lifetime lints
            BackwardIncompatibleDropHint { .. } => {}
        }

        interp_ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
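    ///
    /// For example (MIR notation, purely illustrative), an assignment like
    /// `_1 = Add(move _2, const 3_i32)` is handled by the `BinaryOp` arm below: both operands are
    /// read, and the resulting immediate is written straight into the place for `_1`.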
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;
        // FIXME: ensure some kind of non-aliasing between LHS and RHS?
        // Also see https://github.com/rust-lang/rust/issues/68364.

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let ptr = M::thread_local_static_pointer(self, did)?;
                self.write_pointer(ptr, &dest)?;
            }

            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            CopyForDeref(place) => {
                let op = self.eval_place_to_op(place, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            BinaryOp(bin_op, box (ref left, ref right)) => {
                let layout = util::binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                let result = self.binary_op(bin_op, &left, &right)?;
                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {bin_op:?}");
                self.write_immediate(*result, &dest)?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
                let result = self.unary_op(un_op, &val)?;
                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {un_op:?}");
                self.write_immediate(*result, &dest)?;
            }

            NullaryOp(null_op, ty) => {
                let ty = self.instantiate_from_current_frame_and_normalize_erasing_regions(ty)?;
                let val = self.nullary_op(null_op, ty)?;
                self.write_immediate(*val, &dest)?;
            }

            Aggregate(box ref kind, ref operands) => {
                self.write_aggregate(kind, operands, &dest)?;
            }

            Repeat(ref operand, _) => {
                self.write_repeat(operand, &dest)?;
            }

            Ref(_, borrow_kind, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                let val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
                // A fresh reference was created, make sure it gets retagged.
                let val = M::retag_ptr_value(
                    self,
                    if borrow_kind.allows_two_phase_borrow() {
                        mir::RetagKind::TwoPhase
                    } else {
                        mir::RetagKind::Default
                    },
                    &val,
                )?;
                self.write_immediate(*val, &dest)?;
            }

            RawPtr(kind, place) => {
                // Figure out whether this is an addr_of of an already raw place.
                let place_base_raw = if place.is_indirect_first_projection() {
                    let ty = self.frame().body.local_decls[place.local].ty;
                    ty.is_raw_ptr()
                } else {
                    // Not a deref, and thus not raw.
                    false
                };

                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
                if !place_base_raw && !kind.is_fake() {
                    // If this was not already raw, it needs retagging -- except for "fake"
                    // raw borrows whose defining property is that they do not get retagged.
                    val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?;
                }
                self.write_immediate(*val, &dest)?;
            }

            ShallowInitBox(ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                let v = self.read_immediate(&src)?;
                self.write_immediate(*v, &dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty =
                    self.instantiate_from_current_frame_and_normalize_erasing_regions(cast_ty)?;
                self.cast(&src, cast_kind, cast_ty, &dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let variant = self.read_discriminant(&op)?;
                let discr = self.discriminant_for_variant(op.layout.ty, variant)?;
                self.write_immediate(*discr, &dest)?;
            }

            WrapUnsafeBinder(ref op, _ty) => {
                // Constructing an unsafe binder acts like a transmute
                // since the operand's layout does not change.
                let op = self.eval_operand(op, None)?;
                self.copy_op_allow_transmute(&op, &dest)?;
            }
        }

        trace!("{:?}", self.dump_place(&dest));

        interp_ok(())
    }

    /// Writes the aggregate to the destination.
    #[instrument(skip(self), level = "trace")]
    fn write_aggregate(
        &mut self,
        kind: &mir::AggregateKind<'tcx>,
        operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let (variant_index, variant_dest, active_field_index) = match *kind {
            mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                let variant_dest = self.project_downcast(dest, variant_index)?;
                (variant_index, variant_dest, active_field_index)
            }
            mir::AggregateKind::RawPtr(..) => {
                // Pointers don't have "fields" in the normal sense, so the
                // projection-based code below would either fail in projection
                // or in type mismatches. Instead, build an `Immediate` from
                // the parts and write that to the destination.
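                // For example (illustrative only): a `*const [u8]` aggregate consists of a thin
                // data pointer plus a `usize` length as metadata, while a thin `*const u8` has a
                // zero-sized metadata operand and takes the `MemPlaceMeta::None` path below.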
                let [data, meta] = &operands.raw else {
                    bug!("{kind:?} should have 2 operands, had {operands:?}");
                };
                let data = self.eval_operand(data, None)?;
                let data = self.read_pointer(&data)?;
                let meta = self.eval_operand(meta, None)?;
                let meta = if meta.layout.is_zst() {
                    MemPlaceMeta::None
                } else {
                    MemPlaceMeta::Meta(self.read_scalar(&meta)?)
                };
                let ptr_imm = Immediate::new_pointer_with_meta(data, meta, self);
                let ptr = ImmTy::from_immediate(ptr_imm, dest.layout);
                self.copy_op(&ptr, dest)?;
                return interp_ok(());
            }
            _ => (FIRST_VARIANT, dest.clone(), None),
        };
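        // `active_field_index` is `Some` only for union aggregates, which write exactly one
        // field; the assertion below relies on that.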
        if active_field_index.is_some() {
            assert_eq!(operands.len(), 1);
        }
        for (field_index, operand) in operands.iter_enumerated() {
            let field_index = active_field_index.unwrap_or(field_index);
            let field_dest = self.project_field(&variant_dest, field_index)?;
            let op = self.eval_operand(operand, Some(field_dest.layout))?;
            // We validate manually below so we don't have to do it here.
            self.copy_op_no_validate(&op, &field_dest, /*allow_transmute*/ false)?;
        }
        self.write_discriminant(variant_index, dest)?;
        // Validate that the entire thing is valid, and reset padding that might be in between the
        // fields.
        if M::enforce_validity(self, dest.layout()) {
            self.validate_operand(
                dest,
                M::enforce_validity_recursively(self, dest.layout()),
                /*reset_provenance_and_padding*/ true,
            )?;
        }
        interp_ok(())
    }

    /// Repeats `operand` into the destination. `dest` must have array type, and that type
    /// determines how often `operand` is repeated.
    fn write_repeat(
        &mut self,
        operand: &mir::Operand<'tcx>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let src = self.eval_operand(operand, None)?;
        assert!(src.layout.is_sized());
        let dest = self.force_allocation(&dest)?;
        let length = dest.len(self)?;

        if length == 0 {
            // Nothing to copy... but let's still make sure that `dest` as a place is valid.
            self.get_place_alloc_mut(&dest)?;
        } else {
            // Write the src to the first element.
            let first = self.project_index(&dest, 0)?;
            self.copy_op(&src, &first)?;

            // This is performance-sensitive code for big static/const arrays! So we
            // avoid writing each operand individually and instead just make many copies
            // of the first element.
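            // For instance (a sketch of the effect, not extra semantics): `[0u8; 1024]` does one
            // `copy_op` for element 0 and then a single repeated memcpy covering the remaining
            // 1023 elements, rather than 1024 individual writes.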
            let elem_size = first.layout.size;
            let first_ptr = first.ptr();
            let rest_ptr = first_ptr.wrapping_offset(elem_size, self);
            // No alignment requirement since `copy_op` above already checked it.
            self.mem_copy_repeatedly(
                first_ptr,
                rest_ptr,
                elem_size,
                length - 1,
                /*nonoverlapping:*/ true,
            )?;
        }

        interp_ok(())
    }

    /// Evaluate one argument of a function call.
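    ///
    /// A `Copy` or constant operand always becomes a by-value [`FnArg::Copy`]; a `Move` operand
    /// becomes an [`FnArg::InPlace`] when it lives in memory (or when we must force it into
    /// memory to detect aliasing between `Move` arguments), so the callee can take over that
    /// place.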
    fn eval_fn_call_argument(
        &mut self,
        op: &mir::Operand<'tcx>,
        move_definitely_disjoint: bool,
    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
        interp_ok(match op {
            mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
                // Make a regular copy.
                let op = self.eval_operand(op, None)?;
                FnArg::Copy(op)
            }
            mir::Operand::Move(place) => {
                let place = self.eval_place(*place)?;
                if move_definitely_disjoint {
                    // We still have to ensure that no *other* pointers are used to access this place,
                    // so *if* it is in memory then we have to treat it as `InPlace`.
                    // Use `place_to_op` to guarantee that we notice it being in memory.
                    let op = self.place_to_op(&place)?;
                    match op.as_mplace_or_imm() {
                        Either::Left(mplace) => FnArg::InPlace(mplace),
                        Either::Right(_imm) => FnArg::Copy(op),
                    }
                } else {
                    // We have to force this into memory to detect aliasing among `Move` arguments.
                    FnArg::InPlace(self.force_allocation(&place)?)
                }
            }
        })
    }

    /// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the
    /// necessary information about callee and arguments to make a call.
    fn eval_callee_and_args(
        &mut self,
        terminator: &mir::Terminator<'tcx>,
        func: &mir::Operand<'tcx>,
        args: &[Spanned<mir::Operand<'tcx>>],
        dest: &mir::Place<'tcx>,
    ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> {
        let func = self.eval_operand(func, None)?;

        // Evaluating function call arguments. The tricky part here is dealing with `Move`
        // arguments: we have to ensure no two such arguments alias. This would be most easily done
        // by just forcing them all into memory and then doing the usual in-place argument
        // protection, but then we'd force *a lot* of arguments into memory. So we do some syntactic
        // pre-processing here where if all `move` arguments are syntactically distinct local
        // variables (and none is indirect), we can skip the in-memory forcing.
        // We have to include `dest` in that list so that we can detect aliasing of an in-place
        // argument with the return place.
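        // For example (illustrative): a call `f(move _2, move _3)` with distinct, non-indirect
        // locals passes the check below and needs no forcing; a call whose `move` arguments are
        // two different fields of the same local `_2` fails it, so those arguments get forced
        // into memory and checked for aliasing the usual way.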
        let move_definitely_disjoint = 'move_definitely_disjoint: {
            let mut previous_locals = FxHashSet::<mir::Local>::default();
            for place in args
                .iter()
                .filter_map(|a| {
                    // We only have to care about `Move` arguments.
                    if let mir::Operand::Move(place) = &a.node { Some(place) } else { None }
                })
                .chain(iter::once(dest))
            {
                if place.is_indirect_first_projection() {
                    // An indirect in-place argument could alias with anything else...
                    break 'move_definitely_disjoint false;
                }
                if !previous_locals.insert(place.local) {
                    // This local is the base for two arguments! They might overlap.
                    break 'move_definitely_disjoint false;
                }
            }
            // We found no violation so they are all definitely disjoint.
            true
        };
        let args = args
            .iter()
            .map(|arg| self.eval_fn_call_argument(&arg.node, move_definitely_disjoint))
            .collect::<InterpResult<'tcx, Vec<_>>>()?;

        let fn_sig_binder = {
            let _trace = enter_trace_span!(M, "fn_sig", ty = ?func.layout.ty.kind());
            func.layout.ty.fn_sig(*self.tcx)
        };
        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.typing_env, fn_sig_binder);
        let extra_args = &args[fn_sig.inputs().len()..];
        let extra_args =
            self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));

        let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
            ty::FnPtr(..) => {
                let fn_ptr = self.read_pointer(&func)?;
                let fn_val = self.get_ptr_fn(fn_ptr)?;
                (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
            }
            ty::FnDef(def_id, args) => {
                let instance = self.resolve(def_id, args)?;
                (
                    FnVal::Instance(instance),
                    self.fn_abi_of_instance(instance, extra_args)?,
                    instance.def.requires_caller_location(*self.tcx),
                )
            }
            _ => {
                span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty)
            }
        };

        interp_ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
    }

    fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        let _trace = enter_trace_span!(
            M,
            step::eval_terminator,
            terminator = ?terminator.kind,
            span = ?terminator.source_info.span,
            tracing_separate_thread = Empty,
        )
        .or_if_tracing_disabled(|| info!(terminator = ?terminator.kind));

        use rustc_middle::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                self.return_from_current_stack_frame(/* unwinding */ false)?
            }

            Goto { target } => self.go_to_block(target),

            SwitchInt { ref discr, ref targets } => {
                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);

                // Branch to the `otherwise` case by default, if no match is found.
                let mut target_block = targets.otherwise();

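                // For example (MIR notation, illustrative): for
                // `switchInt(move _1) -> [0: bb1, 7: bb2, otherwise: bb3]`, `_1` is compared
                // against `0` and `7` in turn, and `bb3` is used if neither matches.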
                for (const_int, target) in targets.iter() {
                    // Compare using MIR BinOp::Eq, to also support pointer values.
                    let res = self.binary_op(
                        mir::BinOp::Eq,
                        &discr,
                        &ImmTy::from_uint(const_int, discr.layout),
                    )?;
                    if res.to_scalar().to_bool()? {
                        target_block = target;
                        break;
                    }
                }

                self.go_to_block(target_block);
            }

            Call {
                ref func,
                ref args,
                destination,
                target,
                unwind,
                call_source: _,
                fn_span: _,
            } => {
                let old_stack = self.frame_idx();
                let old_loc = self.frame().loc;

                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
                    self.eval_callee_and_args(terminator, func, args, &destination)?;

                let destination = self.eval_place(destination)?;
                self.init_fn_call(
                    callee,
                    (fn_sig.abi, fn_abi),
                    &args,
                    with_caller_location,
                    &destination,
                    target,
                    if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
                )?;
                // Sanity-check that `init_fn_call` either pushed a new frame or
                // did a jump to another block.
                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
                }
            }

            TailCall { ref func, ref args, fn_span: _ } => {
                let old_frame_idx = self.frame_idx();

                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
                    self.eval_callee_and_args(terminator, func, args, &mir::Place::return_place())?;

                self.init_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?;

                if self.frame_idx() != old_frame_idx {
                    span_bug!(
                        terminator.source_info.span,
                        "evaluating this tail call pushed a new stack frame"
                    );
                }
            }

            Drop { place, target, unwind, replace: _, drop, async_fut } => {
                assert!(
                    async_fut.is_none() && drop.is_none(),
                    "Async Drop must be expanded or reset to sync in runtime MIR"
                );
                let place = self.eval_place(place)?;
                let instance = {
                    let _trace =
                        enter_trace_span!(M, resolve::resolve_drop_in_place, ty = ?place.layout.ty);
                    Instance::resolve_drop_in_place(*self.tcx, place.layout.ty)
                };
                if let ty::InstanceKind::DropGlue(_, None) = instance.def {
                    // This is the branch we enter if and only if the dropped type has no drop glue
                    // whatsoever. This can happen as a result of monomorphizing a drop of a
                    // generic. In order to make sure that generic and non-generic code behaves
                    // roughly the same (and in keeping with MIR semantics), we do nothing here.
                    self.go_to_block(target);
                    return interp_ok(());
                }
                trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
                self.init_drop_in_place_call(&place, instance, target, unwind)?;
            }

            Assert { ref cond, expected, ref msg, target, unwind } => {
                let ignored =
                    M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check();
                let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
                if ignored || expected == cond_val {
                    self.go_to_block(target);
                } else {
                    M::assert_panic(self, msg, unwind)?;
                }
            }

            UnwindTerminate(reason) => {
                M::unwind_terminate(self, reason)?;
            }

            // When we encounter Resume, we've finished unwinding
            // cleanup for the current stack frame. We pop it in order
            // to continue unwinding the next frame
            UnwindResume => {
                trace!("unwinding: resuming from cleanup");
                // By definition, a Resume terminator means
                // that we're unwinding
                self.return_from_current_stack_frame(/* unwinding */ true)?;
                return interp_ok(());
            }

            // It is UB to ever encounter this.
            Unreachable => throw_ub!(Unreachable),

            // These should never occur for MIR we actually run.
            FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
                terminator.source_info.span,
                "{:#?} should have been eliminated by MIR pass",
                terminator.kind
            ),

            InlineAsm { .. } => {
                throw_unsup_format!("inline assembly is not supported");
            }
        }

        interp_ok(())
    }
}