// rustc_mir_transform/lib.rs

1// tidy-alphabetical-start
2#![cfg_attr(bootstrap, feature(array_windows))]
3#![feature(assert_matches)]
4#![feature(box_patterns)]
5#![feature(const_type_name)]
6#![feature(cow_is_borrowed)]
7#![feature(file_buffered)]
8#![feature(gen_blocks)]
9#![feature(if_let_guard)]
10#![feature(impl_trait_in_assoc_type)]
11#![feature(try_blocks)]
12#![feature(yeet_expr)]
13// tidy-alphabetical-end
14
15use hir::ConstContext;
16use required_consts::RequiredConstsVisitor;
17use rustc_const_eval::check_consts::{self, ConstCx};
18use rustc_const_eval::util;
19use rustc_data_structures::fx::FxIndexSet;
20use rustc_data_structures::steal::Steal;
21use rustc_hir as hir;
22use rustc_hir::def::{CtorKind, DefKind};
23use rustc_hir::def_id::LocalDefId;
24use rustc_index::IndexVec;
25use rustc_middle::mir::{
26    AnalysisPhase, Body, CallSource, ClearCrossCrate, ConstOperand, ConstQualifs, LocalDecl,
27    MirPhase, Operand, Place, ProjectionElem, Promoted, RuntimePhase, Rvalue, START_BLOCK,
28    SourceInfo, Statement, StatementKind, TerminatorKind,
29};
30use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
31use rustc_middle::util::Providers;
32use rustc_middle::{bug, query, span_bug};
33use rustc_span::source_map::Spanned;
34use rustc_span::{DUMMY_SP, sym};
35use tracing::debug;
36
37#[macro_use]
38mod pass_manager;
39
40use std::sync::LazyLock;
41
42use pass_manager::{self as pm, Lint, MirLint, MirPass, WithMinOptLevel};
43
44mod check_pointers;
45mod cost_checker;
46mod cross_crate_inline;
47mod deduce_param_attrs;
48mod elaborate_drop;
49mod errors;
50mod ffi_unwind_calls;
51mod lint;
52mod lint_tail_expr_drop_order;
53mod liveness;
54mod patch;
55mod shim;
56mod ssa;
57mod trivial_const;
58
/// We import passes via this macro so that we can have a static list of pass names
/// (used to verify CLI arguments). It takes a list of modules, followed by the passes
/// declared within them.
/// ```ignore,macro-test
/// declare_passes! {
///     // Declare a single pass from the module `abort_unwinding_calls`
///     mod abort_unwinding_calls : AbortUnwindingCalls;
///     // When passes are grouped together as an enum, declare the two constituent passes
///     mod add_call_guards : AddCallGuards {
///         AllCallEdges,
///         CriticalCallEdges
///     };
///     // Declares multiple pass groups, each containing their own constituent passes
///     mod simplify : SimplifyCfg {
///         Initial,
///         /* omitted */
///     }, SimplifyLocals {
///         BeforeConstProp,
///         /* omitted */
///     };
/// }
/// ```
macro_rules! declare_passes {
    (
        // Each entry: optional visibility, one module, and one or more pass type
        // names declared in it, each optionally followed by a `{ ... }` list of
        // enum-variant sub-passes.
        $(
            $vis:vis mod $mod_name:ident : $($pass_name:ident $( { $($ident:ident),* } )?),+ $(,)?;
        )*
    ) => {
        $(
            // Emit the `mod` item itself with the requested visibility.
            $vis mod $mod_name;
            $(
                // Make sure the type name is correct
                #[allow(unused_imports)]
                use $mod_name::$pass_name as _;
            )+
        )*

        // Static set of all known pass names, built lazily on first use.
        static PASS_NAMES: LazyLock<FxIndexSet<&str>> = LazyLock::new(|| [
            // Fake marker pass
            "PreCodegen",
            $(
                $(
                    stringify!($pass_name),
                    $(
                        $(
                            // Variant sub-passes report their own `name()`, which
                            // may differ from the stringified type name.
                            $mod_name::$pass_name::$ident.name(),
                        )*
                    )?
                )+
            )*
        ].into_iter().collect());
    };
}
112
// Declares every MIR-transform module in this crate and registers each pass
// name (and variant sub-pass name) into the static `PASS_NAMES` set.
declare_passes! {
    mod abort_unwinding_calls : AbortUnwindingCalls;
    mod add_call_guards : AddCallGuards { AllCallEdges, CriticalCallEdges };
    mod add_moves_for_packed_drops : AddMovesForPackedDrops;
    mod add_retag : AddRetag;
    mod add_subtyping_projections : Subtyper;
    mod check_inline : CheckForceInline;
    mod check_call_recursion : CheckCallRecursion, CheckDropRecursion;
    mod check_inline_always_target_features: CheckInlineAlwaysTargetFeature;
    mod check_alignment : CheckAlignment;
    mod check_enums : CheckEnums;
    mod check_const_item_mutation : CheckConstItemMutation;
    mod check_null : CheckNull;
    mod check_packed_ref : CheckPackedRef;
    // This pass is public to allow external drivers to perform MIR cleanup
    pub mod cleanup_post_borrowck : CleanupPostBorrowck;

    mod copy_prop : CopyProp;
    mod coroutine : StateTransform;
    mod coverage : InstrumentCoverage;
    mod ctfe_limit : CtfeLimit;
    mod dataflow_const_prop : DataflowConstProp;
    mod dead_store_elimination : DeadStoreElimination {
        Initial,
        Final
    };
    mod deref_separator : Derefer;
    mod dest_prop : DestinationPropagation;
    pub mod dump_mir : Marker;
    mod early_otherwise_branch : EarlyOtherwiseBranch;
    mod erase_deref_temps : EraseDerefTemps;
    mod elaborate_box_derefs : ElaborateBoxDerefs;
    mod elaborate_drops : ElaborateDrops;
    mod function_item_references : FunctionItemReferences;
    mod gvn : GVN;
    // Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
    // by custom rustc drivers, running all the steps by themselves. See #114628.
    pub mod inline : Inline, ForceInline;
    mod impossible_predicates : ImpossiblePredicates;
    mod instsimplify : InstSimplify { BeforeInline, AfterSimplifyCfg };
    mod jump_threading : JumpThreading;
    mod known_panics_lint : KnownPanicsLint;
    mod large_enums : EnumSizeOpt;
    mod lower_intrinsics : LowerIntrinsics;
    mod lower_slice_len : LowerSliceLenCalls;
    mod match_branches : MatchBranchSimplification;
    mod mentioned_items : MentionedItems;
    mod multiple_return_terminators : MultipleReturnTerminators;
    mod post_drop_elaboration : CheckLiveDrops;
    mod prettify : ReorderBasicBlocks, ReorderLocals;
    mod promote_consts : PromoteTemps;
    mod ref_prop : ReferencePropagation;
    mod remove_noop_landing_pads : RemoveNoopLandingPads;
    mod remove_place_mention : RemovePlaceMention;
    mod remove_storage_markers : RemoveStorageMarkers;
    mod remove_uninit_drops : RemoveUninitDrops;
    mod remove_unneeded_drops : RemoveUnneededDrops;
    mod remove_zsts : RemoveZsts;
    mod required_consts : RequiredConstsVisitor;
    mod post_analysis_normalize : PostAnalysisNormalize;
    mod sanity_check : SanityCheck;
    // This pass is public to allow external drivers to perform MIR cleanup
    pub mod simplify :
        SimplifyCfg {
            Initial,
            PromoteConsts,
            RemoveFalseEdges,
            PostAnalysis,
            PreOptimizations,
            Final,
            MakeShim,
            AfterUnreachableEnumBranching
        },
        SimplifyLocals {
            BeforeConstProp,
            AfterGVN,
            Final
        };
    mod simplify_branches : SimplifyConstCondition {
        AfterInstSimplify,
        AfterConstProp,
        Final
    };
    mod simplify_comparison_integral : SimplifyComparisonIntegral;
    mod single_use_consts : SingleUseConsts;
    mod sroa : ScalarReplacementOfAggregates;
    mod strip_debuginfo : StripDebugInfo;
    mod unreachable_enum_branching : UnreachableEnumBranching;
    mod unreachable_prop : UnreachablePropagation;
    mod validate : Validator;
}
204
// Pull in this crate's diagnostic messages from its Fluent resource file.
rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
206
207pub fn provide(providers: &mut Providers) {
208    coverage::query::provide(providers);
209    ffi_unwind_calls::provide(providers);
210    shim::provide(providers);
211    cross_crate_inline::provide(providers);
212    providers.queries = query::Providers {
213        mir_keys,
214        mir_built,
215        mir_const_qualif,
216        mir_promoted,
217        mir_drops_elaborated_and_const_checked,
218        mir_for_ctfe,
219        mir_coroutine_witnesses: coroutine::mir_coroutine_witnesses,
220        optimized_mir,
221        check_liveness: liveness::check_liveness,
222        is_mir_available,
223        is_ctfe_mir_available: is_mir_available,
224        mir_callgraph_cyclic: inline::cycle::mir_callgraph_cyclic,
225        mir_inliner_callees: inline::cycle::mir_inliner_callees,
226        promoted_mir,
227        deduced_param_attrs: deduce_param_attrs::deduced_param_attrs,
228        coroutine_by_move_body_def_id: coroutine::coroutine_by_move_body_def_id,
229        trivial_const: trivial_const::trivial_const_provider,
230        ..providers.queries
231    };
232}
233
/// Rewrites calls to the `const_eval_select` intrinsic into direct calls to
/// either the compile-time or the runtime implementation, depending on
/// `context`.
///
/// The intrinsic takes `(tupled_args, called_in_const, called_at_rt)`; here we
/// pick one of the two function operands, untuple the arguments, and replace
/// the intrinsic call terminator with a plain call to the chosen function.
fn remap_mir_for_const_eval_select<'tcx>(
    tcx: TyCtxt<'tcx>,
    mut body: Body<'tcx>,
    context: hir::Constness,
) -> Body<'tcx> {
    for bb in body.basic_blocks.as_mut().iter_mut() {
        let terminator = bb.terminator.as_mut().expect("invalid terminator");
        match terminator.kind {
            // Only rewrite direct calls whose callee is statically known to be
            // the `const_eval_select` intrinsic.
            TerminatorKind::Call {
                func: Operand::Constant(box ConstOperand { ref const_, .. }),
                ref mut args,
                destination,
                target,
                unwind,
                fn_span,
                ..
            } if let ty::FnDef(def_id, _) = *const_.ty().kind()
                && tcx.is_intrinsic(def_id, sym::const_eval_select) =>
            {
                // The intrinsic always has exactly three arguments.
                let Ok([tupled_args, called_in_const, called_at_rt]) = take_array(args) else {
                    unreachable!()
                };
                let ty = tupled_args.node.ty(&body.local_decls, tcx);
                let fields = ty.tuple_fields();
                let num_args = fields.len();
                // Select the const-time or runtime callee.
                let func =
                    if context == hir::Constness::Const { called_in_const } else { called_at_rt };
                // `method` records whether each untupled argument should be a
                // move or a copy of the corresponding tuple field.
                let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) =
                    match tupled_args.node {
                        Operand::Constant(_) | Operand::RuntimeChecks(_) => {
                            // There is no good way of extracting a tuple arg from a constant
                            // (const generic stuff) so we just create a temporary and deconstruct
                            // that.
                            let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
                            bb.statements.push(Statement::new(
                                SourceInfo::outermost(fn_span),
                                StatementKind::Assign(Box::new((
                                    local.into(),
                                    Rvalue::Use(tupled_args.node.clone()),
                                ))),
                            ));
                            (Operand::Move, local.into())
                        }
                        Operand::Move(place) => (Operand::Move, place),
                        Operand::Copy(place) => (Operand::Copy, place),
                    };
                // Project out each tuple field as its own call argument,
                // extending the original place's projection with a field access.
                let place_elems = place.projection;
                let arguments = (0..num_args)
                    .map(|x| {
                        let mut place_elems = place_elems.to_vec();
                        place_elems.push(ProjectionElem::Field(x.into(), fields[x]));
                        let projection = tcx.mk_place_elems(&place_elems);
                        let place = Place { local: place.local, projection };
                        Spanned { node: method(place), span: DUMMY_SP }
                    })
                    .collect();
                // Swap the intrinsic call terminator for a direct call to the
                // selected function with the untupled arguments.
                terminator.kind = TerminatorKind::Call {
                    func: func.node,
                    args: arguments,
                    destination,
                    target,
                    unwind,
                    call_source: CallSource::Misc,
                    fn_span,
                };
            }
            _ => {}
        }
    }
    body
}
305
/// Moves the contents of `b` out as a fixed-size array of length `N`.
///
/// The boxed slice is taken via `mem::take`, leaving `*b` as an empty slice.
/// If the slice's length is exactly `N`, the elements are returned by value;
/// otherwise the taken boxed slice is handed back in the `Err` variant.
fn take_array<T, const N: usize>(b: &mut Box<[T]>) -> Result<[T; N], Box<[T]>> {
    std::mem::take(b).try_into().map(|array: Box<[T; N]>| *array)
}
310
/// Returns whether MIR is available for `def_id`, i.e. whether it is a member
/// of the set computed by `mir_keys`. Also installed as the provider for
/// `is_ctfe_mir_available`.
fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
    tcx.mir_keys(()).contains(&def_id)
}
314
315/// Finds the full set of `DefId`s within the current crate that have
316/// MIR associated with them.
317fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
318    // All body-owners have MIR associated with them.
319    let mut set: FxIndexSet<_> = tcx.hir_body_owners().collect();
320
321    // Remove the fake bodies for `global_asm!`, since they're not useful
322    // to be emitted (`--emit=mir`) or encoded (in metadata).
323    set.retain(|&def_id| !matches!(tcx.def_kind(def_id), DefKind::GlobalAsm));
324
325    // Coroutine-closures (e.g. async closures) have an additional by-move MIR
326    // body that isn't in the HIR.
327    for body_owner in tcx.hir_body_owners() {
328        if let DefKind::Closure = tcx.def_kind(body_owner)
329            && tcx.needs_coroutine_by_move_body_def_id(body_owner.to_def_id())
330        {
331            set.insert(tcx.coroutine_by_move_body_def_id(body_owner).expect_local());
332        }
333    }
334
335    // tuple struct/variant constructors have MIR, but they don't have a BodyId,
336    // so we need to build them separately.
337    for item in tcx.hir_crate_items(()).free_items() {
338        if let DefKind::Struct | DefKind::Enum = tcx.def_kind(item.owner_id) {
339            for variant in tcx.adt_def(item.owner_id).variants() {
340                if let Some((CtorKind::Fn, ctor_def_id)) = variant.ctor {
341                    set.insert(ctor_def_id.expect_local());
342                }
343            }
344        }
345    }
346
347    set
348}
349
/// Const-checks the freshly built MIR of a const item or const fn and returns
/// the qualifications (`ConstQualifs`) of its return place.
fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs {
    // N.B., this `borrow()` is guaranteed to be valid (i.e., the value
    // cannot yet be stolen), because `mir_promoted()`, which steals
    // from `mir_built()`, forces this query to execute before
    // performing the steal.
    let body = &tcx.mir_built(def).borrow();
    let ccx = check_consts::ConstCx::new(tcx, body);
    // No need to const-check a non-const `fn`.
    match ccx.const_kind {
        Some(ConstContext::Const { .. } | ConstContext::Static(_) | ConstContext::ConstFn) => {}
        None => span_bug!(
            tcx.def_span(def),
            "`mir_const_qualif` should only be called on const fns and const items"
        ),
    }

    if body.return_ty().references_error() {
        // It's possible to reach here without an error being emitted (#121103).
        tcx.dcx().span_delayed_bug(body.span, "mir_const_qualif: MIR had errors");
        return Default::default();
    }

    let mut validator = check_consts::check::Checker::new(&ccx);
    validator.check_body();

    // We return the qualifs in the return place for every MIR body, even though it is only used
    // when deciding to promote a reference to a `const` for now.
    validator.qualifs_in_return_place()
}
379
/// Implementation of the `mir_built` query: builds the MIR for `def` and runs
/// the earliest lints and cleanup passes on it.
fn mir_built(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
    // Delegate to the main MIR building code in the `rustc_mir_build` crate.
    // This is the one place that is allowed to call `build_mir_inner_impl`.
    let mut body = tcx.build_mir_inner_impl(def);

    // Identifying trivial consts based on their mir_built is easy, but a little wasteful.
    // Trying to push this logic earlier in the compiler and never even produce the Body would
    // probably improve compile time.
    if trivial_const::trivial_const(tcx, def, || &body).is_some() {
        // Skip all the passes below for trivial consts.
        let body = tcx.alloc_steal_mir(body);
        pass_manager::dump_mir_for_phase_change(tcx, &body.borrow());
        return body;
    }

    pass_manager::dump_mir_for_phase_change(tcx, &body);

    // Run the MIR-level lints first, then the minimal cleanup needed before
    // constant evaluation can take place.
    pm::run_passes(
        tcx,
        &mut body,
        &[
            // MIR-level lints.
            &Lint(check_inline::CheckForceInline),
            &Lint(check_call_recursion::CheckCallRecursion),
            // Check callee's target features match callers target features when
            // using `#[inline(always)]`
            &Lint(check_inline_always_target_features::CheckInlineAlwaysTargetFeature),
            &Lint(check_packed_ref::CheckPackedRef),
            &Lint(check_const_item_mutation::CheckConstItemMutation),
            &Lint(function_item_references::FunctionItemReferences),
            // What we need to do constant evaluation.
            &simplify::SimplifyCfg::Initial,
            &Lint(sanity_check::SanityCheck),
        ],
        None,
        pm::Optimizations::Allowed,
    );
    tcx.alloc_steal_mir(body)
}
420
/// Compute the main MIR body and the list of MIR bodies of the promoteds.
fn mir_promoted(
    tcx: TyCtxt<'_>,
    def: LocalDefId,
) -> (&Steal<Body<'_>>, &Steal<IndexVec<Promoted, Body<'_>>>) {
    debug_assert!(!tcx.is_trivial_const(def), "Tried to get mir_promoted of a trivial const");

    // Ensure that we compute the `mir_const_qualif` for constants at
    // this point, before we steal the mir-const result.
    // Also this means promotion can rely on all const checks having been done.

    let const_qualifs = match tcx.def_kind(def) {
        // Const fns (and const closures) need const-checking even though they
        // are not const items themselves.
        DefKind::Fn | DefKind::AssocFn | DefKind::Closure
            if tcx.constness(def) == hir::Constness::Const =>
        {
            tcx.mir_const_qualif(def)
        }
        DefKind::AssocConst
        | DefKind::Const
        | DefKind::Static { .. }
        | DefKind::InlineConst
        | DefKind::AnonConst => tcx.mir_const_qualif(def),
        // Everything else is never const-checked and gets default qualifs.
        _ => ConstQualifs::default(),
    };

    // the `has_ffi_unwind_calls` query uses the raw mir, so make sure it is run.
    tcx.ensure_done().has_ffi_unwind_calls(def);

    // the `by_move_body` query uses the raw mir, so make sure it is run.
    if tcx.needs_coroutine_by_move_body_def_id(def.to_def_id()) {
        tcx.ensure_done().coroutine_by_move_body_def_id(def);
    }

    // the `trivial_const` query uses mir_built, so make sure it is run.
    tcx.ensure_done().trivial_const(def);

    let mut body = tcx.mir_built(def).steal();
    if let Some(error_reported) = const_qualifs.tainted_by_errors {
        body.tainted_by_errors = Some(error_reported);
    }

    // Collect `required_consts` *before* promotion, so if there are any consts being promoted
    // we still add them to the list in the outer MIR body.
    RequiredConstsVisitor::compute_required_consts(&mut body);

    // What we need to run borrowck etc.
    let promote_pass = promote_consts::PromoteTemps::default();
    pm::run_passes(
        tcx,
        &mut body,
        &[&promote_pass, &simplify::SimplifyCfg::PromoteConsts, &coverage::InstrumentCoverage],
        Some(MirPhase::Analysis(AnalysisPhase::Initial)),
        pm::Optimizations::Allowed,
    );

    lint_tail_expr_drop_order::run_lint(tcx, def, &body);

    // The promoted fragments were accumulated by the promotion pass itself.
    let promoted = promote_pass.promoted_fragments.into_inner();
    (tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
}
481
/// Compute the MIR that is used during CTFE (and thus has no optimizations run on it)
fn mir_for_ctfe(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &Body<'_> {
    // Trivial consts are handled by the `trivial_const` query; reaching this
    // query with one is a bug.
    debug_assert!(!tcx.is_trivial_const(def_id), "Tried to get mir_for_ctfe of a trivial const");
    // Arena-allocate the finished body so the query can hand out a reference.
    tcx.arena.alloc(inner_mir_for_ctfe(tcx, def_id))
}
487
/// Builds the CTFE body for `def`: either a synthesized ADT-constructor shim,
/// or the drops-elaborated body with the CTFE recursion limit pass applied.
fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
    // FIXME: don't duplicate this between the optimized_mir/mir_for_ctfe queries
    if tcx.is_constructor(def.to_def_id()) {
        // There's no reason to run all of the MIR passes on constructors when
        // we can just output the MIR we want directly. This also saves const
        // qualification and borrow checking the trouble of special casing
        // constructors.
        return shim::build_adt_ctor(tcx, def.to_def_id());
    }

    let body = tcx.mir_drops_elaborated_and_const_checked(def);
    let body = match tcx.hir_body_const_context(def) {
        // consts and statics do not have `optimized_mir`, so we can steal the body instead of
        // cloning it.
        Some(hir::ConstContext::Const { .. } | hir::ConstContext::Static(_)) => body.steal(),
        // `const fn` bodies are also needed by `optimized_mir`, so clone here.
        Some(hir::ConstContext::ConstFn) => body.borrow().clone(),
        None => bug!("`mir_for_ctfe` called on non-const {def:?}"),
    };

    // `const_eval_select` calls resolve to their compile-time implementation.
    let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::Const);
    pm::run_passes(tcx, &mut body, &[&ctfe_limit::CtfeLimit], None, pm::Optimizations::Allowed);

    body
}
512
/// Obtain just the main MIR (no promoteds) and run some cleanups on it. This also runs
/// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
/// end up missing the source MIR due to stealing happening.
fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
    // Coroutine witness computation also reads the pre-steal MIR; force it first.
    if tcx.is_coroutine(def.to_def_id()) {
        tcx.ensure_done().mir_coroutine_witnesses(def);
    }

    // We only need to borrowck non-synthetic MIR.
    let tainted_by_errors = if !tcx.is_synthetic_mir(def) {
        tcx.mir_borrowck(tcx.typeck_root_def_id(def.to_def_id()).expect_local()).err()
    } else {
        None
    };

    let is_fn_like = tcx.def_kind(def).is_fn_like();
    if is_fn_like {
        // Do not compute the mir call graph without said call graph actually being used.
        if pm::should_run_pass(tcx, &inline::Inline, pm::Optimizations::Allowed)
            || inline::ForceInline::should_run_pass_for_callee(tcx, def.to_def_id())
        {
            tcx.ensure_done().mir_inliner_callees(ty::InstanceKind::Item(def.to_def_id()));
        }
    }

    tcx.ensure_done().check_liveness(def);

    // Steal the main body; the promoteds are left behind for their own queries.
    let (body, _) = tcx.mir_promoted(def);
    let mut body = body.steal();

    if let Some(error_reported) = tainted_by_errors {
        body.tainted_by_errors = Some(error_reported);
    }

    // Also taint the body if it's within a top-level item that is not well formed.
    //
    // We do this check here and not during `mir_promoted` because that may result
    // in borrowck cycles if WF requires looking into an opaque hidden type.
    let root = tcx.typeck_root_def_id(def.to_def_id());
    match tcx.def_kind(root) {
        DefKind::Fn
        | DefKind::AssocFn
        | DefKind::Static { .. }
        | DefKind::Const
        | DefKind::AssocConst => {
            if let Err(guar) = tcx.ensure_ok().check_well_formed(root.expect_local()) {
                body.tainted_by_errors = Some(guar);
            }
        }
        _ => {}
    }

    // Lower from analysis-phase MIR all the way to runtime post-cleanup MIR.
    run_analysis_to_runtime_passes(tcx, &mut body);

    tcx.alloc_steal_mir(body)
}
569
// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
// by custom rustc drivers, running all the steps by themselves. See #114628.
/// Lowers `body` from `Analysis(Initial)` all the way to `Runtime(PostCleanup)`,
/// asserting the expected phase at each step.
pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial));
    let did = body.source.def_id();

    debug!("analysis_mir_cleanup({:?})", did);
    run_analysis_cleanup_passes(tcx, body);
    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::PostCleanup));

    // Do a little drop elaboration before const-checking if `const_precise_live_drops` is enabled.
    if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, body)) {
        pm::run_passes(
            tcx,
            body,
            &[
                &remove_uninit_drops::RemoveUninitDrops,
                &simplify::SimplifyCfg::RemoveFalseEdges,
                &Lint(post_drop_elaboration::CheckLiveDrops),
            ],
            None,
            pm::Optimizations::Allowed,
        );
    }

    debug!("runtime_mir_lowering({:?})", did);
    run_runtime_lowering_passes(tcx, body);
    assert!(body.phase == MirPhase::Runtime(RuntimePhase::Initial));

    debug!("runtime_mir_cleanup({:?})", did);
    run_runtime_cleanup_passes(tcx, body);
    assert!(body.phase == MirPhase::Runtime(RuntimePhase::PostCleanup));
}
603
604// FIXME(JakobDegen): Can we make these lists of passes consts?
605
/// After this series of passes, no lifetime analysis based on borrowing can be done.
/// Advances the body to `Analysis(PostCleanup)`.
fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    let passes: &[&dyn MirPass<'tcx>] = &[
        &impossible_predicates::ImpossiblePredicates,
        &cleanup_post_borrowck::CleanupPostBorrowck,
        &remove_noop_landing_pads::RemoveNoopLandingPads,
        &simplify::SimplifyCfg::PostAnalysis,
        &deref_separator::Derefer,
    ];

    pm::run_passes(
        tcx,
        body,
        passes,
        Some(MirPhase::Analysis(AnalysisPhase::PostCleanup)),
        pm::Optimizations::Allowed,
    );
}
624
/// Runs the sequence of passes that lowers analysis to runtime MIR, advancing
/// the body to `Runtime(Initial)`. The pass order here is load-bearing; see
/// the inline comments.
fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    let passes: &[&dyn MirPass<'tcx>] = &[
        // These next passes must be executed together.
        &add_call_guards::CriticalCallEdges,
        // Must be done before drop elaboration because we need to drop opaque types, too.
        &post_analysis_normalize::PostAnalysisNormalize,
        // Calling this after `PostAnalysisNormalize` ensures that we don't deal with opaque types.
        &add_subtyping_projections::Subtyper,
        &elaborate_drops::ElaborateDrops,
        // Needs to happen after drop elaboration.
        &Lint(check_call_recursion::CheckDropRecursion),
        // This will remove extraneous landing pads which are no longer
        // necessary as well as forcing any call in a non-unwinding
        // function calling a possibly-unwinding function to abort the process.
        &abort_unwinding_calls::AbortUnwindingCalls,
        // AddMovesForPackedDrops needs to run after drop
        // elaboration.
        &add_moves_for_packed_drops::AddMovesForPackedDrops,
        // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`.
        // Otherwise it should run fairly late, but before optimizations begin.
        &add_retag::AddRetag,
        &erase_deref_temps::EraseDerefTemps,
        &elaborate_box_derefs::ElaborateBoxDerefs,
        &coroutine::StateTransform,
        &Lint(known_panics_lint::KnownPanicsLint),
    ];
    // No validation here: the body is mid-transition between dialects.
    pm::run_passes_no_validate(tcx, body, passes, Some(MirPhase::Runtime(RuntimePhase::Initial)));
}
654
/// Runs the sequence of passes that do the initial cleanup of runtime MIR,
/// advancing the body to `Runtime(PostCleanup)`.
fn run_runtime_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    let passes: &[&dyn MirPass<'tcx>] = &[
        &lower_intrinsics::LowerIntrinsics,
        &remove_place_mention::RemovePlaceMention,
        &simplify::SimplifyCfg::PreOptimizations,
    ];

    pm::run_passes(
        tcx,
        body,
        passes,
        Some(MirPhase::Runtime(RuntimePhase::PostCleanup)),
        pm::Optimizations::Allowed,
    );

    // Clear this by anticipation. Optimizations and runtime MIR have no reason to look
    // into this information, which is meant for borrowck diagnostics.
    for decl in &mut body.local_decls {
        decl.local_info = ClearCrossCrate::Clear;
    }
}
677
/// Runs the full MIR optimization pipeline on `body`, leaving it in the
/// `Runtime(Optimized)` phase. Optimizations are suppressed for functions
/// whose codegen attributes request no optimization, but required passes
/// (UB checks, call-guard cleanup, etc.) still run.
pub(crate) fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    // Shorthand: wrap a pass so it only runs at opt-level >= 1.
    fn o1<T>(x: T) -> WithMinOptLevel<T> {
        WithMinOptLevel(1, x)
    }

    let def_id = body.source.def_id();
    let optimizations = if tcx.def_kind(def_id).has_codegen_attrs()
        && tcx.codegen_fn_attrs(def_id).optimize.do_not_optimize()
    {
        pm::Optimizations::Suppressed
    } else {
        pm::Optimizations::Allowed
    };

    // The main optimizations that we do on MIR.
    pm::run_passes(
        tcx,
        body,
        &[
            // Add some UB checks before any UB gets optimized away.
            &check_alignment::CheckAlignment,
            &check_null::CheckNull,
            &check_enums::CheckEnums,
            // Before inlining: trim down MIR with passes to reduce inlining work.

            // Has to be done before inlining, otherwise actual call will be almost always inlined.
            // Also simple, so can just do first.
            &lower_slice_len::LowerSliceLenCalls,
            // Perform instsimplify before inline to eliminate some trivial calls (like clone
            // shims).
            &instsimplify::InstSimplify::BeforeInline,
            // Perform inlining of `#[rustc_force_inline]`-annotated callees.
            &inline::ForceInline,
            // Perform inlining, which may add a lot of code.
            &inline::Inline,
            // Inlining may have introduced a lot of redundant code and a large move pattern.
            // Now, we need to shrink the generated MIR.
            // Code from other crates may have storage markers, so this needs to happen after
            // inlining.
            &remove_storage_markers::RemoveStorageMarkers,
            // Inlining and instantiation may introduce ZST and useless drops.
            &remove_zsts::RemoveZsts,
            &remove_unneeded_drops::RemoveUnneededDrops,
            // Type instantiation may create uninhabited enums.
            // Also eliminates some unreachable branches based on variants of enums.
            &unreachable_enum_branching::UnreachableEnumBranching,
            &unreachable_prop::UnreachablePropagation,
            &o1(simplify::SimplifyCfg::AfterUnreachableEnumBranching),
            &multiple_return_terminators::MultipleReturnTerminators,
            // After simplifycfg, it allows us to discover new opportunities for peephole
            // optimizations. This invalidates CFG caches, so avoid putting between
            // `ReferencePropagation` and `GVN` which both use the dominator tree.
            &instsimplify::InstSimplify::AfterSimplifyCfg,
            // After `InstSimplify-after-simplifycfg` with `-Zub_checks=false`, simplify
            // ```
            // _13 = const false;
            // assume(copy _13);
            // Call(precondition_check);
            // ```
            // to unreachable to eliminate the call to help later passes.
            // This invalidates CFG caches also.
            &o1(simplify_branches::SimplifyConstCondition::AfterInstSimplify),
            &ref_prop::ReferencePropagation,
            &sroa::ScalarReplacementOfAggregates,
            &simplify::SimplifyLocals::BeforeConstProp,
            &dead_store_elimination::DeadStoreElimination::Initial,
            &gvn::GVN,
            &simplify::SimplifyLocals::AfterGVN,
            &match_branches::MatchBranchSimplification,
            &dataflow_const_prop::DataflowConstProp,
            &single_use_consts::SingleUseConsts,
            &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
            &jump_threading::JumpThreading,
            &early_otherwise_branch::EarlyOtherwiseBranch,
            &simplify_comparison_integral::SimplifyComparisonIntegral,
            &o1(simplify_branches::SimplifyConstCondition::Final),
            &o1(remove_noop_landing_pads::RemoveNoopLandingPads),
            &o1(simplify::SimplifyCfg::Final),
            // After the last SimplifyCfg, because this wants one-block functions.
            &strip_debuginfo::StripDebugInfo,
            &copy_prop::CopyProp,
            &dead_store_elimination::DeadStoreElimination::Final,
            &dest_prop::DestinationPropagation,
            &simplify::SimplifyLocals::Final,
            &multiple_return_terminators::MultipleReturnTerminators,
            &large_enums::EnumSizeOpt { discrepancy: 128 },
            // Some cleanup necessary at least for LLVM and potentially other codegen backends.
            &add_call_guards::CriticalCallEdges,
            // Cleanup for human readability, off by default.
            &prettify::ReorderBasicBlocks,
            &prettify::ReorderLocals,
            // Dump the end result for testing and debugging purposes.
            &dump_mir::Marker("PreCodegen"),
        ],
        Some(MirPhase::Runtime(RuntimePhase::Optimized)),
        optimizations,
    );
}
776
777/// Optimize the MIR and prepare it for codegen.
778fn optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> &Body<'_> {
779    tcx.arena.alloc(inner_optimized_mir(tcx, did))
780}
781
782fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
783    if tcx.is_constructor(did.to_def_id()) {
784        // There's no reason to run all of the MIR passes on constructors when
785        // we can just output the MIR we want directly. This also saves const
786        // qualification and borrow checking the trouble of special casing
787        // constructors.
788        return shim::build_adt_ctor(tcx, did.to_def_id());
789    }
790
791    match tcx.hir_body_const_context(did) {
792        // Run the `mir_for_ctfe` query, which depends on `mir_drops_elaborated_and_const_checked`
793        // which we are going to steal below. Thus we need to run `mir_for_ctfe` first, so it
794        // computes and caches its result.
795        Some(hir::ConstContext::ConstFn) => tcx.ensure_done().mir_for_ctfe(did),
796        None => {}
797        Some(other) => panic!("do not use `optimized_mir` for constants: {other:?}"),
798    }
799    debug!("about to call mir_drops_elaborated...");
800    let body = tcx.mir_drops_elaborated_and_const_checked(did).steal();
801    let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::NotConst);
802
803    if body.tainted_by_errors.is_some() {
804        return body;
805    }
806
807    // Before doing anything, remember which items are being mentioned so that the set of items
808    // visited does not depend on the optimization level.
809    // We do not use `run_passes` for this as that might skip the pass if `injection_phase` is set.
810    mentioned_items::MentionedItems.run_pass(tcx, &mut body);
811
812    // If `mir_drops_elaborated_and_const_checked` found that the current body has unsatisfiable
813    // predicates, it will shrink the MIR to a single `unreachable` terminator.
814    // More generally, if MIR is a lone `unreachable`, there is nothing to optimize.
815    if let TerminatorKind::Unreachable = body.basic_blocks[START_BLOCK].terminator().kind
816        && body.basic_blocks[START_BLOCK].statements.is_empty()
817    {
818        return body;
819    }
820
821    run_optimization_passes(tcx, &mut body);
822
823    body
824}
825
826/// Fetch all the promoteds of an item and prepare their MIR bodies to be ready for
827/// constant evaluation once all generic parameters become known.
828fn promoted_mir(tcx: TyCtxt<'_>, def: LocalDefId) -> &IndexVec<Promoted, Body<'_>> {
829    if tcx.is_constructor(def.to_def_id()) {
830        return tcx.arena.alloc(IndexVec::new());
831    }
832
833    if !tcx.is_synthetic_mir(def) {
834        tcx.ensure_done().mir_borrowck(tcx.typeck_root_def_id(def.to_def_id()).expect_local());
835    }
836    let mut promoted = tcx.mir_promoted(def).1.steal();
837
838    for body in &mut promoted {
839        run_analysis_to_runtime_passes(tcx, body);
840    }
841
842    tcx.arena.alloc(promoted)
843}