// rustc_mir_transform/lib.rs

1// tidy-alphabetical-start
2#![feature(array_windows)]
3#![feature(assert_matches)]
4#![feature(box_patterns)]
5#![feature(const_type_name)]
6#![feature(cow_is_borrowed)]
7#![feature(file_buffered)]
8#![feature(if_let_guard)]
9#![feature(impl_trait_in_assoc_type)]
10#![feature(let_chains)]
11#![feature(map_try_insert)]
12#![feature(never_type)]
13#![feature(try_blocks)]
14#![feature(yeet_expr)]
15#![warn(unreachable_pub)]
16// tidy-alphabetical-end
17
18use hir::ConstContext;
19use required_consts::RequiredConstsVisitor;
20use rustc_const_eval::check_consts::{self, ConstCx};
21use rustc_const_eval::util;
22use rustc_data_structures::fx::FxIndexSet;
23use rustc_data_structures::steal::Steal;
24use rustc_hir as hir;
25use rustc_hir::def::{CtorKind, DefKind};
26use rustc_hir::def_id::LocalDefId;
27use rustc_index::IndexVec;
28use rustc_middle::mir::{
29    AnalysisPhase, Body, CallSource, ClearCrossCrate, ConstOperand, ConstQualifs, LocalDecl,
30    MirPhase, Operand, Place, ProjectionElem, Promoted, RuntimePhase, Rvalue, START_BLOCK,
31    SourceInfo, Statement, StatementKind, TerminatorKind,
32};
33use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
34use rustc_middle::util::Providers;
35use rustc_middle::{bug, query, span_bug};
36use rustc_mir_build::builder::build_mir;
37use rustc_span::source_map::Spanned;
38use rustc_span::{DUMMY_SP, sym};
39use tracing::debug;
40
41#[macro_use]
42mod pass_manager;
43
44use std::sync::LazyLock;
45
46use pass_manager::{self as pm, Lint, MirLint, MirPass, WithMinOptLevel};
47
48mod check_pointers;
49mod cost_checker;
50mod cross_crate_inline;
51mod deduce_param_attrs;
52mod elaborate_drop;
53mod errors;
54mod ffi_unwind_calls;
55mod lint;
56mod lint_tail_expr_drop_order;
57mod patch;
58mod shim;
59mod ssa;
60
/// We import passes via this macro so that we can have a static list of pass names
/// (used to verify CLI arguments). It takes a list of modules, followed by the passes
/// declared within them.
/// ```ignore,macro-test
/// declare_passes! {
///     // Declare a single pass from the module `abort_unwinding_calls`
///     mod abort_unwinding_calls : AbortUnwindingCalls;
///     // When passes are grouped together as an enum, declare the two constituent passes
///     mod add_call_guards : AddCallGuards {
///         AllCallEdges,
///         CriticalCallEdges
///     };
///     // Declares multiple pass groups, each containing their own constituent passes
///     mod simplify : SimplifyCfg {
///         Initial,
///         /* omitted */
///     }, SimplifyLocals {
///         BeforeConstProp,
///         /* omitted */
///     };
/// }
/// ```
// Each entry has the form `[pub] mod name : Pass [{ Variants }], ... ;`.
macro_rules! declare_passes {
    (
        $(
            $vis:vis mod $mod_name:ident : $($pass_name:ident $( { $($ident:ident),* } )?),+ $(,)?;
        )*
    ) => {
        $(
            // Emit the module declaration itself...
            $vis mod $mod_name;
            $(
                // Make sure the type name is correct
                #[allow(unused_imports)]
                use $mod_name::$pass_name as _;
            )+
        )*

        // ...and collect every declared pass name (one entry per enum variant
        // for grouped passes) into a static set of valid pass names.
        static PASS_NAMES: LazyLock<FxIndexSet<&str>> = LazyLock::new(|| [
            // Fake marker pass
            "PreCodegen",
            $(
                $(
                    stringify!($pass_name),
                    $(
                        $(
                            $mod_name::$pass_name::$ident.name(),
                        )*
                    )?
                )+
            )*
        ].into_iter().collect());
    };
}
114
// All MIR passes defined by this crate. Grouped passes (e.g. `SimplifyCfg`)
// list each of their constituent variants so every variant name ends up in
// `PASS_NAMES`.
declare_passes! {
    mod abort_unwinding_calls : AbortUnwindingCalls;
    mod add_call_guards : AddCallGuards { AllCallEdges, CriticalCallEdges };
    mod add_moves_for_packed_drops : AddMovesForPackedDrops;
    mod add_retag : AddRetag;
    mod add_subtyping_projections : Subtyper;
    mod check_inline : CheckForceInline;
    mod check_call_recursion : CheckCallRecursion, CheckDropRecursion;
    mod check_alignment : CheckAlignment;
    mod check_const_item_mutation : CheckConstItemMutation;
    mod check_null : CheckNull;
    mod check_packed_ref : CheckPackedRef;
    mod check_undefined_transmutes : CheckUndefinedTransmutes;
    // This pass is public to allow external drivers to perform MIR cleanup
    pub mod cleanup_post_borrowck : CleanupPostBorrowck;

    mod copy_prop : CopyProp;
    mod coroutine : StateTransform;
    mod coverage : InstrumentCoverage;
    mod ctfe_limit : CtfeLimit;
    mod dataflow_const_prop : DataflowConstProp;
    mod dead_store_elimination : DeadStoreElimination {
        Initial,
        Final
    };
    mod deref_separator : Derefer;
    mod dest_prop : DestinationPropagation;
    pub mod dump_mir : Marker;
    mod early_otherwise_branch : EarlyOtherwiseBranch;
    mod elaborate_box_derefs : ElaborateBoxDerefs;
    mod elaborate_drops : ElaborateDrops;
    mod function_item_references : FunctionItemReferences;
    mod gvn : GVN;
    // Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
    // by custom rustc drivers, running all the steps by themselves. See #114628.
    pub mod inline : Inline, ForceInline;
    mod impossible_predicates : ImpossiblePredicates;
    mod instsimplify : InstSimplify { BeforeInline, AfterSimplifyCfg };
    mod jump_threading : JumpThreading;
    mod known_panics_lint : KnownPanicsLint;
    mod large_enums : EnumSizeOpt;
    mod lower_intrinsics : LowerIntrinsics;
    mod lower_slice_len : LowerSliceLenCalls;
    mod match_branches : MatchBranchSimplification;
    mod mentioned_items : MentionedItems;
    mod multiple_return_terminators : MultipleReturnTerminators;
    mod nrvo : RenameReturnPlace;
    mod post_drop_elaboration : CheckLiveDrops;
    mod prettify : ReorderBasicBlocks, ReorderLocals;
    mod promote_consts : PromoteTemps;
    mod ref_prop : ReferencePropagation;
    mod remove_noop_landing_pads : RemoveNoopLandingPads;
    mod remove_place_mention : RemovePlaceMention;
    mod remove_storage_markers : RemoveStorageMarkers;
    mod remove_uninit_drops : RemoveUninitDrops;
    mod remove_unneeded_drops : RemoveUnneededDrops;
    mod remove_zsts : RemoveZsts;
    mod required_consts : RequiredConstsVisitor;
    mod post_analysis_normalize : PostAnalysisNormalize;
    mod sanity_check : SanityCheck;
    // This pass is public to allow external drivers to perform MIR cleanup
    pub mod simplify :
        SimplifyCfg {
            Initial,
            PromoteConsts,
            RemoveFalseEdges,
            PostAnalysis,
            PreOptimizations,
            Final,
            MakeShim,
            AfterUnreachableEnumBranching
        },
        SimplifyLocals {
            BeforeConstProp,
            AfterGVN,
            Final
        };
    mod simplify_branches : SimplifyConstCondition {
        AfterConstProp,
        Final
    };
    mod simplify_comparison_integral : SimplifyComparisonIntegral;
    mod single_use_consts : SingleUseConsts;
    mod sroa : ScalarReplacementOfAggregates;
    mod strip_debuginfo : StripDebugInfo;
    mod unreachable_enum_branching : UnreachableEnumBranching;
    mod unreachable_prop : UnreachablePropagation;
    mod validate : Validator;
}
204
// Pulls this crate's translatable diagnostic messages in from `messages.ftl`.
rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
206
207pub fn provide(providers: &mut Providers) {
208    coverage::query::provide(providers);
209    ffi_unwind_calls::provide(providers);
210    shim::provide(providers);
211    cross_crate_inline::provide(providers);
212    providers.queries = query::Providers {
213        mir_keys,
214        mir_built,
215        mir_const_qualif,
216        mir_promoted,
217        mir_drops_elaborated_and_const_checked,
218        mir_for_ctfe,
219        mir_coroutine_witnesses: coroutine::mir_coroutine_witnesses,
220        optimized_mir,
221        is_mir_available,
222        is_ctfe_mir_available: is_mir_available,
223        mir_callgraph_reachable: inline::cycle::mir_callgraph_reachable,
224        mir_inliner_callees: inline::cycle::mir_inliner_callees,
225        promoted_mir,
226        deduced_param_attrs: deduce_param_attrs::deduced_param_attrs,
227        coroutine_by_move_body_def_id: coroutine::coroutine_by_move_body_def_id,
228        ..providers.queries
229    };
230}
231
/// Rewrites every `const_eval_select` intrinsic call in `body` into a direct
/// call of either its compile-time or its runtime closure, depending on
/// `context`. The tupled closure arguments are untupled into individual call
/// arguments in the process.
fn remap_mir_for_const_eval_select<'tcx>(
    tcx: TyCtxt<'tcx>,
    mut body: Body<'tcx>,
    context: hir::Constness,
) -> Body<'tcx> {
    for bb in body.basic_blocks.as_mut().iter_mut() {
        let terminator = bb.terminator.as_mut().expect("invalid terminator");
        match terminator.kind {
            TerminatorKind::Call {
                func: Operand::Constant(box ConstOperand { ref const_, .. }),
                ref mut args,
                destination,
                target,
                unwind,
                fn_span,
                ..
            } if let ty::FnDef(def_id, _) = *const_.ty().kind()
                && tcx.is_intrinsic(def_id, sym::const_eval_select) =>
            {
                // `const_eval_select` always takes exactly these three arguments.
                let Ok([tupled_args, called_in_const, called_at_rt]) = take_array(args) else {
                    unreachable!()
                };
                let ty = tupled_args.node.ty(&body.local_decls, tcx);
                let fields = ty.tuple_fields();
                let num_args = fields.len();
                // Pick which of the two closures actually gets called.
                let func =
                    if context == hir::Constness::Const { called_in_const } else { called_at_rt };
                let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) =
                    match tupled_args.node {
                        Operand::Constant(_) => {
                            // There is no good way of extracting a tuple arg from a constant
                            // (const generic stuff) so we just create a temporary and deconstruct
                            // that.
                            let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
                            bb.statements.push(Statement {
                                source_info: SourceInfo::outermost(fn_span),
                                kind: StatementKind::Assign(Box::new((
                                    local.into(),
                                    Rvalue::Use(tupled_args.node.clone()),
                                ))),
                            });
                            (Operand::Move, local.into())
                        }
                        Operand::Move(place) => (Operand::Move, place),
                        Operand::Copy(place) => (Operand::Copy, place),
                    };
                // Project each tuple field out as its own call argument,
                // preserving the move/copy-ness of the original operand.
                let place_elems = place.projection;
                let arguments = (0..num_args)
                    .map(|x| {
                        let mut place_elems = place_elems.to_vec();
                        place_elems.push(ProjectionElem::Field(x.into(), fields[x]));
                        let projection = tcx.mk_place_elems(&place_elems);
                        let place = Place { local: place.local, projection };
                        Spanned { node: method(place), span: DUMMY_SP }
                    })
                    .collect();
                terminator.kind = TerminatorKind::Call {
                    func: func.node,
                    args: arguments,
                    destination,
                    target,
                    unwind,
                    call_source: CallSource::Misc,
                    fn_span,
                };
            }
            _ => {}
        }
    }
    body
}
303
/// Moves the contents of `b` out as a fixed-length array, leaving `b` empty.
/// When the length is not exactly `N`, the original contents are returned
/// unchanged as the `Err` payload (and `b` is still left empty).
fn take_array<T, const N: usize>(b: &mut Box<[T]>) -> Result<[T; N], Box<[T]>> {
    std::mem::take(b).try_into().map(|exact: Box<[T; N]>| *exact)
}
308
/// Returns whether `def_id` has MIR associated with it, i.e. whether it is a
/// member of this crate's `mir_keys` set. Also registered as the provider for
/// `is_ctfe_mir_available` (see `provide`).
fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
    tcx.mir_keys(()).contains(&def_id)
}
312
313/// Finds the full set of `DefId`s within the current crate that have
314/// MIR associated with them.
315fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
316    // All body-owners have MIR associated with them.
317    let mut set: FxIndexSet<_> = tcx.hir().body_owners().collect();
318
319    // Coroutine-closures (e.g. async closures) have an additional by-move MIR
320    // body that isn't in the HIR.
321    for body_owner in tcx.hir().body_owners() {
322        if let DefKind::Closure = tcx.def_kind(body_owner)
323            && tcx.needs_coroutine_by_move_body_def_id(body_owner.to_def_id())
324        {
325            set.insert(tcx.coroutine_by_move_body_def_id(body_owner).expect_local());
326        }
327    }
328
329    // tuple struct/variant constructors have MIR, but they don't have a BodyId,
330    // so we need to build them separately.
331    for item in tcx.hir_crate_items(()).free_items() {
332        if let DefKind::Struct | DefKind::Enum = tcx.def_kind(item.owner_id) {
333            for variant in tcx.adt_def(item.owner_id).variants() {
334                if let Some((CtorKind::Fn, ctor_def_id)) = variant.ctor {
335                    set.insert(ctor_def_id.expect_local());
336                }
337            }
338        }
339    }
340
341    set
342}
343
/// Const-checks the body of `def` and computes the `ConstQualifs` of its
/// return place. Must only be called for const fns and const/static items;
/// anything else hits the `span_bug!` below.
fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs {
    // N.B., this `borrow()` is guaranteed to be valid (i.e., the value
    // cannot yet be stolen), because `mir_promoted()`, which steals
    // from `mir_built()`, forces this query to execute before
    // performing the steal.
    let body = &tcx.mir_built(def).borrow();
    let ccx = check_consts::ConstCx::new(tcx, body);
    // No need to const-check a non-const `fn`.
    match ccx.const_kind {
        Some(ConstContext::Const { .. } | ConstContext::Static(_) | ConstContext::ConstFn) => {}
        None => span_bug!(
            tcx.def_span(def),
            "`mir_const_qualif` should only be called on const fns and const items"
        ),
    }

    if body.return_ty().references_error() {
        // It's possible to reach here without an error being emitted (#121103).
        tcx.dcx().span_delayed_bug(body.span, "mir_const_qualif: MIR had errors");
        // Broken bodies get empty (all-false) qualifs.
        return Default::default();
    }

    let mut validator = check_consts::check::Checker::new(&ccx);
    validator.check_body();

    // We return the qualifs in the return place for every MIR body, even though it is only used
    // when deciding to promote a reference to a `const` for now.
    validator.qualifs_in_return_place()
}
373
374fn mir_built(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
375    let mut body = build_mir(tcx, def);
376
377    pass_manager::dump_mir_for_phase_change(tcx, &body);
378
379    pm::run_passes(
380        tcx,
381        &mut body,
382        &[
383            // MIR-level lints.
384            &Lint(check_inline::CheckForceInline),
385            &Lint(check_call_recursion::CheckCallRecursion),
386            &Lint(check_packed_ref::CheckPackedRef),
387            &Lint(check_const_item_mutation::CheckConstItemMutation),
388            &Lint(function_item_references::FunctionItemReferences),
389            &Lint(check_undefined_transmutes::CheckUndefinedTransmutes),
390            // What we need to do constant evaluation.
391            &simplify::SimplifyCfg::Initial,
392            &Lint(sanity_check::SanityCheck),
393        ],
394        None,
395        pm::Optimizations::Allowed,
396    );
397    tcx.alloc_steal_mir(body)
398}
399
/// Compute the main MIR body and the list of MIR bodies of the promoteds.
fn mir_promoted(
    tcx: TyCtxt<'_>,
    def: LocalDefId,
) -> (&Steal<Body<'_>>, &Steal<IndexVec<Promoted, Body<'_>>>) {
    // Ensure that we compute the `mir_const_qualif` for constants at
    // this point, before we steal the mir-const result.
    // Also this means promotion can rely on all const checks having been done.

    let const_qualifs = match tcx.def_kind(def) {
        // Functions only need qualifs when they are const fns (or const
        // default methods).
        DefKind::Fn | DefKind::AssocFn | DefKind::Closure
            if tcx.constness(def) == hir::Constness::Const
                || tcx.is_const_default_method(def.to_def_id()) =>
        {
            tcx.mir_const_qualif(def)
        }
        // Const/static items always need qualifs.
        DefKind::AssocConst
        | DefKind::Const
        | DefKind::Static { .. }
        | DefKind::InlineConst
        | DefKind::AnonConst => tcx.mir_const_qualif(def),
        _ => ConstQualifs::default(),
    };

    // the `has_ffi_unwind_calls` query uses the raw mir, so make sure it is run.
    tcx.ensure_done().has_ffi_unwind_calls(def);

    // the `by_move_body` query uses the raw mir, so make sure it is run.
    if tcx.needs_coroutine_by_move_body_def_id(def.to_def_id()) {
        tcx.ensure_done().coroutine_by_move_body_def_id(def);
    }

    // Every query that reads the raw `mir_built` body has run by now, so the
    // body can be stolen.
    let mut body = tcx.mir_built(def).steal();
    if let Some(error_reported) = const_qualifs.tainted_by_errors {
        body.tainted_by_errors = Some(error_reported);
    }

    // Collect `required_consts` *before* promotion, so if there are any consts being promoted
    // we still add them to the list in the outer MIR body.
    RequiredConstsVisitor::compute_required_consts(&mut body);

    // What we need to run borrowck etc.
    let promote_pass = promote_consts::PromoteTemps::default();
    pm::run_passes(
        tcx,
        &mut body,
        &[&promote_pass, &simplify::SimplifyCfg::PromoteConsts, &coverage::InstrumentCoverage],
        Some(MirPhase::Analysis(AnalysisPhase::Initial)),
        pm::Optimizations::Allowed,
    );

    lint_tail_expr_drop_order::run_lint(tcx, def, &body);

    // The promotion pass accumulated the promoted fragments while it ran.
    let promoted = promote_pass.promoted_fragments.into_inner();
    (tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
}
456
/// Compute the MIR that is used during CTFE (and thus has no optimizations run on it)
fn mir_for_ctfe(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &Body<'_> {
    // Arena-allocate so the query can hand out a plain reference.
    tcx.arena.alloc(inner_mir_for_ctfe(tcx, def_id))
}
461
/// Shared implementation of `mir_for_ctfe`; the query wrapper above only
/// arena-allocates the result.
fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
    // FIXME: don't duplicate this between the optimized_mir/mir_for_ctfe queries
    if tcx.is_constructor(def.to_def_id()) {
        // There's no reason to run all of the MIR passes on constructors when
        // we can just output the MIR we want directly. This also saves const
        // qualification and borrow checking the trouble of special casing
        // constructors.
        return shim::build_adt_ctor(tcx, def.to_def_id());
    }

    let body = tcx.mir_drops_elaborated_and_const_checked(def);
    let body = match tcx.hir().body_const_context(def) {
        // consts and statics do not have `optimized_mir`, so we can steal the body instead of
        // cloning it.
        Some(hir::ConstContext::Const { .. } | hir::ConstContext::Static(_)) => body.steal(),
        // const fns also have `optimized_mir`, so the body must stay available there.
        Some(hir::ConstContext::ConstFn) => body.borrow().clone(),
        None => bug!("`mir_for_ctfe` called on non-const {def:?}"),
    };

    // CTFE always executes the `called_in_const` arm of `const_eval_select`.
    let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::Const);
    pm::run_passes(tcx, &mut body, &[&ctfe_limit::CtfeLimit], None, pm::Optimizations::Allowed);

    body
}
486
/// Obtain just the main MIR (no promoteds) and run some cleanups on it. This also runs
/// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
/// end up missing the source MIR due to stealing happening.
fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
    // Make sure the coroutine-witness query runs before the body is stolen below.
    if tcx.is_coroutine(def.to_def_id()) {
        tcx.ensure_done().mir_coroutine_witnesses(def);
    }

    // We only need to borrowck non-synthetic MIR.
    let tainted_by_errors =
        if !tcx.is_synthetic_mir(def) { tcx.mir_borrowck(def).tainted_by_errors } else { None };

    let is_fn_like = tcx.def_kind(def).is_fn_like();
    if is_fn_like {
        // Do not compute the mir call graph without said call graph actually being used.
        if pm::should_run_pass(tcx, &inline::Inline, pm::Optimizations::Allowed)
            || inline::ForceInline::should_run_pass_for_callee(tcx, def.to_def_id())
        {
            tcx.ensure_done().mir_inliner_callees(ty::InstanceKind::Item(def.to_def_id()));
        }
    }

    let (body, _) = tcx.mir_promoted(def);
    let mut body = body.steal();

    // Propagate borrowck failures into the body so later consumers
    // (e.g. `inner_optimized_mir`) can bail out early.
    if let Some(error_reported) = tainted_by_errors {
        body.tainted_by_errors = Some(error_reported);
    }

    run_analysis_to_runtime_passes(tcx, &mut body);

    tcx.alloc_steal_mir(body)
}
520
// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
// by custom rustc drivers, running all the steps by themselves. See #114628.
/// Lowers a body from `Analysis(Initial)` all the way to `Runtime(PostCleanup)`:
/// analysis cleanup, optional precise live-drop checking, runtime lowering,
/// then runtime cleanup. The phase is asserted after each stage.
pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial));
    let did = body.source.def_id();

    debug!("analysis_mir_cleanup({:?})", did);
    run_analysis_cleanup_passes(tcx, body);
    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::PostCleanup));

    // Do a little drop elaboration before const-checking if `const_precise_live_drops` is enabled.
    if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, body)) {
        pm::run_passes(
            tcx,
            body,
            &[
                &remove_uninit_drops::RemoveUninitDrops,
                &simplify::SimplifyCfg::RemoveFalseEdges,
                &Lint(post_drop_elaboration::CheckLiveDrops),
            ],
            None,
            pm::Optimizations::Allowed,
        );
    }

    debug!("runtime_mir_lowering({:?})", did);
    run_runtime_lowering_passes(tcx, body);
    assert!(body.phase == MirPhase::Runtime(RuntimePhase::Initial));

    debug!("runtime_mir_cleanup({:?})", did);
    run_runtime_cleanup_passes(tcx, body);
    assert!(body.phase == MirPhase::Runtime(RuntimePhase::PostCleanup));
}
554
555// FIXME(JakobDegen): Can we make these lists of passes consts?
556
557/// After this series of passes, no lifetime analysis based on borrowing can be done.
558fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
559    let passes: &[&dyn MirPass<'tcx>] = &[
560        &impossible_predicates::ImpossiblePredicates,
561        &cleanup_post_borrowck::CleanupPostBorrowck,
562        &remove_noop_landing_pads::RemoveNoopLandingPads,
563        &simplify::SimplifyCfg::PostAnalysis,
564        &deref_separator::Derefer,
565    ];
566
567    pm::run_passes(
568        tcx,
569        body,
570        passes,
571        Some(MirPhase::Analysis(AnalysisPhase::PostCleanup)),
572        pm::Optimizations::Allowed,
573    );
574}
575
576/// Returns the sequence of passes that lowers analysis to runtime MIR.
577fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
578    let passes: &[&dyn MirPass<'tcx>] = &[
579        // These next passes must be executed together.
580        &add_call_guards::CriticalCallEdges,
581        // Must be done before drop elaboration because we need to drop opaque types, too.
582        &post_analysis_normalize::PostAnalysisNormalize,
583        // Calling this after `PostAnalysisNormalize` ensures that we don't deal with opaque types.
584        &add_subtyping_projections::Subtyper,
585        &elaborate_drops::ElaborateDrops,
586        // Needs to happen after drop elaboration.
587        &Lint(check_call_recursion::CheckDropRecursion),
588        // This will remove extraneous landing pads which are no longer
589        // necessary as well as forcing any call in a non-unwinding
590        // function calling a possibly-unwinding function to abort the process.
591        &abort_unwinding_calls::AbortUnwindingCalls,
592        // AddMovesForPackedDrops needs to run after drop
593        // elaboration.
594        &add_moves_for_packed_drops::AddMovesForPackedDrops,
595        // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`.
596        // Otherwise it should run fairly late, but before optimizations begin.
597        &add_retag::AddRetag,
598        &elaborate_box_derefs::ElaborateBoxDerefs,
599        &coroutine::StateTransform,
600        &Lint(known_panics_lint::KnownPanicsLint),
601    ];
602    pm::run_passes_no_validate(tcx, body, passes, Some(MirPhase::Runtime(RuntimePhase::Initial)));
603}
604
605/// Returns the sequence of passes that do the initial cleanup of runtime MIR.
606fn run_runtime_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
607    let passes: &[&dyn MirPass<'tcx>] = &[
608        &lower_intrinsics::LowerIntrinsics,
609        &remove_place_mention::RemovePlaceMention,
610        &simplify::SimplifyCfg::PreOptimizations,
611    ];
612
613    pm::run_passes(
614        tcx,
615        body,
616        passes,
617        Some(MirPhase::Runtime(RuntimePhase::PostCleanup)),
618        pm::Optimizations::Allowed,
619    );
620
621    // Clear this by anticipation. Optimizations and runtime MIR have no reason to look
622    // into this information, which is meant for borrowck diagnostics.
623    for decl in &mut body.local_decls {
624        decl.local_info = ClearCrossCrate::Clear;
625    }
626}
627
/// Runs the main MIR optimization pipeline, leaving `body` in the
/// `Runtime(Optimized)` phase. When the item's codegen attributes request no
/// optimization, the pass manager is told to suppress optimizations.
fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    // Wraps a pass with a minimum optimization level of 1.
    fn o1<T>(x: T) -> WithMinOptLevel<T> {
        WithMinOptLevel(1, x)
    }

    let def_id = body.source.def_id();
    let optimizations = if tcx.def_kind(def_id).has_codegen_attrs()
        && tcx.codegen_fn_attrs(def_id).optimize.do_not_optimize()
    {
        pm::Optimizations::Suppressed
    } else {
        pm::Optimizations::Allowed
    };

    // The main optimizations that we do on MIR.
    pm::run_passes(
        tcx,
        body,
        &[
            // Add some UB checks before any UB gets optimized away.
            &check_alignment::CheckAlignment,
            &check_null::CheckNull,
            // Before inlining: trim down MIR with passes to reduce inlining work.

            // Has to be done before inlining, otherwise actual call will be almost always inlined.
            // Also simple, so can just do first.
            &lower_slice_len::LowerSliceLenCalls,
            // Perform instsimplify before inline to eliminate some trivial calls (like clone
            // shims).
            &instsimplify::InstSimplify::BeforeInline,
            // Perform inlining of `#[rustc_force_inline]`-annotated callees.
            &inline::ForceInline,
            // Perform inlining, which may add a lot of code.
            &inline::Inline,
            // Code from other crates may have storage markers, so this needs to happen after
            // inlining.
            &remove_storage_markers::RemoveStorageMarkers,
            // Inlining and instantiation may introduce ZST and useless drops.
            &remove_zsts::RemoveZsts,
            &remove_unneeded_drops::RemoveUnneededDrops,
            // Type instantiation may create uninhabited enums.
            // Also eliminates some unreachable branches based on variants of enums.
            &unreachable_enum_branching::UnreachableEnumBranching,
            &unreachable_prop::UnreachablePropagation,
            &o1(simplify::SimplifyCfg::AfterUnreachableEnumBranching),
            // Inlining may have introduced a lot of redundant code and a large move pattern.
            // Now, we need to shrink the generated MIR.
            &ref_prop::ReferencePropagation,
            &sroa::ScalarReplacementOfAggregates,
            &match_branches::MatchBranchSimplification,
            // inst combine is after MatchBranchSimplification to clean up Ne(_1, false)
            &multiple_return_terminators::MultipleReturnTerminators,
            // After simplifycfg, it allows us to discover new opportunities for peephole
            // optimizations.
            &instsimplify::InstSimplify::AfterSimplifyCfg,
            &simplify::SimplifyLocals::BeforeConstProp,
            &dead_store_elimination::DeadStoreElimination::Initial,
            &gvn::GVN,
            &simplify::SimplifyLocals::AfterGVN,
            &dataflow_const_prop::DataflowConstProp,
            &single_use_consts::SingleUseConsts,
            &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
            &jump_threading::JumpThreading,
            &early_otherwise_branch::EarlyOtherwiseBranch,
            &simplify_comparison_integral::SimplifyComparisonIntegral,
            &dest_prop::DestinationPropagation,
            &o1(simplify_branches::SimplifyConstCondition::Final),
            &o1(remove_noop_landing_pads::RemoveNoopLandingPads),
            &o1(simplify::SimplifyCfg::Final),
            // After the last SimplifyCfg, because this wants one-block functions.
            &strip_debuginfo::StripDebugInfo,
            &copy_prop::CopyProp,
            &dead_store_elimination::DeadStoreElimination::Final,
            &nrvo::RenameReturnPlace,
            &simplify::SimplifyLocals::Final,
            &multiple_return_terminators::MultipleReturnTerminators,
            &large_enums::EnumSizeOpt { discrepancy: 128 },
            // Some cleanup necessary at least for LLVM and potentially other codegen backends.
            &add_call_guards::CriticalCallEdges,
            // Cleanup for human readability, off by default.
            &prettify::ReorderBasicBlocks,
            &prettify::ReorderLocals,
            // Dump the end result for testing and debugging purposes.
            &dump_mir::Marker("PreCodegen"),
        ],
        Some(MirPhase::Runtime(RuntimePhase::Optimized)),
        optimizations,
    );
}
717
/// Optimize the MIR and prepare it for codegen.
fn optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> &Body<'_> {
    // Arena-allocate so the query can hand out a plain reference.
    tcx.arena.alloc(inner_optimized_mir(tcx, did))
}
722
/// Shared implementation of `optimized_mir`; the query wrapper above only
/// arena-allocates the result.
fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
    if tcx.is_constructor(did.to_def_id()) {
        // There's no reason to run all of the MIR passes on constructors when
        // we can just output the MIR we want directly. This also saves const
        // qualification and borrow checking the trouble of special casing
        // constructors.
        return shim::build_adt_ctor(tcx, did.to_def_id());
    }

    match tcx.hir().body_const_context(did) {
        // Run the `mir_for_ctfe` query, which depends on `mir_drops_elaborated_and_const_checked`
        // which we are going to steal below. Thus we need to run `mir_for_ctfe` first, so it
        // computes and caches its result.
        Some(hir::ConstContext::ConstFn) => tcx.ensure_done().mir_for_ctfe(did),
        None => {}
        Some(other) => panic!("do not use `optimized_mir` for constants: {other:?}"),
    }
    debug!("about to call mir_drops_elaborated...");
    let body = tcx.mir_drops_elaborated_and_const_checked(did).steal();
    // Runtime code always executes the `called_at_rt` arm of `const_eval_select`.
    let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::NotConst);

    // Don't bother optimizing bodies that already failed to compile.
    if body.tainted_by_errors.is_some() {
        return body;
    }

    // Before doing anything, remember which items are being mentioned so that the set of items
    // visited does not depend on the optimization level.
    // We do not use `run_passes` for this as that might skip the pass if `injection_phase` is set.
    mentioned_items::MentionedItems.run_pass(tcx, &mut body);

    // If `mir_drops_elaborated_and_const_checked` found that the current body has unsatisfiable
    // predicates, it will shrink the MIR to a single `unreachable` terminator.
    // More generally, if MIR is a lone `unreachable`, there is nothing to optimize.
    if let TerminatorKind::Unreachable = body.basic_blocks[START_BLOCK].terminator().kind
        && body.basic_blocks[START_BLOCK].statements.is_empty()
    {
        return body;
    }

    run_optimization_passes(tcx, &mut body);

    body
}
766
767/// Fetch all the promoteds of an item and prepare their MIR bodies to be ready for
768/// constant evaluation once all generic parameters become known.
769fn promoted_mir(tcx: TyCtxt<'_>, def: LocalDefId) -> &IndexVec<Promoted, Body<'_>> {
770    if tcx.is_constructor(def.to_def_id()) {
771        return tcx.arena.alloc(IndexVec::new());
772    }
773
774    if !tcx.is_synthetic_mir(def) {
775        tcx.ensure_done().mir_borrowck(def);
776    }
777    let mut promoted = tcx.mir_promoted(def).1.steal();
778
779    for body in &mut promoted {
780        run_analysis_to_runtime_passes(tcx, body);
781    }
782
783    tcx.arena.alloc(promoted)
784}