use rustc_hir::attrs::{AttributeKind, InlineAttr};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::find_attr;
use rustc_middle::bug;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
use rustc_middle::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{InliningThreshold, OptLevel};
use rustc_span::sym;

use crate::{inline, pass_manager as pm};

pub(super) fn provide(providers: &mut Providers) {
    providers.cross_crate_inlinable = cross_crate_inlinable;
}
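
/// Decides whether the MIR of `def_id` should be made available to other crates,
/// so that downstream crates can inline it even without an `#[inline]` annotation.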
fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
    let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);
    // If this has an extern indicator, then this function is globally shared and thus will not
    // generate cgu-internal copies which would make it cross-crate inlinable.
    if codegen_fn_attrs.contains_extern_indicator() {
        return false;
    }

    // This just reproduces the logic from Instance::requires_inline.
    match tcx.def_kind(def_id) {
        DefKind::Ctor(..) | DefKind::Closure | DefKind::SyntheticCoroutineBody => return true,
        DefKind::Fn | DefKind::AssocFn => {}
        _ => return false,
    }

    // From this point on, it is valid to return true or false.
    if tcx.sess.opts.unstable_opts.cross_crate_inline_threshold == InliningThreshold::Always {
        return true;
    }

    // FIXME(autodiff): replace this as per discussion in https://github.com/rust-lang/rust/pull/149033#discussion_r2535465880
    if tcx.has_attr(def_id, sym::autodiff_forward)
        || tcx.has_attr(def_id, sym::autodiff_reverse)
        || tcx.has_attr(def_id, sym::rustc_autodiff)
    {
        return true;
    }

    if find_attr!(tcx.get_all_attrs(def_id), AttributeKind::RustcIntrinsic) {
        // Intrinsic fallback bodies are always cross-crate inlineable.
        // To ensure that the MIR inliner doesn't cluelessly try to inline fallback
        // bodies even when the backend would implement something better, we stop
        // the MIR inliner from ever inlining an intrinsic.
        return true;
    }

    // Obey source annotations first; this is important because it means we can use
    // #[inline(never)] to force code generation.
    match codegen_fn_attrs.inline {
        InlineAttr::Never => return false,
        InlineAttr::Hint | InlineAttr::Always | InlineAttr::Force { .. } => return true,
        _ => {}
    }

    // If the crate is likely to be mostly unused, use cross-crate inlining to defer codegen until
    // the function is referenced, in order to skip codegen for unused functions. This is
    // intentionally after the check for `inline(never)`, so that `inline(never)` wins.
    if tcx.sess.opts.unstable_opts.hint_mostly_unused {
        return true;
    }

    let sig = tcx.fn_sig(def_id).instantiate_identity();
    for ty in sig.inputs().skip_binder().iter().chain(std::iter::once(&sig.output().skip_binder()))
    {
        // FIXME(f16_f128): in order to avoid crashes building `core`, always inline to skip
        // codegen if the function is not used.
        if ty == &tcx.types.f16 || ty == &tcx.types.f128 {
            return true;
        }
    }

    // Don't do any inference when incremental compilation is enabled; the additional inlining that
    // inference permits also creates more work for small edits.
    if tcx.sess.opts.incremental.is_some() {
        return false;
    }

    // Don't do any inference if codegen optimizations are disabled and also MIR inlining is not
    // enabled. This ensures that we do inference even if someone only passes -Zinline-mir,
    // which is less confusing than having to also enable -Copt-level=1.
    let inliner_will_run = pm::should_run_pass(tcx, &inline::Inline, pm::Optimizations::Allowed)
        || inline::ForceInline::should_run_pass_for_callee(tcx, def_id.to_def_id());
    if matches!(tcx.sess.opts.optimize, OptLevel::No) && !inliner_will_run {
        return false;
    }
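
    // If there is no MIR for this body, there is nothing to export for other crates to
    // inline, so it cannot be cross-crate inlinable.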
    if !tcx.is_mir_available(def_id) {
        return false;
    }

    let threshold = match tcx.sess.opts.unstable_opts.cross_crate_inline_threshold {
        InliningThreshold::Always => return true,
        InliningThreshold::Sometimes(threshold) => threshold,
        InliningThreshold::Never => return false,
    };
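
    // Measure the optimized body with the cost model described in the comment on CostChecker
    // below: only leaf functions (no calls, resumes, or landing pads) whose statement count is
    // under the threshold are marked cross-crate inlinable.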
    let mir = tcx.optimized_mir(def_id);
    let mut checker =
        CostChecker { tcx, callee_body: mir, calls: 0, statements: 0, landing_pads: 0, resumes: 0 };
    checker.visit_body(mir);
    checker.calls == 0
        && checker.resumes == 0
        && checker.landing_pads == 0
        && checker.statements <= threshold
}

// The threshold that CostChecker computes balances the desire to make more things
// inlinable cross-crate against the growth in incremental CGU size that happens when too many
// things in the sysroot are made inlinable.
// Permitting calls causes the size of some incremental CGUs to grow, because more functions are
// made inlinable out of the sysroot or dependencies.
// Assert terminators are similar to calls, but do not have the same impact on compile time, so
// those are just treated as statements.
// A threshold exists at all because we don't want to blindly mark a huge function as inlinable.

struct CostChecker<'b, 'tcx> {
    tcx: TyCtxt<'tcx>,
    callee_body: &'b Body<'tcx>,
    calls: usize,
    statements: usize,
    landing_pads: usize,
    resumes: usize,
}

impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
        // Don't count StorageLive/StorageDead/Nop in the inlining cost.
        match statement.kind {
            StatementKind::StorageLive(_) | StatementKind::StorageDead(_) | StatementKind::Nop => {}
            _ => self.statements += 1,
        }
    }

    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
        self.statements += 1;
        let tcx = self.tcx;
        match &terminator.kind {
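            // Dropping a value whose type is not trivially pure Clone+Copy lowers to a call
            // to its drop glue, so such drops are counted like calls (including their
            // unwind edges).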
            TerminatorKind::Drop { place, unwind, .. } => {
                let ty = place.ty(self.callee_body, tcx).ty;
                if !ty.is_trivially_pure_clone_copy() {
                    self.calls += 1;
                    if let UnwindAction::Cleanup(_) = unwind {
                        self.landing_pads += 1;
                    }
                }
            }
            TerminatorKind::Call { func, unwind, .. } => {
                // We track calls because they make our function not a leaf (and in theory, the
                // number of calls indicates how likely this function is to perturb other CGUs).
                // But intrinsics don't have a body that gets assigned to a CGU, so they are
                // ignored.
                if let Some((fn_def_id, _)) = func.const_fn_def()
                    && find_attr!(tcx.get_all_attrs(fn_def_id), AttributeKind::RustcIntrinsic)
                {
                    return;
                }
                self.calls += 1;
                if let UnwindAction::Cleanup(_) = unwind {
                    self.landing_pads += 1;
                }
            }
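            // Tail calls also make this function a non-leaf; `TailCall` has no unwind action,
            // so there is no landing pad to count.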
            TerminatorKind::TailCall { .. } => {
                self.calls += 1;
            }
            TerminatorKind::Assert { unwind, .. } => {
                if let UnwindAction::Cleanup(_) = unwind {
                    self.landing_pads += 1;
                }
            }
            TerminatorKind::UnwindResume => self.resumes += 1,
            TerminatorKind::InlineAsm { unwind, .. } => {
                if let UnwindAction::Cleanup(_) = unwind {
                    self.landing_pads += 1;
                }
            }
            TerminatorKind::Return
            | TerminatorKind::Goto { .. }
            | TerminatorKind::SwitchInt { .. }
            | TerminatorKind::Unreachable
            | TerminatorKind::UnwindTerminate(_) => {}
            kind @ (TerminatorKind::FalseUnwind { .. }
            | TerminatorKind::FalseEdge { .. }
            | TerminatorKind::Yield { .. }
            | TerminatorKind::CoroutineDrop) => {
                bug!("{kind:?} should not be in runtime MIR");
            }
        }
    }
}