use rustc_hir::attrs::InlineAttr;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::find_attr;
use rustc_middle::bug;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
use rustc_middle::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{InliningThreshold, OptLevel};

use crate::{inline, pass_manager as pm};
/// Registers the `cross_crate_inlinable` query implementation in the global
/// provider table so the rest of the compiler can query it per `LocalDefId`.
pub(super) fn provide(providers: &mut Providers) {
    providers.cross_crate_inlinable = cross_crate_inlinable;
}
1718fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
19let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);
20// If this has an extern indicator, then this function is globally shared and thus will not
21 // generate cgu-internal copies which would make it cross-crate inlinable.
22if codegen_fn_attrs.contains_extern_indicator() {
23return false;
24 }
2526// This just reproduces the logic from Instance::requires_inline.
27match tcx.def_kind(def_id) {
28 DefKind::Ctor(..) | DefKind::Closure | DefKind::SyntheticCoroutineBody => return true,
29 DefKind::Fn | DefKind::AssocFn => {}
30_ => return false,
31 }
3233// From this point on, it is valid to return true or false.
34if tcx.sess.opts.unstable_opts.cross_crate_inline_threshold == InliningThreshold::Always {
35return true;
36 }
3738if {
{
'done:
{
for i in ::rustc_hir::attrs::HasAttrs::get_attrs(def_id, &tcx)
{
#[allow(unused_imports)]
use rustc_hir::attrs::AttributeKind::*;
let i: &rustc_hir::Attribute = i;
match i {
rustc_hir::Attribute::Parsed(RustcIntrinsic) => {
break 'done Some(());
}
rustc_hir::Attribute::Unparsed(..) =>
{}
#[deny(unreachable_patterns)]
_ => {}
}
}
None
}
}
}.is_some()find_attr!(tcx, def_id, RustcIntrinsic) {
39// Intrinsic fallback bodies are always cross-crate inlineable.
40 // To ensure that the MIR inliner doesn't cluelessly try to inline fallback
41 // bodies even when the backend would implement something better, we stop
42 // the MIR inliner from ever inlining an intrinsic.
43return true;
44 }
4546// Obey source annotations first; this is important because it means we can use
47 // #[inline(never)] to force code generation.
48match codegen_fn_attrs.inline {
49 InlineAttr::Never => return false,
50 InlineAttr::Hint | InlineAttr::Always | InlineAttr::Force { .. } => return true,
51_ => {}
52 }
5354// If the crate is likely to be mostly unused, use cross-crate inlining to defer codegen until
55 // the function is referenced, in order to skip codegen for unused functions. This is
56 // intentionally after the check for `inline(never)`, so that `inline(never)` wins.
57if tcx.sess.opts.unstable_opts.hint_mostly_unused {
58return true;
59 }
6061let sig = tcx.fn_sig(def_id).instantiate_identity();
62for ty in sig.inputs().skip_binder().iter().chain(std::iter::once(&sig.output().skip_binder()))
63 {
64// FIXME(f16_f128): in order to avoid crashes building `core`, always inline to skip
65 // codegen if the function is not used.
66if ty == &tcx.types.f16 || ty == &tcx.types.f128 {
67return true;
68 }
69 }
7071// Don't do any inference when incremental compilation is enabled; the additional inlining that
72 // inference permits also creates more work for small edits.
73if tcx.sess.opts.incremental.is_some() {
74return false;
75 }
7677// Don't do any inference if codegen optimizations are disabled and also MIR inlining is not
78 // enabled. This ensures that we do inference even if someone only passes -Zinline-mir,
79 // which is less confusing than having to also enable -Copt-level=1.
80let inliner_will_run = pm::should_run_pass(tcx, &inline::Inline, pm::Optimizations::Allowed)
81 || inline::ForceInline::should_run_pass_for_callee(tcx, def_id.to_def_id());
82if #[allow(non_exhaustive_omitted_patterns)] match tcx.sess.opts.optimize {
OptLevel::No => true,
_ => false,
}matches!(tcx.sess.opts.optimize, OptLevel::No) && !inliner_will_run {
83return false;
84 }
8586if !tcx.is_mir_available(def_id) {
87return false;
88 }
8990let threshold = match tcx.sess.opts.unstable_opts.cross_crate_inline_threshold {
91 InliningThreshold::Always => return true,
92 InliningThreshold::Sometimes(threshold) => threshold,
93 InliningThreshold::Never => return false,
94 };
9596let mir = tcx.optimized_mir(def_id);
97let mut checker =
98CostChecker { tcx, callee_body: mir, calls: 0, statements: 0, landing_pads: 0, resumes: 0 };
99checker.visit_body(mir);
100checker.calls == 0
101&& checker.resumes == 0
102&& checker.landing_pads == 0
103&& checker.statements <= threshold104}
105106// The threshold that CostChecker computes is balancing the desire to make more things
107// inlinable cross crates against the growth in incremental CGU size that happens when too many
108// things in the sysroot are made inlinable.
109// Permitting calls causes the size of some incremental CGUs to grow, because more functions are
110// made inlinable out of the sysroot or dependencies.
111// Assert terminators are similar to calls, but do not have the same impact on compile time, so
112// those are just treated as statements.
113// A threshold exists at all because we don't want to blindly mark a huge function as inlinable.
/// Accumulates a rough inlining-cost profile of one MIR body.
///
/// `cross_crate_inlinable` walks a function's optimized MIR with this visitor
/// and then requires `calls == 0 && resumes == 0 && landing_pads == 0 &&
/// statements <= threshold` before marking the function inlinable.
struct CostChecker<'b, 'tcx> {
    tcx: TyCtxt<'tcx>,
    // The body being costed; needed to resolve place types for `Drop` terminators.
    callee_body: &'b Body<'tcx>,
    // Non-intrinsic calls, tail calls, and non-trivial drops seen so far.
    calls: usize,
    // Statements plus terminators, excluding StorageLive/StorageDead/Nop.
    statements: usize,
    // `UnwindAction::Cleanup` edges seen on drops, calls, asserts, and inline asm.
    landing_pads: usize,
    // `UnwindResume` terminators seen.
    resumes: usize,
}
123124impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
125fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
126// Don't count StorageLive/StorageDead in the inlining cost.
127match statement.kind {
128 StatementKind::StorageLive(_) | StatementKind::StorageDead(_) | StatementKind::Nop => {}
129_ => self.statements += 1,
130 }
131 }
132133fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
134self.statements += 1;
135let tcx = self.tcx;
136match &terminator.kind {
137 TerminatorKind::Drop { place, unwind, .. } => {
138let ty = place.ty(self.callee_body, tcx).ty;
139if !ty.is_trivially_pure_clone_copy() {
140self.calls += 1;
141if let UnwindAction::Cleanup(_) = unwind {
142self.landing_pads += 1;
143 }
144 }
145 }
146 TerminatorKind::Call { func, unwind, .. } => {
147// We track calls because they make our function not a leaf (and in theory, the
148 // number of calls indicates how likely this function is to perturb other CGUs).
149 // But intrinsics don't have a body that gets assigned to a CGU, so they are
150 // ignored.
151if let Some((fn_def_id, _)) = func.const_fn_def()
152 && {
{
'done:
{
for i in
::rustc_hir::attrs::HasAttrs::get_attrs(fn_def_id, &tcx) {
#[allow(unused_imports)]
use rustc_hir::attrs::AttributeKind::*;
let i: &rustc_hir::Attribute = i;
match i {
rustc_hir::Attribute::Parsed(RustcIntrinsic) => {
break 'done Some(());
}
rustc_hir::Attribute::Unparsed(..) =>
{}
#[deny(unreachable_patterns)]
_ => {}
}
}
None
}
}
}.is_some()find_attr!(tcx, fn_def_id, RustcIntrinsic)153 {
154return;
155 }
156self.calls += 1;
157if let UnwindAction::Cleanup(_) = unwind {
158self.landing_pads += 1;
159 }
160 }
161 TerminatorKind::TailCall { .. } => {
162self.calls += 1;
163 }
164 TerminatorKind::Assert { unwind, .. } => {
165if let UnwindAction::Cleanup(_) = unwind {
166self.landing_pads += 1;
167 }
168 }
169 TerminatorKind::UnwindResume => self.resumes += 1,
170 TerminatorKind::InlineAsm { unwind, .. } => {
171if let UnwindAction::Cleanup(_) = unwind {
172self.landing_pads += 1;
173 }
174 }
175 TerminatorKind::Return176 | TerminatorKind::Goto { .. }
177 | TerminatorKind::SwitchInt { .. }
178 | TerminatorKind::Unreachable179 | TerminatorKind::UnwindTerminate(_) => {}
180 kind @ (TerminatorKind::FalseUnwind { .. }
181 | TerminatorKind::FalseEdge { .. }
182 | TerminatorKind::Yield { .. }
183 | TerminatorKind::CoroutineDrop) => {
184::rustc_middle::util::bug::bug_fmt(format_args!("{0:?} should not be in runtime MIR",
kind));bug!("{kind:?} should not be in runtime MIR");
185 }
186 }
187 }
188}