rustc_ty_utils/
abi.rs

1use std::iter;
2
3use rustc_abi::Primitive::Pointer;
4use rustc_abi::{BackendRepr, ExternAbi, PointerKind, Scalar, Size};
5use rustc_hir as hir;
6use rustc_hir::lang_items::LangItem;
7use rustc_middle::bug;
8use rustc_middle::query::Providers;
9use rustc_middle::ty::layout::{
10    FnAbiError, HasTyCtxt, HasTypingEnv, LayoutCx, LayoutOf, TyAndLayout, fn_can_unwind,
11};
12use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt};
13use rustc_session::config::OptLevel;
14use rustc_span::def_id::DefId;
15use rustc_target::callconv::{
16    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, RiscvInterruptKind,
17};
18use tracing::debug;
19
20pub(crate) fn provide(providers: &mut Providers) {
21    *providers = Providers { fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
22}
23
// NOTE(eddyb) this is private to avoid using it from outside of
// `fn_abi_of_instance` - any other uses are either too high-level
// for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
// or should go through `FnAbi` instead, to avoid losing any
// adjustments `fn_abi_of_instance` might be performing.
/// Computes the monomorphic `FnSig` used for ABI computation of `instance`,
/// applying the shim- and coroutine-specific signature rewrites that a plain
/// `Ty::fn_sig` lookup would not perform (vtable shims, RPITIT reify shims,
/// closure/coroutine environment arguments, `resume`/`poll`/`next` lowering).
#[tracing::instrument(level = "debug", skip(tcx, typing_env))]
fn fn_sig_for_fn_abi<'tcx>(
    tcx: TyCtxt<'tcx>,
    instance: ty::Instance<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
) -> ty::FnSig<'tcx> {
    // Thread-local shims have a fixed signature: no arguments, returning a
    // pointer to the thread-local itself, with the "unadjusted" ABI.
    if let InstanceKind::ThreadLocalShim(..) = instance.def {
        return tcx.mk_fn_sig(
            [],
            tcx.thread_local_ptr_ty(instance.def_id()),
            false,
            hir::Safety::Safe,
            rustc_abi::ExternAbi::Unadjusted,
        );
    }

    let ty = instance.ty(tcx, typing_env);
    match *ty.kind() {
        ty::FnDef(def_id, args) => {
            // Start from the declared signature, instantiated for this
            // instance, with all late-bound regions erased (ABI is
            // lifetime-independent).
            let mut sig = tcx
                .instantiate_bound_regions_with_erased(tcx.fn_sig(def_id).instantiate(tcx, args));

            // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
            if let ty::InstanceKind::VTableShim(..) = instance.def {
                let mut inputs_and_output = sig.inputs_and_output.to_vec();
                inputs_and_output[0] = Ty::new_mut_ptr(tcx, inputs_and_output[0]);
                sig.inputs_and_output = tcx.mk_type_list(&inputs_and_output);
            }

            // Modify `fn() -> impl Future` to `fn() -> dyn* Future`.
            if let ty::InstanceKind::ReifyShim(def_id, _) = instance.def
                && let Some((rpitit_def_id, fn_args)) =
                    tcx.return_position_impl_trait_in_trait_shim_data(def_id)
            {
                let fn_args = fn_args.instantiate(tcx, args);
                // The RPITIT's generics are the function's generics plus extra
                // (erased) lifetimes; it never adds type or const params.
                let rpitit_args =
                    fn_args.extend_to(tcx, rpitit_def_id, |param, _| match param.kind {
                        ty::GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
                        ty::GenericParamDefKind::Type { .. }
                        | ty::GenericParamDefKind::Const { .. } => {
                            unreachable!("rpitit should have no addition ty/ct")
                        }
                    });
                let dyn_star_ty = Ty::new_dynamic(
                    tcx,
                    tcx.item_bounds_to_existential_predicates(rpitit_def_id, rpitit_args),
                    tcx.lifetimes.re_erased,
                    ty::DynStar,
                );
                // Replace the opaque return type with the `dyn*` type.
                let mut inputs_and_output = sig.inputs_and_output.to_vec();
                *inputs_and_output.last_mut().unwrap() = dyn_star_ty;
                sig.inputs_and_output = tcx.mk_type_list(&inputs_and_output);
            }

            sig
        }
        // Closures: prepend the closure environment (by value, `&` or `&mut`
        // depending on the closure kind) to the user-visible inputs.
        ty::Closure(def_id, args) => {
            let sig = tcx.instantiate_bound_regions_with_erased(args.as_closure().sig());
            let env_ty = tcx.closure_env_ty(
                Ty::new_closure(tcx, def_id, args),
                args.as_closure().kind(),
                tcx.lifetimes.re_erased,
            );

            tcx.mk_fn_sig(
                iter::once(env_ty).chain(sig.inputs().iter().cloned()),
                sig.output(),
                sig.c_variadic,
                sig.safety,
                sig.abi,
            )
        }
        ty::CoroutineClosure(def_id, args) => {
            let coroutine_ty = Ty::new_coroutine_closure(tcx, def_id, args);
            let sig = args.as_coroutine_closure().coroutine_closure_sig();

            // When this `CoroutineClosure` comes from a `ConstructCoroutineInClosureShim`,
            // make sure we respect the `target_kind` in that shim.
            // FIXME(async_closures): This shouldn't be needed, and we should be populating
            // a separate def-id for these bodies.
            let mut coroutine_kind = args.as_coroutine_closure().kind();

            let env_ty =
                if let InstanceKind::ConstructCoroutineInClosureShim { receiver_by_ref, .. } =
                    instance.def
                {
                    coroutine_kind = ty::ClosureKind::FnOnce;

                    // Implementations of `FnMut` and `Fn` for coroutine-closures
                    // still take their receiver by ref.
                    if receiver_by_ref {
                        Ty::new_imm_ref(tcx, tcx.lifetimes.re_erased, coroutine_ty)
                    } else {
                        coroutine_ty
                    }
                } else {
                    tcx.closure_env_ty(coroutine_ty, coroutine_kind, tcx.lifetimes.re_erased)
                };

            let sig = tcx.instantiate_bound_regions_with_erased(sig);

            // Signature: `(env, tupled_args) -> Coroutine`, where the returned
            // coroutine type is derived from the closure's captured state.
            tcx.mk_fn_sig(
                iter::once(env_ty).chain([sig.tupled_inputs_ty]),
                sig.to_coroutine_given_kind_and_upvars(
                    tcx,
                    args.as_coroutine_closure().parent_args(),
                    tcx.coroutine_for_closure(def_id),
                    coroutine_kind,
                    tcx.lifetimes.re_erased,
                    args.as_coroutine_closure().tupled_upvars_ty(),
                    args.as_coroutine_closure().coroutine_captures_by_ref_ty(),
                ),
                sig.c_variadic,
                sig.safety,
                sig.abi,
            )
        }
        ty::Coroutine(did, args) => {
            let coroutine_kind = tcx.coroutine_kind(did).unwrap();
            let sig = args.as_coroutine().sig();

            // The coroutine state is passed as `&mut Self`...
            let env_ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, ty);

            let pin_did = tcx.require_lang_item(LangItem::Pin, None);
            let pin_adt_ref = tcx.adt_def(pin_did);
            let pin_args = tcx.mk_args(&[env_ty.into()]);
            // ...wrapped in `Pin<&mut Self>` for every kind except `gen`.
            let env_ty = match coroutine_kind {
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _) => {
                    // Iterator::next doesn't accept a pinned argument,
                    // unlike for all other coroutine kinds.
                    env_ty
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Async, _)
                | hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::AsyncGen, _)
                | hir::CoroutineKind::Coroutine(_) => Ty::new_adt(tcx, pin_adt_ref, pin_args),
            };

            // The `FnSig` and the `ret_ty` here is for a coroutines main
            // `Coroutine::resume(...) -> CoroutineState` function in case we
            // have an ordinary coroutine, the `Future::poll(...) -> Poll`
            // function in case this is a special coroutine backing an async construct
            // or the `Iterator::next(...) -> Option` function in case this is a
            // special coroutine backing a gen construct.
            let (resume_ty, ret_ty) = match coroutine_kind {
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Async, _) => {
                    // The signature should be `Future::poll(_, &mut Context<'_>) -> Poll<Output>`
                    assert_eq!(sig.yield_ty, tcx.types.unit);

                    let poll_did = tcx.require_lang_item(LangItem::Poll, None);
                    let poll_adt_ref = tcx.adt_def(poll_did);
                    let poll_args = tcx.mk_args(&[sig.return_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, poll_adt_ref, poll_args);

                    // We have to replace the `ResumeTy` that is used for type and borrow checking
                    // with `&mut Context<'_>` which is used in codegen.
                    #[cfg(debug_assertions)]
                    {
                        if let ty::Adt(resume_ty_adt, _) = sig.resume_ty.kind() {
                            let expected_adt =
                                tcx.adt_def(tcx.require_lang_item(LangItem::ResumeTy, None));
                            assert_eq!(*resume_ty_adt, expected_adt);
                        } else {
                            panic!("expected `ResumeTy`, found `{:?}`", sig.resume_ty);
                        };
                    }
                    let context_mut_ref = Ty::new_task_context(tcx);

                    (Some(context_mut_ref), ret_ty)
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _) => {
                    // The signature should be `Iterator::next(_) -> Option<Yield>`
                    let option_did = tcx.require_lang_item(LangItem::Option, None);
                    let option_adt_ref = tcx.adt_def(option_did);
                    let option_args = tcx.mk_args(&[sig.yield_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, option_adt_ref, option_args);

                    assert_eq!(sig.return_ty, tcx.types.unit);
                    assert_eq!(sig.resume_ty, tcx.types.unit);

                    (None, ret_ty)
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::AsyncGen, _) => {
                    // The signature should be
                    // `AsyncIterator::poll_next(_, &mut Context<'_>) -> Poll<Option<Output>>`
                    assert_eq!(sig.return_ty, tcx.types.unit);

                    // Yield type is already `Poll<Option<yield_ty>>`
                    let ret_ty = sig.yield_ty;

                    // We have to replace the `ResumeTy` that is used for type and borrow checking
                    // with `&mut Context<'_>` which is used in codegen.
                    #[cfg(debug_assertions)]
                    {
                        if let ty::Adt(resume_ty_adt, _) = sig.resume_ty.kind() {
                            let expected_adt =
                                tcx.adt_def(tcx.require_lang_item(LangItem::ResumeTy, None));
                            assert_eq!(*resume_ty_adt, expected_adt);
                        } else {
                            panic!("expected `ResumeTy`, found `{:?}`", sig.resume_ty);
                        };
                    }
                    let context_mut_ref = Ty::new_task_context(tcx);

                    (Some(context_mut_ref), ret_ty)
                }
                hir::CoroutineKind::Coroutine(_) => {
                    // The signature should be `Coroutine::resume(_, Resume) -> CoroutineState<Yield, Return>`
                    let state_did = tcx.require_lang_item(LangItem::CoroutineState, None);
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_args = tcx.mk_args(&[sig.yield_ty.into(), sig.return_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args);

                    (Some(sig.resume_ty), ret_ty)
                }
            };

            if let Some(resume_ty) = resume_ty {
                tcx.mk_fn_sig(
                    [env_ty, resume_ty],
                    ret_ty,
                    false,
                    hir::Safety::Safe,
                    rustc_abi::ExternAbi::Rust,
                )
            } else {
                // `Iterator::next` doesn't have a `resume` argument.
                tcx.mk_fn_sig(
                    [env_ty],
                    ret_ty,
                    false,
                    hir::Safety::Safe,
                    rustc_abi::ExternAbi::Rust,
                )
            }
        }
        _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
    }
}
267
268#[inline]
269fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: ExternAbi, c_variadic: bool) -> Conv {
270    use rustc_abi::ExternAbi::*;
271    match tcx.sess.target.adjust_abi(abi, c_variadic) {
272        RustIntrinsic | Rust | RustCall => Conv::Rust,
273
274        // This is intentionally not using `Conv::Cold`, as that has to preserve
275        // even SIMD registers, which is generally not a good trade-off.
276        RustCold => Conv::PreserveMost,
277
278        // It's the ABI's job to select this, not ours.
279        System { .. } => bug!("system abi should be selected elsewhere"),
280        EfiApi => bug!("eficall abi should be selected elsewhere"),
281
282        Stdcall { .. } => Conv::X86Stdcall,
283        Fastcall { .. } => Conv::X86Fastcall,
284        Vectorcall { .. } => Conv::X86VectorCall,
285        Thiscall { .. } => Conv::X86ThisCall,
286        C { .. } => Conv::C,
287        Unadjusted => Conv::C,
288        Win64 { .. } => Conv::X86_64Win64,
289        SysV64 { .. } => Conv::X86_64SysV,
290        Aapcs { .. } => Conv::ArmAapcs,
291        CCmseNonSecureCall => Conv::CCmseNonSecureCall,
292        CCmseNonSecureEntry => Conv::CCmseNonSecureEntry,
293        PtxKernel => Conv::GpuKernel,
294        Msp430Interrupt => Conv::Msp430Intr,
295        X86Interrupt => Conv::X86Intr,
296        GpuKernel => Conv::GpuKernel,
297        AvrInterrupt => Conv::AvrInterrupt,
298        AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
299        RiscvInterruptM => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine },
300        RiscvInterruptS => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor },
301
302        // These API constants ought to be more specific...
303        Cdecl { .. } => Conv::C,
304    }
305}
306
307fn fn_abi_of_fn_ptr<'tcx>(
308    tcx: TyCtxt<'tcx>,
309    query: ty::PseudoCanonicalInput<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
310) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
311    let ty::PseudoCanonicalInput { typing_env, value: (sig, extra_args) } = query;
312    fn_abi_new_uncached(
313        &LayoutCx::new(tcx, typing_env),
314        tcx.instantiate_bound_regions_with_erased(sig),
315        extra_args,
316        None,
317    )
318}
319
320fn fn_abi_of_instance<'tcx>(
321    tcx: TyCtxt<'tcx>,
322    query: ty::PseudoCanonicalInput<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
323) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
324    let ty::PseudoCanonicalInput { typing_env, value: (instance, extra_args) } = query;
325    fn_abi_new_uncached(
326        &LayoutCx::new(tcx, typing_env),
327        fn_sig_for_fn_abi(tcx, instance, typing_env),
328        extra_args,
329        Some(instance),
330    )
331}
332
333// Handle safe Rust thin and wide pointers.
334fn adjust_for_rust_scalar<'tcx>(
335    cx: LayoutCx<'tcx>,
336    attrs: &mut ArgAttributes,
337    scalar: Scalar,
338    layout: TyAndLayout<'tcx>,
339    offset: Size,
340    is_return: bool,
341    drop_target_pointee: Option<Ty<'tcx>>,
342) {
343    // Booleans are always a noundef i1 that needs to be zero-extended.
344    if scalar.is_bool() {
345        attrs.ext(ArgExtension::Zext);
346        attrs.set(ArgAttribute::NoUndef);
347        return;
348    }
349
350    if !scalar.is_uninit_valid() {
351        attrs.set(ArgAttribute::NoUndef);
352    }
353
354    // Only pointer types handled below.
355    let Scalar::Initialized { value: Pointer(_), valid_range } = scalar else { return };
356
357    // Set `nonnull` if the validity range excludes zero, or for the argument to `drop_in_place`,
358    // which must be nonnull per its documented safety requirements.
359    if !valid_range.contains(0) || drop_target_pointee.is_some() {
360        attrs.set(ArgAttribute::NonNull);
361    }
362
363    let tcx = cx.tcx();
364
365    if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
366        let kind = if let Some(kind) = pointee.safe {
367            Some(kind)
368        } else if let Some(pointee) = drop_target_pointee {
369            // The argument to `drop_in_place` is semantically equivalent to a mutable reference.
370            Some(PointerKind::MutableRef { unpin: pointee.is_unpin(tcx, cx.typing_env) })
371        } else {
372            None
373        };
374        if let Some(kind) = kind {
375            attrs.pointee_align = Some(pointee.align);
376
377            // `Box` are not necessarily dereferenceable for the entire duration of the function as
378            // they can be deallocated at any time. Same for non-frozen shared references (see
379            // <https://github.com/rust-lang/rust/pull/98017>), and for mutable references to
380            // potentially self-referential types (see
381            // <https://github.com/rust-lang/unsafe-code-guidelines/issues/381>). If LLVM had a way
382            // to say "dereferenceable on entry" we could use it here.
383            attrs.pointee_size = match kind {
384                PointerKind::Box { .. }
385                | PointerKind::SharedRef { frozen: false }
386                | PointerKind::MutableRef { unpin: false } => Size::ZERO,
387                PointerKind::SharedRef { frozen: true }
388                | PointerKind::MutableRef { unpin: true } => pointee.size,
389            };
390
391            // The aliasing rules for `Box<T>` are still not decided, but currently we emit
392            // `noalias` for it. This can be turned off using an unstable flag.
393            // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
394            let noalias_for_box = tcx.sess.opts.unstable_opts.box_noalias;
395
396            // LLVM prior to version 12 had known miscompiles in the presence of noalias attributes
397            // (see #54878), so it was conditionally disabled, but we don't support earlier
398            // versions at all anymore. We still support turning it off using -Zmutable-noalias.
399            let noalias_mut_ref = tcx.sess.opts.unstable_opts.mutable_noalias;
400
401            // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as both
402            // `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely on memory
403            // dependencies rather than pointer equality. However this only applies to arguments,
404            // not return values.
405            //
406            // `&mut T` and `Box<T>` where `T: Unpin` are unique and hence `noalias`.
407            let no_alias = match kind {
408                PointerKind::SharedRef { frozen } => frozen,
409                PointerKind::MutableRef { unpin } => unpin && noalias_mut_ref,
410                PointerKind::Box { unpin, global } => unpin && global && noalias_for_box,
411            };
412            // We can never add `noalias` in return position; that LLVM attribute has some very surprising semantics
413            // (see <https://github.com/rust-lang/unsafe-code-guidelines/issues/385#issuecomment-1368055745>).
414            if no_alias && !is_return {
415                attrs.set(ArgAttribute::NoAlias);
416            }
417
418            if matches!(kind, PointerKind::SharedRef { frozen: true }) && !is_return {
419                attrs.set(ArgAttribute::ReadOnly);
420            }
421        }
422    }
423}
424
/// Ensure that the ABI makes basic sense.
///
/// Debug-quality invariant checks run on every freshly-built `FnAbi`: each
/// argument's `PassMode` must be compatible with its layout and the declared
/// `extern` ABI. Panics/asserts on violation; returns nothing on success.
fn fn_abi_sanity_check<'tcx>(
    cx: &LayoutCx<'tcx>,
    fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
    spec_abi: ExternAbi,
) {
    // Checks a single argument (or, when called with `fn_abi.ret`, the return
    // place) against the invariants of its `PassMode`.
    fn fn_arg_sanity_check<'tcx>(
        cx: &LayoutCx<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        spec_abi: ExternAbi,
        arg: &ArgAbi<'tcx, Ty<'tcx>>,
    ) {
        let tcx = cx.tcx();

        // Rust-family ABIs have two extra invariants of their own.
        if spec_abi == ExternAbi::Rust
            || spec_abi == ExternAbi::RustCall
            || spec_abi == ExternAbi::RustCold
        {
            if arg.layout.is_zst() {
                // Casting closures to function pointers depends on ZST closure types being
                // omitted entirely in the calling convention.
                assert!(arg.is_ignore());
            }
            if let PassMode::Indirect { on_stack, .. } = arg.mode {
                assert!(!on_stack, "rust abi shouldn't use on_stack");
            }
        }

        match &arg.mode {
            PassMode::Ignore => {
                // Only ZSTs and uninhabited types may be dropped from the ABI.
                assert!(arg.layout.is_zst() || arg.layout.is_uninhabited());
            }
            PassMode::Direct(_) => {
                // Here the Rust type is used to determine the actual ABI, so we have to be very
                // careful. Scalar/Vector is fine, since backends will generally use
                // `layout.backend_repr` and ignore everything else. We should just reject
                // `Aggregate` entirely here, but some targets need to be fixed first.
                match arg.layout.backend_repr {
                    BackendRepr::Uninhabited
                    | BackendRepr::Scalar(_)
                    | BackendRepr::Vector { .. } => {}
                    BackendRepr::ScalarPair(..) => {
                        panic!("`PassMode::Direct` used for ScalarPair type {}", arg.layout.ty)
                    }
                    BackendRepr::Memory { sized } => {
                        // For an unsized type we'd only pass the sized prefix, so there is no universe
                        // in which we ever want to allow this.
                        assert!(sized, "`PassMode::Direct` for unsized type in ABI: {:#?}", fn_abi);
                        // This really shouldn't happen even for sized aggregates, since
                        // `immediate_llvm_type` will use `layout.fields` to turn this Rust type into an
                        // LLVM type. This means all sorts of Rust type details leak into the ABI.
                        // However wasm sadly *does* currently use this mode for its "C" ABI so we
                        // have to allow it -- but we absolutely shouldn't let any more targets do
                        // that. (Also see <https://github.com/rust-lang/rust/issues/115666>.)
                        //
                        // The unadjusted ABI also uses Direct for all args and is ill-specified,
                        // but unfortunately we need it for calling certain LLVM intrinsics.

                        match spec_abi {
                            ExternAbi::Unadjusted => {}
                            ExternAbi::C { unwind: _ }
                                if matches!(&*tcx.sess.target.arch, "wasm32" | "wasm64") => {}
                            _ => {
                                panic!(
                                    "`PassMode::Direct` for aggregates only allowed for \"unadjusted\" functions and on wasm\n\
                                      Problematic type: {:#?}",
                                    arg.layout,
                                );
                            }
                        }
                    }
                }
            }
            PassMode::Pair(_, _) => {
                // Similar to `Direct`, we need to make sure that backends use `layout.backend_repr`
                // and ignore the rest of the layout.
                assert!(
                    matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)),
                    "PassMode::Pair for type {}",
                    arg.layout.ty
                );
            }
            PassMode::Cast { .. } => {
                // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
                assert!(arg.layout.is_sized());
            }
            PassMode::Indirect { meta_attrs: None, .. } => {
                // No metadata, must be sized.
                // Conceptually, unsized arguments must be copied around, which requires dynamically
                // determining their size, which we cannot do without metadata. Consult
                // t-opsem before removing this check.
                assert!(arg.layout.is_sized());
            }
            PassMode::Indirect { meta_attrs: Some(_), on_stack, .. } => {
                // With metadata. Must be unsized and not on the stack.
                assert!(arg.layout.is_unsized() && !on_stack);
                // Also, must not be `extern` type.
                let tail = tcx.struct_tail_for_codegen(arg.layout.ty, cx.typing_env);
                if matches!(tail.kind(), ty::Foreign(..)) {
                    // These types do not have metadata, so having `meta_attrs` is bogus.
                    // Conceptually, unsized arguments must be copied around, which requires dynamically
                    // determining their size. Therefore, we cannot allow `extern` types here. Consult
                    // t-opsem before removing this check.
                    panic!("unsized arguments must not be `extern` types");
                }
            }
        }
    }

    // Check every argument, then the return place under the same rules.
    for arg in fn_abi.args.iter() {
        fn_arg_sanity_check(cx, fn_abi, spec_abi, arg);
    }
    fn_arg_sanity_check(cx, fn_abi, spec_abi, &fn_abi.ret);
}
539
/// Builds a fresh (uncached) `FnAbi` for `sig` plus any `extra_args`
/// (the tupled "rust-call" arguments or C-variadic extras). When `instance`
/// is provided, instance-specific adjustments apply: an implicit caller
/// location argument, thin-`self` rewriting for virtual calls, and
/// def-id-based attribute deduction. The result is arena-allocated.
#[tracing::instrument(level = "debug", skip(cx, instance))]
fn fn_abi_new_uncached<'tcx>(
    cx: &LayoutCx<'tcx>,
    sig: ty::FnSig<'tcx>,
    extra_args: &[Ty<'tcx>],
    instance: Option<ty::Instance<'tcx>>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
    let tcx = cx.tcx();
    // For virtual calls we deliberately forget the def-id: the vtable may
    // dispatch to a different function, so per-def-id deductions are unsound.
    let (caller_location, determined_fn_def_id, is_virtual_call) = if let Some(instance) = instance
    {
        let is_virtual_call = matches!(instance.def, ty::InstanceKind::Virtual(..));
        (
            instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty()),
            if is_virtual_call { None } else { Some(instance.def_id()) },
            is_virtual_call,
        )
    } else {
        (None, None, false)
    };
    let sig = tcx.normalize_erasing_regions(cx.typing_env, sig);

    let conv = conv_from_spec_abi(cx.tcx(), sig.abi, sig.c_variadic);

    // For "rust-call" functions, the trailing tuple argument is flattened into
    // individual arguments; `extra_args` then carries the tuple's element types.
    let mut inputs = sig.inputs();
    let extra_args = if sig.abi == ExternAbi::RustCall {
        assert!(!sig.c_variadic && extra_args.is_empty());

        if let Some(input) = sig.inputs().last()
            && let ty::Tuple(tupled_arguments) = input.kind()
        {
            inputs = &sig.inputs()[0..sig.inputs().len() - 1];
            tupled_arguments
        } else {
            bug!(
                "argument to function with \"rust-call\" ABI \
                    is not a tuple"
            );
        }
    } else {
        assert!(sig.c_variadic || extra_args.is_empty());
        extra_args
    };

    let is_drop_in_place =
        determined_fn_def_id.is_some_and(|def_id| tcx.is_lang_item(def_id, LangItem::DropInPlace));

    // Builds the `ArgAbi` for one input (`arg_idx == Some(i)`) or for the
    // return place (`arg_idx == None`).
    let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, &'tcx FnAbiError<'tcx>> {
        let span = tracing::debug_span!("arg_of");
        let _entered = span.enter();
        let is_return = arg_idx.is_none();
        // The first argument of `drop_in_place` must be a raw pointer; record
        // its pointee so scalar adjustment can treat it like `&mut`.
        let is_drop_target = is_drop_in_place && arg_idx == Some(0);
        let drop_target_pointee = is_drop_target.then(|| match ty.kind() {
            ty::RawPtr(ty, _) => *ty,
            _ => bug!("argument to drop_in_place is not a raw ptr: {:?}", ty),
        });

        // Layout errors are arena-allocated so they can be returned by reference.
        let layout = cx.layout_of(ty).map_err(|err| &*tcx.arena.alloc(FnAbiError::Layout(*err)))?;
        let layout = if is_virtual_call && arg_idx == Some(0) {
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            make_thin_self_ptr(cx, layout)
        } else {
            layout
        };

        let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
            let mut attrs = ArgAttributes::new();
            adjust_for_rust_scalar(
                *cx,
                &mut attrs,
                scalar,
                *layout,
                offset,
                is_return,
                drop_target_pointee,
            );
            attrs
        });

        if arg.layout.is_zst() {
            arg.mode = PassMode::Ignore;
        }

        Ok(arg)
    };

    let mut fn_abi = FnAbi {
        ret: arg_of(sig.output(), None)?,
        args: inputs
            .iter()
            .copied()
            .chain(extra_args.iter().copied())
            .chain(caller_location)
            .enumerate()
            .map(|(i, ty)| arg_of(ty, Some(i)))
            .collect::<Result<_, _>>()?,
        c_variadic: sig.c_variadic,
        fixed_count: inputs.len() as u32,
        conv,
        can_unwind: fn_can_unwind(
            tcx,
            // Since `#[rustc_nounwind]` can change unwinding, we cannot infer unwinding by `fn_def_id` for a virtual call.
            determined_fn_def_id,
            sig.abi,
        ),
    };
    fn_abi_adjust_for_abi(
        cx,
        &mut fn_abi,
        sig.abi,
        // If this is a virtual call, we cannot pass the `fn_def_id`, as it might call other
        // functions from vtable. Internally, `deduced_param_attrs` attempts to infer attributes by
        // visit the function body.
        determined_fn_def_id,
    );
    debug!("fn_abi_new_uncached = {:?}", fn_abi);
    fn_abi_sanity_check(cx, &fn_abi, sig.abi);
    Ok(tcx.arena.alloc(fn_abi))
}
660
661#[tracing::instrument(level = "trace", skip(cx))]
662fn fn_abi_adjust_for_abi<'tcx>(
663    cx: &LayoutCx<'tcx>,
664    fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
665    abi: ExternAbi,
666    fn_def_id: Option<DefId>,
667) {
668    if abi == ExternAbi::Unadjusted {
669        // The "unadjusted" ABI passes aggregates in "direct" mode. That's fragile but needed for
670        // some LLVM intrinsics.
671        fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) {
672            // This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be intended,
673            // but who knows what breaks if we change this now.
674            if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
675                assert!(
676                    arg.layout.backend_repr.is_sized(),
677                    "'unadjusted' ABI does not support unsized arguments"
678                );
679            }
680            arg.make_direct_deprecated();
681        }
682
683        unadjust(&mut fn_abi.ret);
684        for arg in fn_abi.args.iter_mut() {
685            unadjust(arg);
686        }
687        return;
688    }
689
690    let tcx = cx.tcx();
691
692    if abi == ExternAbi::Rust || abi == ExternAbi::RustCall || abi == ExternAbi::RustIntrinsic {
693        fn_abi.adjust_for_rust_abi(cx, abi);
694
695        // Look up the deduced parameter attributes for this function, if we have its def ID and
696        // we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
697        // as appropriate.
698        let deduced_param_attrs =
699            if tcx.sess.opts.optimize != OptLevel::No && tcx.sess.opts.incremental.is_none() {
700                fn_def_id.map(|fn_def_id| tcx.deduced_param_attrs(fn_def_id)).unwrap_or_default()
701            } else {
702                &[]
703            };
704
705        for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
706            if arg.is_ignore() {
707                continue;
708            }
709
710            // If we deduced that this parameter was read-only, add that to the attribute list now.
711            //
712            // The `readonly` parameter only applies to pointers, so we can only do this if the
713            // argument was passed indirectly. (If the argument is passed directly, it's an SSA
714            // value, so it's implicitly immutable.)
715            if let &mut PassMode::Indirect { ref mut attrs, .. } = &mut arg.mode {
716                // The `deduced_param_attrs` list could be empty if this is a type of function
717                // we can't deduce any parameters for, so make sure the argument index is in
718                // bounds.
719                if let Some(deduced_param_attrs) = deduced_param_attrs.get(arg_idx) {
720                    if deduced_param_attrs.read_only {
721                        attrs.regular.insert(ArgAttribute::ReadOnly);
722                        debug!("added deduced read-only attribute");
723                    }
724                }
725            }
726        }
727    } else {
728        fn_abi.adjust_for_foreign_abi(cx, abi);
729    }
730}
731
732#[tracing::instrument(level = "debug", skip(cx))]
733fn make_thin_self_ptr<'tcx>(
734    cx: &(impl HasTyCtxt<'tcx> + HasTypingEnv<'tcx>),
735    layout: TyAndLayout<'tcx>,
736) -> TyAndLayout<'tcx> {
737    let tcx = cx.tcx();
738    let wide_pointer_ty = if layout.is_unsized() {
739        // unsized `self` is passed as a pointer to `self`
740        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
741        Ty::new_mut_ptr(tcx, layout.ty)
742    } else {
743        match layout.backend_repr {
744            BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (),
745            _ => bug!("receiver type has unsupported layout: {:?}", layout),
746        }
747
748        // In the case of Rc<Self>, we need to explicitly pass a *mut RcInner<Self>
749        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
750        // elsewhere in the compiler as a method on a `dyn Trait`.
751        // To get the type `*mut RcInner<Self>`, we just keep unwrapping newtypes until we
752        // get a built-in pointer type
753        let mut wide_pointer_layout = layout;
754        while !wide_pointer_layout.ty.is_raw_ptr() && !wide_pointer_layout.ty.is_ref() {
755            wide_pointer_layout = wide_pointer_layout
756                .non_1zst_field(cx)
757                .expect("not exactly one non-1-ZST field in a `DispatchFromDyn` type")
758                .1
759        }
760
761        wide_pointer_layout.ty
762    };
763
764    // we now have a type like `*mut RcInner<dyn Trait>`
765    // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
766    // this is understood as a special case elsewhere in the compiler
767    let unit_ptr_ty = Ty::new_mut_ptr(tcx, tcx.types.unit);
768
769    TyAndLayout {
770        ty: wide_pointer_ty,
771
772        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
773        // should always work because the type is always `*mut ()`.
774        ..tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(unit_ptr_ty)).unwrap()
775    }
776}