// rustc_ty_utils/layout.rs

1use std::fmt::Debug;
2use std::iter;
3
4use hir::def_id::DefId;
5use rustc_abi::Integer::{I8, I32};
6use rustc_abi::Primitive::{self, Float, Int, Pointer};
7use rustc_abi::{
8    AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape,
9    HasDataLayout, Layout, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size,
10    StructKind, TagEncoding, VariantIdx, Variants, WrappingRange,
11};
12use rustc_index::bit_set::DenseBitSet;
13use rustc_index::{IndexSlice, IndexVec};
14use rustc_middle::bug;
15use rustc_middle::mir::{CoroutineLayout, CoroutineSavedLocal};
16use rustc_middle::query::Providers;
17use rustc_middle::ty::layout::{
18    FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, MAX_SIMD_LANES, TyAndLayout,
19};
20use rustc_middle::ty::print::with_no_trimmed_paths;
21use rustc_middle::ty::{
22    self, AdtDef, CoroutineArgsExt, EarlyBinder, GenericArgsRef, PseudoCanonicalInput, Ty, TyCtxt,
23    TypeVisitableExt,
24};
25use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
26use rustc_span::{Symbol, sym};
27use tracing::{debug, instrument, trace};
28use {rustc_abi as abi, rustc_hir as hir};
29
30use crate::errors::{
31    MultipleArrayFieldsSimdType, NonPrimitiveSimdType, OversizedSimdType, ZeroLengthSimdType,
32};
33
34mod invariant;
35
36pub(crate) fn provide(providers: &mut Providers) {
37    *providers = Providers { layout_of, ..*providers };
38}
39
/// Provider for the `layout_of` query: normalizes `ty` in the given typing
/// environment and computes its layout. If normalization changes the type,
/// the query is re-entered with the normalized type so the result is cached
/// under both keys.
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, &'tcx LayoutError<'tcx>> {
    let PseudoCanonicalInput { typing_env, value: ty } = query;
    debug!(?ty);

    // Optimization: We convert to TypingMode::PostAnalysis and convert opaque types in
    // the where bounds to their hidden types. This reduces overall uncached invocations
    // of `layout_of` and is thus a small performance improvement.
    let typing_env = typing_env.with_post_analysis_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(typing_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            // Report the normalization failure as a layout error, interned
            // in the arena so it can be returned by reference.
            return Err(tcx
                .arena
                .alloc(LayoutError::NormalizationFailure(ty, normalization_error)));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(typing_env.as_query_input(ty));
    }

    let cx = LayoutCx::new(tcx, typing_env);

    let layout = layout_of_uncached(&cx, ty)?;
    let layout = TyAndLayout { ty, layout };

    // If we are running with `-Zprint-type-sizes`, maybe record layouts
    // for dumping later.
    if cx.tcx().sess.opts.unstable_opts.print_type_sizes {
        record_layout_for_printing(&cx, layout);
    }

    // Sanity-check the computed layout before handing it out.
    invariant::layout_sanity_check(&cx, &layout);

    Ok(layout)
}
87
88fn error<'tcx>(cx: &LayoutCx<'tcx>, err: LayoutError<'tcx>) -> &'tcx LayoutError<'tcx> {
89    cx.tcx().arena.alloc(err)
90}
91
92fn map_error<'tcx>(
93    cx: &LayoutCx<'tcx>,
94    ty: Ty<'tcx>,
95    err: LayoutCalculatorError<TyAndLayout<'tcx>>,
96) -> &'tcx LayoutError<'tcx> {
97    let err = match err {
98        LayoutCalculatorError::SizeOverflow => {
99            // This is sometimes not a compile error in `check` builds.
100            // See `tests/ui/limits/huge-enum.rs` for an example.
101            LayoutError::SizeOverflow(ty)
102        }
103        LayoutCalculatorError::UnexpectedUnsized(field) => {
104            // This is sometimes not a compile error if there are trivially false where clauses.
105            // See `tests/ui/layout/trivial-bounds-sized.rs` for an example.
106            assert!(field.layout.is_unsized(), "invalid layout error {err:#?}");
107            if cx.typing_env.param_env.caller_bounds().is_empty() {
108                cx.tcx().dcx().delayed_bug(format!(
109                    "encountered unexpected unsized field in layout of {ty:?}: {field:#?}"
110                ));
111            }
112            LayoutError::Unknown(ty)
113        }
114        LayoutCalculatorError::EmptyUnion => {
115            // This is always a compile error.
116            let guar =
117                cx.tcx().dcx().delayed_bug(format!("computed layout of empty union: {ty:?}"));
118            LayoutError::ReferencesError(guar)
119        }
120        LayoutCalculatorError::ReprConflict => {
121            // packed enums are the only known trigger of this, but others might arise
122            let guar = cx
123                .tcx()
124                .dcx()
125                .delayed_bug(format!("computed impossible repr (packed enum?): {ty:?}"));
126            LayoutError::ReferencesError(guar)
127        }
128    };
129    error(cx, err)
130}
131
132fn univariant_uninterned<'tcx>(
133    cx: &LayoutCx<'tcx>,
134    ty: Ty<'tcx>,
135    fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>,
136    repr: &ReprOptions,
137    kind: StructKind,
138) -> Result<LayoutData<FieldIdx, VariantIdx>, &'tcx LayoutError<'tcx>> {
139    let pack = repr.pack;
140    if pack.is_some() && repr.align.is_some() {
141        cx.tcx().dcx().bug("struct cannot be packed and aligned");
142    }
143
144    cx.calc.univariant(fields, repr, kind).map_err(|err| map_error(cx, ty, err))
145}
146
147fn extract_const_value<'tcx>(
148    const_: ty::Const<'tcx>,
149    ty: Ty<'tcx>,
150    cx: &LayoutCx<'tcx>,
151) -> Result<ty::Value<'tcx>, &'tcx LayoutError<'tcx>> {
152    match const_.kind() {
153        ty::ConstKind::Value(cv) => Ok(cv),
154        ty::ConstKind::Error(guar) => {
155            return Err(error(cx, LayoutError::ReferencesError(guar)));
156        }
157        ty::ConstKind::Param(_) | ty::ConstKind::Expr(_) => {
158            if !const_.has_param() {
159                bug!("no generic type found in the type: {ty:?}");
160            }
161            return Err(error(cx, LayoutError::TooGeneric(ty)));
162        }
163        ty::ConstKind::Unevaluated(_) => {
164            if !const_.has_param() {
165                return Err(error(cx, LayoutError::Unknown(ty)));
166            } else {
167                return Err(error(cx, LayoutError::TooGeneric(ty)));
168            }
169        }
170        ty::ConstKind::Infer(_) | ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
171            bug!("unexpected type: {ty:?}");
172        }
173    }
174}
175
/// Computes the layout of `ty` from scratch, without going through the
/// `layout_of` query cache. Dispatches on the type's kind: primitives get
/// scalar layouts, pointers become thin or wide depending on the pointee's
/// metadata, and aggregates (tuples, closures, ADTs, SIMD vectors) are
/// delegated to the layout calculator in `cx.calc`. Types with no known
/// layout (`Alias`, `Param`, `Placeholder`) produce a `LayoutError`.
fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
    // Types that reference `ty::Error` pessimistically don't have a meaningful layout.
    // The only side-effect of this is possibly worse diagnostics in case the layout
    // was actually computable (like if the `ty::Error` showed up only in a `PhantomData`).
    if let Err(guar) = ty.error_reported() {
        return Err(error(cx, LayoutError::ReferencesError(guar)));
    }

    let tcx = cx.tcx();
    let dl = cx.data_layout();
    // A scalar covering the full value range for the primitive's size.
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    let scalar = |value: Primitive| tcx.mk_layout(LayoutData::scalar(cx, scalar_unit(value)));

    // Convenience wrapper that interns the result of `univariant_uninterned`.
    let univariant =
        |fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>, repr: &ReprOptions, kind| {
            Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
        };
    // All (non-region) inference variables should be resolved by this point.
    debug_assert!(!ty.has_non_region_infer());

    Ok(match *ty.kind() {
        // Pattern types reuse the base type's layout with a narrowed
        // valid range (which also becomes a niche).
        ty::Pat(ty, pat) => {
            let layout = cx.layout_of(ty)?.layout;
            let mut layout = LayoutData::clone(&layout.0);
            match *pat {
                ty::PatternKind::Range { start, end, include_end } => {
                    if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
                        &mut layout.backend_repr
                    {
                        if let Some(start) = start {
                            scalar.valid_range_mut().start = extract_const_value(start, ty, cx)?
                                .try_to_bits(tcx, cx.typing_env)
                                .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
                        }
                        if let Some(end) = end {
                            let mut end = extract_const_value(end, ty, cx)?
                                .try_to_bits(tcx, cx.typing_env)
                                .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
                            // An exclusive bound is stored as the inclusive
                            // bound one below it.
                            if !include_end {
                                end = end.wrapping_sub(1);
                            }
                            scalar.valid_range_mut().end = end;
                        }

                        let niche = Niche {
                            offset: Size::ZERO,
                            value: scalar.primitive(),
                            valid_range: scalar.valid_range(cx),
                        };

                        layout.largest_niche = Some(niche);

                        tcx.mk_layout(layout)
                    } else {
                        bug!("pattern type with range but not scalar layout: {ty:?}, {layout:?}")
                    }
                }
            }
        }

        // Basic scalars.
        ty::Bool => tcx.mk_layout(LayoutData::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.mk_layout(LayoutData::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
        ty::Int(ity) => scalar(Int(abi::Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(abi::Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(Float(abi::Float::from_float_ty(fty))),
        ty::FnPtr(..) => {
            // Function pointers are non-null, so start the valid range at 1.
            let mut ptr = scalar_unit(Pointer(dl.instruction_address_space));
            ptr.valid_range_mut().start = 1;
            tcx.mk_layout(LayoutData::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.mk_layout(cx.calc.layout_of_never_type()),

        // Potentially-wide pointers.
        ty::Ref(_, pointee, _) | ty::RawPtr(pointee, _) => {
            let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA));
            // References (unlike raw pointers) are non-null.
            if !ty.is_raw_ptr() {
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(cx.typing_env, pointee);
            if pointee.is_sized(tcx, cx.typing_env) {
                // Sized pointees get a thin pointer.
                return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr)));
            }

            let metadata = if let Some(metadata_def_id) = tcx.lang_items().metadata_type()
                // Projection eagerly bails out when the pointee references errors,
                // fall back to structurally deducing metadata.
                && !pointee.references_error()
            {
                let pointee_metadata = Ty::new_projection(tcx, metadata_def_id, [pointee]);
                let metadata_ty =
                    match tcx.try_normalize_erasing_regions(cx.typing_env, pointee_metadata) {
                        Ok(metadata_ty) => metadata_ty,
                        Err(mut err) => {
                            // Usually `<Ty as Pointee>::Metadata` can't be normalized because
                            // its struct tail cannot be normalized either, so try to get a
                            // more descriptive layout error here, which will lead to less confusing
                            // diagnostics.
                            //
                            // We use the raw struct tail function here to get the first tail
                            // that is an alias, which is likely the cause of the normalization
                            // error.
                            match tcx.try_normalize_erasing_regions(
                                cx.typing_env,
                                tcx.struct_tail_raw(pointee, |ty| ty, || {}),
                            ) {
                                Ok(_) => {}
                                Err(better_err) => {
                                    err = better_err;
                                }
                            }
                            return Err(error(cx, LayoutError::NormalizationFailure(pointee, err)));
                        }
                    };

                let metadata_layout = cx.layout_of(metadata_ty)?;
                // If the metadata is a 1-zst, then the pointer is thin.
                if metadata_layout.is_1zst() {
                    return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr)));
                }

                let BackendRepr::Scalar(metadata) = metadata_layout.backend_repr else {
                    return Err(error(cx, LayoutError::Unknown(pointee)));
                };

                metadata
            } else {
                // No usable `Pointee::Metadata` projection: deduce the
                // metadata structurally from the unsized tail.
                let unsized_part = tcx.struct_tail_for_codegen(pointee, cx.typing_env);

                match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => {
                        return Err(error(cx, LayoutError::Unknown(pointee)));
                    }
                }
            };

            // Effectively a (ptr, meta) tuple.
            tcx.mk_layout(cx.calc.scalar_pair(data_ptr, metadata))
        }

        ty::Dynamic(_, _, ty::DynStar) => {
            // `dyn*` is a (data, vtable) pair where the data may be any value
            // and the vtable pointer is non-null.
            let mut data = scalar_unit(Pointer(AddressSpace::DATA));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
            vtable.valid_range_mut().start = 1;
            tcx.mk_layout(cx.calc.scalar_pair(data, vtable))
        }

        // Arrays and slices.
        ty::Array(element, count) => {
            let count = extract_const_value(count, ty, cx)?
                .try_to_target_usize(tcx)
                .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;

            let element = cx.layout_of(element)?;
            let size = element
                .size
                .checked_mul(count, dl)
                .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;

            let abi = if count != 0 && ty.is_privately_uninhabited(tcx, cx.typing_env) {
                BackendRepr::Uninhabited
            } else {
                BackendRepr::Memory { sized: true }
            };

            // A zero-length array has no bytes, hence no niche.
            let largest_niche = if count != 0 { element.largest_niche } else { None };

            tcx.mk_layout(LayoutData {
                variants: Variants::Single { index: FIRST_VARIANT },
                fields: FieldsShape::Array { stride: element.size, count },
                backend_repr: abi,
                largest_niche,
                align: element.align,
                size,
                max_repr_align: None,
                unadjusted_abi_align: element.align.abi,
                randomization_seed: element.randomization_seed.wrapping_add(count),
            })
        }
        ty::Slice(element) => {
            let element = cx.layout_of(element)?;
            tcx.mk_layout(LayoutData {
                variants: Variants::Single { index: FIRST_VARIANT },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                backend_repr: BackendRepr::Memory { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
                max_repr_align: None,
                unadjusted_abi_align: element.align.abi,
                // adding a randomly chosen value to distinguish slices
                randomization_seed: element.randomization_seed.wrapping_add(0x2dcba99c39784102),
            })
        }
        ty::Str => tcx.mk_layout(LayoutData {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            backend_repr: BackendRepr::Memory { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
            max_repr_align: None,
            unadjusted_abi_align: dl.i8_align.abi,
            // another random value
            randomization_seed: 0xc1325f37d127be22,
        }),

        // Odd unit types.
        ty::FnDef(..) => {
            univariant(IndexSlice::empty(), &ReprOptions::default(), StructKind::AlwaysSized)?
        }
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            // Extern types and `dyn Trait` behave like a zero-field struct,
            // except they are unsized.
            let mut unit = univariant_uninterned(
                cx,
                ty,
                IndexSlice::empty(),
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.backend_repr {
                BackendRepr::Memory { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.mk_layout(unit)
        }

        ty::Coroutine(def_id, args) => coroutine_layout(cx, ty, def_id, args)?,

        ty::Closure(_, args) => {
            // Closures are laid out as a struct of their captured upvars.
            let tys = args.as_closure().upvar_tys();
            univariant(
                &tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::CoroutineClosure(_, args) => {
            let tys = args.as_coroutine_closure().upvar_tys();
            univariant(
                &tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            // Only a non-empty tuple can end in an unsized last element.
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| cx.layout_of(k)).try_collect::<IndexVec<_, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }

        // SIMD vector types.
        ty::Adt(def, args) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                let guar = tcx
                    .dcx()
                    .delayed_bug("#[repr(simd)] was applied to an ADT that is not a struct");
                return Err(error(cx, LayoutError::ReferencesError(guar)));
            }

            let fields = &def.non_enum_variant().fields;

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if fields.is_empty() {
                tcx.dcx().emit_fatal(ZeroLengthSimdType { ty })
            }

            // Type of the first ADT field:
            let f0_ty = fields[FieldIdx::ZERO].ty(tcx, args);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in fields {
                if fi.ty(tcx, args) != f0_ty {
                    let guar = tcx.dcx().delayed_bug(
                        "#[repr(simd)] was applied to an ADT with heterogeneous field type",
                    );
                    return Err(error(cx, LayoutError::ReferencesError(guar)));
                }
            }

            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            // the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // Can't be caught by typeck with a generic simd type.
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.dcx().emit_fatal(MultipleArrayFieldsSimdType { ty });
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                    return Err(error(cx, LayoutError::Unknown(ty)));
                };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };

            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.dcx().emit_fatal(ZeroLengthSimdType { ty });
            } else if e_len > MAX_SIMD_LANES {
                tcx.dcx().emit_fatal(OversizedSimdType { ty, max_lanes: MAX_SIMD_LANES });
            }

            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
                // This error isn't caught in typeck, e.g., if
                // the element type of the vector is generic.
                tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty });
            };

            // Compute the size and alignment of the vector:
            let size = e_ly
                .size
                .checked_mul(e_len, dl)
                .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;

            let (abi, align) = if def.repr().packed() && !e_len.is_power_of_two() {
                // Non-power-of-two vectors have padding up to the next power-of-two.
                // If we're a packed repr, remove the padding while keeping the alignment as close
                // to a vector as possible.
                (
                    BackendRepr::Memory { sized: true },
                    AbiAndPrefAlign {
                        abi: Align::max_for_offset(size),
                        pref: dl.vector_align(size).pref,
                    },
                )
            } else {
                (BackendRepr::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
            };
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() }
            } else {
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.mk_layout(LayoutData {
                variants: Variants::Single { index: FIRST_VARIANT },
                fields,
                backend_repr: abi,
                largest_niche: e_ly.largest_niche,
                size,
                align,
                max_repr_align: None,
                unadjusted_abi_align: align.abi,
                randomization_seed: e_ly.randomization_seed.wrapping_add(e_len),
            })
        }

        // ADTs.
        ty::Adt(def, args) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| cx.layout_of(field.ty(tcx, args)))
                        .try_collect::<IndexVec<_, _>>()
                })
                .try_collect::<IndexVec<VariantIdx, _>>()?;

            if def.is_union() {
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    let guar = tcx.dcx().span_delayed_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(error(cx, LayoutError::ReferencesError(guar)));
                }

                return Ok(tcx.mk_layout(
                    cx.calc
                        .layout_of_union(&def.repr(), &variants)
                        .map_err(|err| map_error(cx, ty, err))?,
                ));
            }

            let get_discriminant_type =
                |min, max| abi::Integer::repr_discr(tcx, ty, &def.repr(), min, max);

            // Only enums have explicit discriminants; for structs this
            // iterator is empty.
            let discriminants_iter = || {
                def.is_enum()
                    .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
                    .into_iter()
                    .flatten()
            };

            let dont_niche_optimize_enum = def.repr().inhibit_enum_layout_opt()
                || def
                    .variants()
                    .iter_enumerated()
                    .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()));

            let maybe_unsized = def.is_struct()
                && def.non_enum_variant().tail_opt().is_some_and(|last_field| {
                    let typing_env = ty::TypingEnv::post_analysis(tcx, def.did());
                    !tcx.type_of(last_field.did).instantiate_identity().is_sized(tcx, typing_env)
                });

            let layout = cx
                .calc
                .layout_of_struct_or_enum(
                    &def.repr(),
                    &variants,
                    def.is_enum(),
                    def.is_unsafe_cell(),
                    tcx.layout_scalar_valid_range(def.did()),
                    get_discriminant_type,
                    discriminants_iter(),
                    dont_niche_optimize_enum,
                    !maybe_unsized,
                )
                .map_err(|err| map_error(cx, ty, err))?;

            if !maybe_unsized && layout.is_unsized() {
                bug!("got unsized layout for type that cannot be unsized {ty:?}: {layout:#?}");
            }

            // If the struct tail is sized and can be unsized, check that unsizing doesn't move the fields around.
            if cfg!(debug_assertions)
                && maybe_unsized
                && def.non_enum_variant().tail().ty(tcx, args).is_sized(tcx, cx.typing_env)
            {
                // Recompute the layout with the tail replaced by `[u8]` and
                // compare field offsets against the sized layout.
                let mut variants = variants;
                let tail_replacement = cx.layout_of(Ty::new_slice(tcx, tcx.types.u8)).unwrap();
                *variants[FIRST_VARIANT].raw.last_mut().unwrap() = tail_replacement;

                let Ok(unsized_layout) = cx.calc.layout_of_struct_or_enum(
                    &def.repr(),
                    &variants,
                    def.is_enum(),
                    def.is_unsafe_cell(),
                    tcx.layout_scalar_valid_range(def.did()),
                    get_discriminant_type,
                    discriminants_iter(),
                    dont_niche_optimize_enum,
                    !maybe_unsized,
                ) else {
                    bug!("failed to compute unsized layout of {ty:?}");
                };

                let FieldsShape::Arbitrary { offsets: sized_offsets, .. } = &layout.fields else {
                    bug!("unexpected FieldsShape for sized layout of {ty:?}: {:?}", layout.fields);
                };
                let FieldsShape::Arbitrary { offsets: unsized_offsets, .. } =
                    &unsized_layout.fields
                else {
                    bug!(
                        "unexpected FieldsShape for unsized layout of {ty:?}: {:?}",
                        unsized_layout.fields
                    );
                };

                let (sized_tail, sized_fields) = sized_offsets.raw.split_last().unwrap();
                let (unsized_tail, unsized_fields) = unsized_offsets.raw.split_last().unwrap();

                if sized_fields != unsized_fields {
                    bug!("unsizing {ty:?} changed field order!\n{layout:?}\n{unsized_layout:?}");
                }

                if sized_tail < unsized_tail {
                    bug!("unsizing {ty:?} moved tail backwards!\n{layout:?}\n{unsized_layout:?}");
                }
            }

            tcx.mk_layout(layout)
        }

        ty::UnsafeBinder(bound_ty) => {
            let ty = tcx.instantiate_bound_regions_with_erased(bound_ty.into());
            cx.layout_of(ty)?.layout
        }

        // Types with no meaningful known layout.
        ty::Alias(..) => {
            if ty.has_param() {
                return Err(error(cx, LayoutError::TooGeneric(ty)));
            }
            // NOTE(eddyb) `layout_of` query should've normalized these away,
            // if that was possible, so there's no reason to try again here.
            return Err(error(cx, LayoutError::Unknown(ty)));
        }

        ty::Bound(..) | ty::CoroutineWitness(..) | ty::Infer(_) | ty::Error(_) => {
            bug!("Layout::compute: unexpected type `{}`", ty)
        }

        ty::Param(_) => {
            return Err(error(cx, LayoutError::TooGeneric(ty)));
        }

        ty::Placeholder(..) => {
            return Err(error(cx, LayoutError::Unknown(ty)));
        }
    })
}
741
/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    /// Not yet seen in any variant (the initial state for every local).
    Unassigned,
    /// Seen in exactly one variant so far; eligible to overlap with locals
    /// assigned to other variants.
    Assigned(VariantIdx),
    /// Used by multiple variants, or evicted due to a storage conflict;
    /// stored in the common prefix instead. Once promotion order has been
    /// decided, the `FieldIdx` is this local's position among the promoted
    /// prefix fields (`None` until then).
    Ineligible(Option<FieldIdx>),
}
749
750// When laying out coroutines, we divide our saved local fields into two
751// categories: overlap-eligible and overlap-ineligible.
752//
753// Those fields which are ineligible for overlap go in a "prefix" at the
754// beginning of the layout, and always have space reserved for them.
755//
756// Overlap-eligible fields are only assigned to one variant, so we lay
757// those fields out for each variant and put them right after the
758// prefix.
759//
760// Finally, in the layout details, we point to the fields from the
761// variants they are assigned to. It is possible for some fields to be
762// included in multiple variants. No field ever "moves around" in the
763// layout; its offset is always the same.
764//
765// Also included in the layout are the upvars and the discriminant.
766// These are included as fields on the "outer" layout; they are not part
767// of any variant.
768
/// Compute the eligibility and assignment of each local.
///
/// Returns the set of overlap-ineligible saved locals (those that will be
/// "promoted" into the coroutine's common prefix) together with the final
/// per-local assignment. On return, every `Ineligible` entry carries
/// `Some(field_idx)`: the local's position among the promoted prefix fields.
fn coroutine_saved_local_eligibility(
    info: &CoroutineLayout<'_>,
) -> (DenseBitSet<CoroutineSavedLocal>, IndexVec<CoroutineSavedLocal, SavedLocalEligibility>) {
    use SavedLocalEligibility::*;

    // Every saved local starts out unassigned.
    let mut assignments: IndexVec<CoroutineSavedLocal, SavedLocalEligibility> =
        IndexVec::from_elem(Unassigned, &info.field_tys);

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our coroutine.
    let mut ineligible_locals = DenseBitSet::new_empty(info.field_tys.len());

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local, variant_index, idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }

    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in info.storage_conflicts.rows() {
        let conflicts_a = info.storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in info.storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the coroutine layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            //
            // We evict whichever of the two has the larger conflict count
            // (ties evict `local_b`), on the theory that the most-constrained
            // local frees up the most overlap opportunities when removed.
            let conflicts_b = info.storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }

    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = DenseBitSet::new_empty(info.variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    // The bitset's iteration order fixes each promoted local's field index
    // within the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(FieldIdx::from_usize(idx)));
        }
    }
    debug!("coroutine saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}
864
/// Compute the full coroutine layout.
///
/// The resulting layout is a tag-discriminated union: a common "prefix"
/// holds the upvars, the tag, and every overlap-ineligible ("promoted")
/// saved local, while each variant appends only its own overlap-eligible
/// locals after that prefix.
///
/// Fails with `LayoutError::Unknown` when the coroutine's saved-local info
/// (`tcx.coroutine_layout`) is unavailable.
fn coroutine_layout<'tcx>(
    cx: &LayoutCx<'tcx>,
    ty: Ty<'tcx>,
    def_id: hir::def_id::DefId,
    args: GenericArgsRef<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
    use SavedLocalEligibility::*;
    let tcx = cx.tcx();
    // Saved-local types are stored generically; instantiate them with this
    // coroutine's actual generic args.
    let instantiate_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).instantiate(tcx, args);

    let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else {
        return Err(error(cx, LayoutError::Unknown(ty)));
    };
    let (ineligible_locals, assignments) = coroutine_saved_local_eligibility(info);

    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    let tag_index = args.as_coroutine().prefix_tys().len();

    // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (info.variant_fields.len() - 1) as u128;
    let discr_int = abi::Integer::fit_unsigned(max_discr);
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, /* signed = */ false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };
    let tag_layout = TyAndLayout {
        ty: discr_int.to_ty(tcx, /* signed = */ false),
        layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
    };

    // Promoted locals are wrapped in `MaybeUninit` since they are not
    // initialized in every coroutine state.
    let promoted_layouts = ineligible_locals.iter().map(|local| {
        let field_ty = instantiate_field(info.field_tys[local].ty);
        let uninit_ty = Ty::new_maybe_uninit(tcx, field_ty);
        cx.spanned_layout_of(uninit_ty, info.field_tys[local].source_info.span)
    });
    // Prefix field order: upvars, then the tag (at `tag_index`), then the
    // promoted locals.
    let prefix_layouts = args
        .as_coroutine()
        .prefix_tys()
        .iter()
        .map(|ty| cx.layout_of(ty))
        .chain(iter::once(Ok(tag_layout)))
        .chain(promoted_layouts)
        .try_collect::<IndexVec<_, _>>()?;
    let prefix = univariant_uninterned(
        cx,
        ty,
        &prefix_layouts,
        &ReprOptions::default(),
        StructKind::AlwaysSized,
    )?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);

    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // CoroutineLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = memory_index.invert_bijective_mapping();

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = FieldIdx::from_usize(tag_index + 1);
            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.as_usize()));
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
                .iter()
                .filter_map(|&i| i.as_u32().checked_sub(b_start.as_u32()).map(FieldIdx::from_u32))
                .collect();
            inverse_memory_index.raw.retain(|&i| i < b_start);
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted
            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        _ => bug!(),
    };

    // The full coroutine is at least as large/aligned as its prefix; each
    // variant computed below may grow these further.
    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = info
        .variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => bug!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => bug!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| {
                    let field_ty = instantiate_field(info.field_tys[*local].ty);
                    Ty::new_maybe_uninit(tcx, field_ty)
                });

            // Lay the variant-only fields out *after* the prefix, so their
            // offsets never collide with the promoted fields.
            let mut variant = univariant_uninterned(
                cx,
                ty,
                &variant_only_tys.map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                bug!();
            };

            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our CoroutineLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: FieldIdx = FieldIdx::MAX;
            debug_assert!(variant_fields.next_index() <= INVALID_FIELD_IDX);

            let mut combined_inverse_memory_index = IndexVec::from_elem_n(
                INVALID_FIELD_IDX,
                promoted_memory_index.len() + memory_index.len(),
            );
            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter_enumerated()
                .map(|(i, local)| {
                    // Variant-only fields are numbered after all promoted
                    // fields in memory order; promoted fields keep the
                    // offset/memory-index computed for the prefix.
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => bug!(),
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap();
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index] = i;
                    offset
                })
                .collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.raw.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(variant)
        })
        .try_collect::<IndexVec<VariantIdx, _>>()?;

    size = size.align_to(align.abi);

    // The coroutine is uninhabited if the prefix is, or if every variant is.
    let abi = if prefix.backend_repr.is_uninhabited()
        || variants.iter().all(|v| v.backend_repr.is_uninhabited())
    {
        BackendRepr::Uninhabited
    } else {
        BackendRepr::Memory { sized: true }
    };

    // this is similar to how ReprOptions populates its field_shuffle_seed
    let def_hash = tcx.def_path_hash(def_id).0.to_smaller_hash().as_u64();

    let layout = tcx.mk_layout(LayoutData {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        backend_repr: abi,
        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
        // self-referentiality), getting the discriminant can cause aliasing violations.
        // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
        // would do the same for us here.
        // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
        // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
        largest_niche: None,
        size,
        align,
        max_repr_align: None,
        unadjusted_abi_align: align.abi,
        randomization_seed: def_hash,
    });
    debug!("coroutine layout ({:?}): {:#?}", ty, layout);
    Ok(layout)
}
1081
1082fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tcx>) {
1083    // Ignore layouts that are done with non-empty environments or
1084    // non-monomorphic layouts, as the user only wants to see the stuff
1085    // resulting from the final codegen session.
1086    if layout.ty.has_non_region_param() || !cx.typing_env.param_env.caller_bounds().is_empty() {
1087        return;
1088    }
1089
1090    // (delay format until we actually need it)
1091    let record = |kind, packed, opt_discr_size, variants| {
1092        let type_desc = with_no_trimmed_paths!(format!("{}", layout.ty));
1093        cx.tcx().sess.code_stats.record_type_size(
1094            kind,
1095            type_desc,
1096            layout.align.abi,
1097            layout.size,
1098            packed,
1099            opt_discr_size,
1100            variants,
1101        );
1102    };
1103
1104    match *layout.ty.kind() {
1105        ty::Adt(adt_def, _) => {
1106            debug!("print-type-size t: `{:?}` process adt", layout.ty);
1107            let adt_kind = adt_def.adt_kind();
1108            let adt_packed = adt_def.repr().pack.is_some();
1109            let (variant_infos, opt_discr_size) = variant_info_for_adt(cx, layout, adt_def);
1110            record(adt_kind.into(), adt_packed, opt_discr_size, variant_infos);
1111        }
1112
1113        ty::Coroutine(def_id, args) => {
1114            debug!("print-type-size t: `{:?}` record coroutine", layout.ty);
1115            // Coroutines always have a begin/poisoned/end state with additional suspend points
1116            let (variant_infos, opt_discr_size) =
1117                variant_info_for_coroutine(cx, layout, def_id, args);
1118            record(DataTypeKind::Coroutine, false, opt_discr_size, variant_infos);
1119        }
1120
1121        ty::Closure(..) => {
1122            debug!("print-type-size t: `{:?}` record closure", layout.ty);
1123            record(DataTypeKind::Closure, false, None, vec![]);
1124        }
1125
1126        _ => {
1127            debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1128        }
1129    };
1130}
1131
1132fn variant_info_for_adt<'tcx>(
1133    cx: &LayoutCx<'tcx>,
1134    layout: TyAndLayout<'tcx>,
1135    adt_def: AdtDef<'tcx>,
1136) -> (Vec<VariantInfo>, Option<Size>) {
1137    let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1138        let mut min_size = Size::ZERO;
1139        let field_info: Vec<_> = flds
1140            .iter()
1141            .enumerate()
1142            .map(|(i, &name)| {
1143                let field_layout = layout.field(cx, i);
1144                let offset = layout.fields.offset(i);
1145                min_size = min_size.max(offset + field_layout.size);
1146                FieldInfo {
1147                    kind: FieldKind::AdtField,
1148                    name,
1149                    offset: offset.bytes(),
1150                    size: field_layout.size.bytes(),
1151                    align: field_layout.align.abi.bytes(),
1152                    type_name: None,
1153                }
1154            })
1155            .collect();
1156
1157        VariantInfo {
1158            name: n,
1159            kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1160            align: layout.align.abi.bytes(),
1161            size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1162            fields: field_info,
1163        }
1164    };
1165
1166    match layout.variants {
1167        Variants::Empty => (vec![], None),
1168
1169        Variants::Single { index } => {
1170            debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
1171            let variant_def = &adt_def.variant(index);
1172            let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1173            (vec![build_variant_info(Some(variant_def.name), &fields, layout)], None)
1174        }
1175
1176        Variants::Multiple { tag, ref tag_encoding, .. } => {
1177            debug!(
1178                "print-type-size `{:#?}` adt general variants def {}",
1179                layout.ty,
1180                adt_def.variants().len()
1181            );
1182            let variant_infos: Vec<_> = adt_def
1183                .variants()
1184                .iter_enumerated()
1185                .map(|(i, variant_def)| {
1186                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1187                    build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
1188                })
1189                .collect();
1190
1191            (
1192                variant_infos,
1193                match tag_encoding {
1194                    TagEncoding::Direct => Some(tag.size(cx)),
1195                    _ => None,
1196                },
1197            )
1198        }
1199    }
1200}
1201
/// Compute `-Zprint-type-sizes` variant information for a coroutine type.
///
/// Returns one `VariantInfo` per coroutine state — reordered so that the
/// `Returned` and `Panicked` states come last — together with the size of
/// the directly-encoded discriminant (if any). Returns nothing for layouts
/// that aren't tag-discriminated (single-variant/empty coroutines).
fn variant_info_for_coroutine<'tcx>(
    cx: &LayoutCx<'tcx>,
    layout: TyAndLayout<'tcx>,
    def_id: DefId,
    args: ty::GenericArgsRef<'tcx>,
) -> (Vec<VariantInfo>, Option<Size>) {
    use itertools::Itertools;

    let Variants::Multiple { tag, ref tag_encoding, tag_field, .. } = layout.variants else {
        return (vec![], None);
    };

    let coroutine = cx.tcx().coroutine_layout(def_id, args.as_coroutine().kind_ty()).unwrap();
    let upvar_names = cx.tcx().closure_saved_names_of_captured_variables(def_id);

    // Upvars appear in every variant's listing; compute their `FieldInfo`s
    // once up front from the outer layout.
    let mut upvars_size = Size::ZERO;
    let upvar_fields: Vec<_> = args
        .as_coroutine()
        .upvar_tys()
        .iter()
        .zip_eq(upvar_names)
        .enumerate()
        .map(|(field_idx, (_, name))| {
            let field_layout = layout.field(cx, field_idx);
            let offset = layout.fields.offset(field_idx);
            upvars_size = upvars_size.max(offset + field_layout.size);
            FieldInfo {
                kind: FieldKind::Upvar,
                name: *name,
                offset: offset.bytes(),
                size: field_layout.size.bytes(),
                align: field_layout.align.abi.bytes(),
                type_name: None,
            }
        })
        .collect();

    // One `VariantInfo` per coroutine state, in discriminant order for now.
    let mut variant_infos: Vec<_> = coroutine
        .variant_fields
        .iter_enumerated()
        .map(|(variant_idx, variant_def)| {
            let variant_layout = layout.for_variant(cx, variant_idx);
            let mut variant_size = Size::ZERO;
            let fields = variant_def
                .iter()
                .enumerate()
                .map(|(field_idx, local)| {
                    let field_name = coroutine.field_names[*local];
                    let field_layout = variant_layout.field(cx, field_idx);
                    let offset = variant_layout.fields.offset(field_idx);
                    // The struct is as large as the last field's end
                    variant_size = variant_size.max(offset + field_layout.size);
                    FieldInfo {
                        kind: FieldKind::CoroutineLocal,
                        name: field_name.unwrap_or(Symbol::intern(&format!(
                            ".coroutine_field{}",
                            local.as_usize()
                        ))),
                        offset: offset.bytes(),
                        size: field_layout.size.bytes(),
                        align: field_layout.align.abi.bytes(),
                        // Include the type name if there is no field name, or if the name is the
                        // __awaitee placeholder symbol which means a child future being `.await`ed.
                        type_name: (field_name.is_none() || field_name == Some(sym::__awaitee))
                            .then(|| Symbol::intern(&field_layout.ty.to_string())),
                    }
                })
                .chain(upvar_fields.iter().copied())
                .collect();

            // If the variant has no state-specific fields, then it's the size of the upvars.
            if variant_size == Size::ZERO {
                variant_size = upvars_size;
            }

            // This `if` deserves some explanation.
            //
            // The layout code has a choice of where to place the discriminant of this coroutine.
            // If the discriminant of the coroutine is placed early in the layout (before the
            // variant's own fields), then it'll implicitly be counted towards the size of the
            // variant, since we use the maximum offset to calculate size.
            //    (side-note: I know this is a bit problematic given upvars placement, etc).
            //
            // This is important, since the layout printing code always subtracts this discriminant
            // size from the variant size if the struct is "enum"-like, so failing to account for it
            // will either lead to numerical underflow, or an underreported variant size...
            //
            // However, if the discriminant is placed past the end of the variant, then we need
            // to factor in the size of the discriminant manually. This really should be refactored
            // better, but this "works" for now.
            if layout.fields.offset(tag_field) >= variant_size {
                variant_size += match tag_encoding {
                    TagEncoding::Direct => tag.size(cx),
                    _ => Size::ZERO,
                };
            }

            VariantInfo {
                name: Some(Symbol::intern(&ty::CoroutineArgs::variant_name(variant_idx))),
                kind: SizeKind::Exact,
                size: variant_size.bytes(),
                align: variant_layout.align.abi.bytes(),
                fields,
            }
        })
        .collect();

    // The first three variants are hardcoded to be `UNRESUMED`, `RETURNED` and `POISONED`.
    // We will move the `RETURNED` and `POISONED` elements to the end so we
    // are left with a sorting order according to the coroutines yield points:
    // First `Unresumed`, then the `SuspendN` followed by `Returned` and `Panicked` (POISONED).
    let end_states = variant_infos.drain(1..=2);
    let end_states: Vec<_> = end_states.collect();
    variant_infos.extend(end_states);

    (
        variant_infos,
        match tag_encoding {
            TagEncoding::Direct => Some(tag.size(cx)),
            _ => None,
        },
    )
}