rustc_abi/layout.rs

use std::fmt::{self, Write};
use std::ops::{Bound, Deref};
use std::{cmp, iter};

use rustc_hashes::Hash64;
use rustc_index::Idx;
use rustc_index::bit_set::BitMatrix;
use tracing::debug;

use crate::{
    AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
    LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
    Variants, WrappingRange,
};

mod coroutine;
mod simple;

#[cfg(feature = "nightly")]
mod ty;

#[cfg(feature = "nightly")]
pub use ty::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};

// A variant is absent if it's uninhabited and only has ZST fields.
// Present uninhabited variants only require space for their fields,
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
fn absent<'a, FieldIdx, VariantIdx, F>(fields: &IndexSlice<FieldIdx, F>) -> bool
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let uninhabited = fields.iter().any(|f| f.is_uninhabited());
    // We cannot ignore alignment; that might lead us to entirely discard a variant and
    // produce an enum that is less aligned than it should be!
    let is_1zst = fields.iter().all(|f| f.is_1zst());
    uninhabited && is_1zst
}
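
// For intuition (an illustrative sketch, not part of this module): in
// `enum E { A(std::convert::Infallible, u32), B }`, variant `A` is
// uninhabited but its `u32` field is not a 1-ZST, so `A` is *present*
// and space is reserved for the `u32` even though `A` can never be
// constructed. Only a variant whose fields are all 1-ZSTs is absent:
//
//     use std::convert::Infallible;
//     enum Absent { A(Infallible), B }       // `A` is absent
//     enum Present { A(Infallible, u32), B } // `A` is present
//     // the absent variant costs nothing; `B` alone determines the size
//     assert_eq!(std::mem::size_of::<Absent>(), 0);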

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
    Start,
    End,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LayoutCalculatorError<F> {
    /// An unsized type was found in a location where a sized type was expected.
    ///
    /// This is not always a compile error, for example if there is a `[T]: Sized`
    /// bound in a where clause.
    ///
    /// Contains the field that was unexpectedly unsized.
    UnexpectedUnsized(F),

    /// A type was too large for the target platform.
    SizeOverflow,

    /// A union had no fields.
    EmptyUnion,

    /// The fields or variants have irreconcilable reprs.
    ReprConflict,

    /// The length of a SIMD type is zero.
    ZeroLengthSimdType,

    /// The length of a SIMD type exceeds the maximum number of lanes.
    OversizedSimdType { max_lanes: u64 },

    /// An element type of a SIMD type isn't a primitive.
    NonPrimitiveSimdType(F),
}

impl<F> LayoutCalculatorError<F> {
    pub fn without_payload(&self) -> LayoutCalculatorError<()> {
        use LayoutCalculatorError::*;
        match *self {
            UnexpectedUnsized(_) => UnexpectedUnsized(()),
            SizeOverflow => SizeOverflow,
            EmptyUnion => EmptyUnion,
            ReprConflict => ReprConflict,
            ZeroLengthSimdType => ZeroLengthSimdType,
            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
        }
    }

    /// Format an untranslated diagnostic for this type.
    ///
    /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depends on fluent infra.
    pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use LayoutCalculatorError::*;
        f.write_str(match self {
            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
            SizeOverflow => "size overflow",
            EmptyUnion => "type is a union with no fields",
            ReprConflict => "type has an invalid repr",
            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
                "invalid simd type definition"
            }
        })
    }
}

type LayoutCalculatorResult<FieldIdx, VariantIdx, F> =
    Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>>;

#[derive(Clone, Copy, Debug)]
pub struct LayoutCalculator<Cx> {
    pub cx: Cx,
}

impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
    pub fn new(cx: Cx) -> Self {
        Self { cx }
    }

    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
        &self,
        element: &LayoutData<FieldIdx, VariantIdx>,
        count_if_sized: Option<u64>, // None for slices
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let count = count_if_sized.unwrap_or(0);
        let size =
            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: element.size, count },
            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
            largest_niche: element.largest_niche.filter(|_| count != 0),
            uninhabited: element.uninhabited && count != 0,
            align: element.align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: element.align.abi,
            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
        })
    }
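
    // A quick illustration (not exercised by this code): the array layout
    // is just the element layout repeated `count` times, with the stride
    // equal to the element size and no niche when `count == 0`:
    //
    //     // e.g. `[u16; 4]`: stride 2, count 4 => size 8, align 2
    //     assert_eq!(std::mem::size_of::<[u16; 4]>(), 8);
    //     assert_eq!(std::mem::align_of::<[u16; 4]>(), 2);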

    pub fn simd_type<
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        element: F,
        count: u64,
        repr_packed: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let elt = element.as_ref();
        if count == 0 {
            return Err(LayoutCalculatorError::ZeroLengthSimdType);
        } else if count > crate::MAX_SIMD_LANES {
            return Err(LayoutCalculatorError::OversizedSimdType {
                max_lanes: crate::MAX_SIMD_LANES,
            });
        }

        let BackendRepr::Scalar(e_repr) = elt.backend_repr else {
            return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
        };

        // Compute the size and alignment of the vector
        let dl = self.cx.data_layout();
        let size =
            elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
        let (repr, align) = if repr_packed && !count.is_power_of_two() {
            // Non-power-of-two vectors have padding up to the next power-of-two.
            // If we're a packed repr, remove the padding while keeping the alignment as close
            // to a vector as possible.
            (
                BackendRepr::Memory { sized: true },
                AbiAndPrefAlign {
                    abi: Align::max_aligned_factor(size),
                    pref: dl.llvmlike_vector_align(size).pref,
                },
            )
        } else {
            (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size))
        };
        let size = size.align_to(align.abi);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                memory_index: [0].into(),
            },
            backend_repr: repr,
            largest_niche: elt.largest_niche,
            uninhabited: false,
            size,
            align,
            max_repr_align: None,
            unadjusted_abi_align: elt.align.abi,
            randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
        })
    }
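
    // Sketch of the two outcomes above (illustrative; the type names are
    // hypothetical and `repr(simd)` is an internal, unstable attribute): a
    // non-power-of-two vector is padded and aligned like an LLVM vector,
    // while the packed form keeps the unpadded size and only takes the
    // largest power-of-two factor of that size as its alignment.
    //
    //     #[repr(simd)] struct F32x3([f32; 3]);          // size 16, align 16
    //     #[repr(simd, packed)] struct F32x3P([f32; 3]); // size 12, align 4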

    /// Compute the layout for a coroutine.
    ///
    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
    pub fn coroutine<
        'a,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
        VariantIdx: Idx,
        FieldIdx: Idx,
        LocalIdx: Idx,
    >(
        &self,
        local_layouts: &IndexSlice<LocalIdx, F>,
        prefix_layouts: IndexVec<FieldIdx, F>,
        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
        tag_to_layout: impl Fn(Scalar) -> F,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        coroutine::layout(
            self,
            local_layouts,
            prefix_layouts,
            variant_fields,
            storage_conflicts,
            tag_to_layout,
        )
    }

    pub fn univariant<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front then we do a second
        // run and bias niches to the right and then check which one is closer to one of the
        // struct's edges.
        if let Ok(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
            if !matches!(kind, StructKind::MaybeUnsized) {
                if let Some(niche) = layout.largest_niche {
                    let head_space = niche.offset.bytes();
                    let niche_len = niche.value.size(dl).bytes();
                    let tail_space = layout.size.bytes() - head_space - niche_len;

                    // This may end up doing redundant work if the niche is already in the last
                    // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                    // to get the unpadded size so we try anyway.
                    if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                        let alt_layout = self
                            .univariant_biased(fields, repr, kind, NicheBias::End)
                            .expect("alt layout should always work");
                        let alt_niche = alt_layout
                            .largest_niche
                            .expect("alt layout should have a niche like the regular one");
                        let alt_head_space = alt_niche.offset.bytes();
                        let alt_niche_len = alt_niche.value.size(dl).bytes();
                        let alt_tail_space =
                            alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                        debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

                        let prefer_alt_layout =
                            alt_head_space > head_space && alt_head_space > tail_space;

                        debug!(
                            "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                            layout: {}\n\
                            alt_layout: {}\n",
                            layout.size.bytes(),
                            head_space,
                            niche_len,
                            tail_space,
                            alt_head_space,
                            alt_niche_len,
                            alt_tail_space,
                            layout.fields.count(),
                            prefer_alt_layout,
                            self.format_field_niches(layout, fields),
                            self.format_field_niches(&alt_layout, fields),
                        );

                        if prefer_alt_layout {
                            return Ok(alt_layout);
                        }
                    }
                }
            }
        }
        layout
    }
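
    // Why niche placement matters (illustrative; exact layouts are not
    // guaranteed): in `(u64, bool)` the `bool` carries a niche with 254
    // spare values, so a wrapping enum can encode its extra variants in
    // that niche instead of adding a tag and growing the layout:
    //
    //     assert_eq!(std::mem::size_of::<Option<(u64, bool)>>(),
    //                std::mem::size_of::<(u64, bool)>());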

    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_unsafe_cell: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        dont_niche_optimize_enum: bool,
        always_sized: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if !repr.c() && absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Ok(LayoutData::never_type(&self.cx));
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => VariantIdx::new(0),
        };

        // take the struct path if it is an actual struct
        if !is_enum ||
            // or for optimizing univariant enums
            (present_second.is_none() && !repr.inhibit_enum_layout_opt())
        {
            self.layout_of_struct(
                repr,
                variants,
                is_enum,
                is_unsafe_cell,
                scalar_valid_range,
                always_sized,
                present_first,
            )
        } else {
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(is_enum);
            self.layout_of_enum(
                repr,
                variants,
                discr_range_of_repr,
                discriminants,
                dont_niche_optimize_enum,
            )
        }
    }
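
    // Note the univariant fast path above (illustrative): an enum with a
    // single inhabited variant needs no tag, so `enum E { Only(u32) }`
    // lays out exactly like `struct S(u32);` unless something like
    // `#[repr(C)]` or an explicit discriminant repr inhibits the
    // optimization.
    //
    //     enum E { Only(u32) }
    //     assert_eq!(std::mem::size_of::<E>(), 4);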

    pub fn layout_of_union<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;

        // If all the non-ZST fields have the same repr and union repr optimizations aren't
        // disabled, we can use that common repr for the union as a whole.
        struct AbiMismatch;
        let mut common_non_zst_repr_and_align = if repr.inhibit_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };

        let mut size = Size::ZERO;
        let only_variant_idx = VariantIdx::new(0);
        let only_variant = &variants[only_variant_idx];
        for field in only_variant {
            if field.is_unsized() {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
            }

            align = align.max(field.align);
            max_repr_align = max_repr_align.max(field.max_repr_align);
            size = cmp::max(size, field.size);

            if field.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_repr_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.backend_repr.to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_repr_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                            assert_eq!(
                                common_align, field.align.abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_repr_and_align = Ok(Some((field_abi, field.align.abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(AbiAndPrefAlign::new(pack));
        }
        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutS::unadjusted_abi_align`.
        let unadjusted_abi_align = align.abi;
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }
        // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let backend_repr = match common_non_zst_repr_and_align {
            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
            Ok(Some((repr, _))) => match repr {
                // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _)
                    if repr.scalar_align(dl).unwrap() != align.abi =>
                {
                    BackendRepr::Memory { sized: true }
                }
                // Vectors require at least element alignment, else disable the opt
                BackendRepr::SimdVector { element, count: _ }
                    if element.align(dl).abi > align.abi =>
                {
                    BackendRepr::Memory { sized: true }
                }
                // the alignment tests passed and we can use this
                BackendRepr::Scalar(..)
                | BackendRepr::ScalarPair(..)
                | BackendRepr::SimdVector { .. }
                | BackendRepr::Memory { .. } => repr,
            },
        };

        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
            return Err(LayoutCalculatorError::EmptyUnion);
        };

        let combined_seed = only_variant
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        Ok(LayoutData {
            variants: Variants::Single { index: only_variant_idx },
            fields: FieldsShape::Union(union_field_count),
            backend_repr,
            largest_niche: None,
            uninhabited: false,
            align,
            size: size.align_to(align.abi),
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        })
    }
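
    // For example (illustrative): a union takes the maximum of its fields'
    // sizes and alignments, and never exposes a niche, since any field may
    // be uninitialized at any time:
    //
    //     union U { a: u8, b: u16 }
    //     assert_eq!(std::mem::size_of::<U>(), 2);
    //     assert_eq!(std::mem::align_of::<U>(), 2);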

    /// single-variant enums are just structs, if you think about it
    fn layout_of_struct<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_unsafe_cell: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        always_sized: bool,
        present_first: VariantIdx,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Struct, or univariant enum equivalent to a struct.
        // (Typechecking will reject discriminant-sizing attrs.)

        let dl = self.cx.data_layout();
        let v = present_first;
        let kind = if is_enum || variants[v].is_empty() || always_sized {
            StructKind::AlwaysSized
        } else {
            StructKind::MaybeUnsized
        };

        let mut st = self.univariant(&variants[v], repr, kind)?;
        st.variants = Variants::Single { index: v };

        if is_unsafe_cell {
            let hide_niches = |scalar: &mut _| match scalar {
                Scalar::Initialized { value, valid_range } => {
                    *valid_range = WrappingRange::full(value.size(dl))
                }
                // Already doesn't have any niches
                Scalar::Union { .. } => {}
            };
            match &mut st.backend_repr {
                BackendRepr::Scalar(scalar) => hide_niches(scalar),
                BackendRepr::ScalarPair(a, b) => {
                    hide_niches(a);
                    hide_niches(b);
                }
                BackendRepr::SimdVector { element, count: _ } => hide_niches(element),
                BackendRepr::Memory { sized: _ } => {}
            }
            st.largest_niche = None;
            return Ok(st);
        }

        let (start, end) = scalar_valid_range;
        match st.backend_repr {
            BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
                // Enlarging validity ranges would result in missed
                // optimizations, *not* wrongly assuming the inner
                // value is valid. e.g. unions already enlarge validity ranges,
                // because the values may be uninitialized.
                //
                // Because of that we only check that the start and end
                // of the range is representable with this scalar type.

                let max_value = scalar.size(dl).unsigned_int_max();
                if let Bound::Included(start) = start {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(start <= max_value, "{start} > {max_value}");
                    scalar.valid_range_mut().start = start;
                }
                if let Bound::Included(end) = end {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(end <= max_value, "{end} > {max_value}");
                    scalar.valid_range_mut().end = end;
                }

                // Update `largest_niche` if we have introduced a larger niche.
                let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                if let Some(niche) = niche {
                    match st.largest_niche {
                        Some(largest_niche) => {
                            // Replace the existing niche even if they're equal,
                            // because this one is at a lower offset.
                            if largest_niche.available(dl) <= niche.available(dl) {
                                st.largest_niche = Some(niche);
                            }
                        }
                        None => st.largest_niche = Some(niche),
                    }
                }
            }
            _ => assert!(
                start == Bound::Unbounded && end == Bound::Unbounded,
                "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
            ),
        }

        Ok(st)
    }
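
    // The `scalar_valid_range` handling above is what gives niche-carrying
    // wrappers such as the standard library's `NonZeroU32` their niche: the
    // struct layout itself is unchanged, only the scalar's valid range
    // shrinks, e.g. from `0..=u32::MAX` to `1..=u32::MAX`, leaving 0 free
    // as a niche.
    //
    //     // hypothetical sketch of such a wrapper (the real std types are
    //     // more involved)
    //     #[rustc_layout_scalar_valid_range_start(1)]
    //     struct NonZeroish(u32); // niche at 0; Option<NonZeroish> is 4 bytes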

    fn layout_of_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        dont_niche_optimize_enum: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Until we've decided whether to use the tagged or
        // niche filling LayoutS, we don't want to intern the
        // variant layouts, so we can't store them in the
        // overall LayoutS. Store the overall LayoutS
        // and the variant LayoutSs here until then.
        struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
            layout: LayoutData<FieldIdx, VariantIdx>,
            variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
        }

        let dl = self.cx.data_layout();
        // bail if the enum has an incoherent repr that cannot be computed
        if repr.packed() {
            return Err(LayoutCalculatorError::ReprConflict);
        }

        let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
            if dont_niche_optimize_enum {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut max_repr_align = repr.align;
            let mut unadjusted_abi_align = align.abi;

            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align);
                    max_repr_align = max_repr_align.max(st.max_repr_align);
                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count =
                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;

            // Use the largest niche in the largest variant.
            let niche = variant_layouts[largest_variant_index].largest_niche?;
            let (niche_start, niche_scalar) = niche.reserve(dl, count)?;
            let niche_offset = niche.offset;
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for offset in offsets.iter_mut() {
                            *offset += this_offset;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.is_uninhabited() {
                    layout.backend_repr = BackendRepr::Memory { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align;

            let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited());
            let abi = if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].backend_repr {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
                    BackendRepr::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => BackendRepr::Memory { sized: true },
                }
            } else {
                BackendRepr::Memory { sized: true }
            };

            let combined_seed = variant_layouts
                .iter()
                .map(|v| v.randomization_seed)
                .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

            let layout = LayoutData {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    memory_index: [0].into(),
                },
                backend_repr: abi,
                largest_niche,
                uninhabited,
                size,
                align,
                max_repr_align,
                unadjusted_abi_align,
                randomization_seed: combined_seed,
            };

            Some(TmpLayout { layout, variants: variant_layouts })
        };
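
        // A small worked case (illustrative): in `Option<bool>` the largest
        // variant `Some(bool)` has a niche with 254 spare values (only 0
        // and 1 are valid for `bool`), so `None` is encoded as the reserved
        // value 2 and the enum needs no separate tag:
        //
        //     assert_eq!(std::mem::size_of::<Option<bool>>(), 1);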

        let niche_filling_layout = calculate_niche_filling_layout();

        let (mut min, mut max) = (i128::MAX, i128::MIN);
        let discr_type = repr.discr_type();
        let bits = Integer::from_attr(dl, discr_type).size().bits();
        for (i, mut val) in discriminants {
            if !repr.c() && variants[i].iter().any(|f| f.is_uninhabited()) {
                continue;
            }
            if discr_type.is_signed() {
                // sign extend the raw representation to be an i128
                val = (val << (128 - bits)) >> (128 - bits);
            }
            if val < min {
                min = val;
            }
            if val > max {
                max = val;
            }
        }
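
        // Worked example of the sign extension above (illustrative): for a
        // `repr(i8)` discriminant whose raw representation is `0xFF`,
        // `bits` is 8, and shifting up and back down as an `i128` recovers
        // the signed value:
        //
        //     let val: i128 = 0xFF;
        //     assert_eq!((val << (128 - 8)) >> (128 - 8), -1);
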
        // We might have no inhabited variants, so pretend there's at least one.
        if (min, max) == (i128::MAX, i128::MIN) {
            min = 0;
            max = 0;
        }
        assert!(min <= max, "discriminant range is {min}...{max}");
        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);

        let mut align = dl.aggregate_align;
        let mut max_repr_align = repr.align;
        let mut unadjusted_abi_align = align.abi;

        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::new(field_idx)];
                    if !field.is_1zst() {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align);
                max_repr_align = max_repr_align.max(st.max_repr_align);
                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                Ok(st)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align.abi);

        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck did
            // at this point (based on the values the discriminant can take on). Mostly
            // because this discriminant will be loaded, and then stored into a variable of
            // the type calculated by typeck. Consider such a case (a bug): typeck decided
            // on a byte-sized discriminant, but layout thinks we need 16 bits to store all
            // discriminant values. That would be a bug, because then, in codegen, in order
            // to store this 16-bit discriminant into an 8-bit sized temporary, some of the
            // space necessary to represent it would have to be discarded (or layout is
            // wrong in thinking it needs 16 bits).
            panic!(
                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
            );
            // However, it is fine to make the discriminant type arbitrarily large (as an
            // optimisation) after this point – we’ll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // loads/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.

        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("encountered a non-arbitrary layout during enum layout")
                    }
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Primitive::Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
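
        // Example of the masking above (illustrative): with a `repr(i8)`
        // enum whose discriminants span `-1..=1`, the tag is an 8-bit
        // scalar with the wrapping valid range `0xFF..=0x01`:
        //
        //     assert_eq!((-1i128 as u128) & 0xFF, 0xFF); // start
        //     assert_eq!((1i128 as u128) & 0xFF, 0x01);  // end
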
        let mut abi = BackendRepr::Memory { sized: true };

        let uninhabited = layout_variants.iter().all(|v| v.is_uninhabited());
        if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = BackendRepr::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            // That's possible only if we can find a common primitive type for all variants.
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!("encountered a non-arbitrary layout during enum layout");
                };
                // We skip *all* ZST here and later check if we are good in terms of alignment.
                // This lets us handle some cases involving aligned ZST.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some((old_prim, common_offset)) = common_prim {
                    // All variants must be at the same offset
                    if offset != common_offset {
                        common_prim = None;
                        break;
                    }
                    // This is pretty conservative. We could go fancier
                    // by realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    let new_prim = match (old_prim, prim) {
                        // Allow all identical primitives.
                        (x, y) if x == y => x,
                        // Allow integers of the same size with differing signedness.
                        // We arbitrarily choose the signedness of the first variant.
                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
                        // Allow integers mixed with pointers of the same layout.
                        // We must represent this using a pointer, to avoid
                        // roundtripping pointers through ptrtoint/inttoptr.
                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
                        {
                            p
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    // We may be updating the primitive here, for example from int->ptr.
                    common_prim = Some((new_prim, common_offset));
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    let size = prim.size(dl);
                    assert!(size.bits() <= 128);
                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair =
                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                        assert_eq!(memory_index.raw, [0, 1]);
                        offsets
                    }
                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
                };
                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
                    && pair_offsets[FieldIdx::new(1)] == *offset
                    && align == pair.align
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.backend_repr;
                }
            }
        }
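
        // For instance (illustrative; the exact ABI is not guaranteed):
        // `Result<u32, u32>` places a `u32` at the same offset in both
        // variants, so the enum as a whole can be a `ScalarPair(tag, u32)`
        // and be passed around by value instead of through memory:
        //
        //     assert_eq!(std::mem::size_of::<Result<u32, u32>>(), 8);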

        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0
                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
                {
                    variant.backend_repr = abi;
                    // Also need to bump up the size and alignment, so that the entire value fits
                    // in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let combined_seed = layout_variants
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        let tagged_layout = LayoutData {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: 0,
                variants: IndexVec::new(),
            },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                memory_index: [0].into(),
            },
            largest_niche,
            uninhabited,
            backend_repr: abi,
            align,
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        };

        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };

        let mut best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
                    tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
                };
                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        // Now we can intern the variant layouts and store them in the enum layout.
        best_layout.layout.variants = match best_layout.layout.variants {
            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
            }
            Variants::Single { .. } | Variants::Empty => {
                panic!("encountered a single-variant or empty enum during multi-variant layout")
            }
        };
        Ok(best_layout.layout)
    }
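
    // How the two candidates compete (illustrative): for `Option<&u8>` the
    // niche-filling candidate encodes `None` as the null pointer and needs
    // no tag, while the tagged candidate would add a separate discriminant
    // and double the size, so the smaller niche-filling layout wins:
    //
    //     assert_eq!(std::mem::size_of::<Option<&u8>>(),
    //                std::mem::size_of::<&u8>());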

    fn univariant_biased<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
        niche_bias: NicheBias,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let pack = repr.pack;
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;
        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
        let optimize_field_order = !repr.inhibit_struct_field_reordering();
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index.raw[..end];
        let fields_excluding_tail = &fields.raw[..end];
        // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
        let field_seed = fields_excluding_tail
            .iter()
            .fold(Hash64::ZERO, |acc, f| acc.wrapping_add(f.randomization_seed));

        if optimize_field_order && fields.len() > 1 {
            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
                #[cfg(feature = "randomize")]
                {
                    use rand::SeedableRng;
                    use rand::seq::SliceRandom;
                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
                    // ordering.
                    let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64(
                        field_seed.wrapping_add(repr.field_shuffle_seed).as_u64(),
                    );

                    // Shuffle the ordering of the fields.
                    optimizing.shuffle(&mut rng);
                }
                // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
                // not depend on the layout of the tail.
                let max_field_align =
                    fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
                let largest_niche_size = fields_excluding_tail
                    .iter()
                    .filter_map(|f| f.largest_niche)
                    .map(|n| n.available(dl))
                    .max()
                    .unwrap_or(0);

                // Calculates a sort key to group fields by their alignment or possibly some
                // size-derived pseudo-alignment.
                let alignment_group_key = |layout: &F| {
                    // The two branches here return values that cannot be meaningfully compared with
                    // each other. However, we know that consistently for all executions of
                    // `alignment_group_key`, one or the other branch will be taken, so this is okay.
                    if let Some(pack) = pack {
                        // Return the packed alignment in bytes.
                        layout.align.abi.min(pack).bytes()
                    } else {
                        // Returns `log2(effective-align)`. The calculation assumes that size is an
                        // integer multiple of align, except for ZSTs.
                        let align = layout.align.abi.bytes();
                        let size = layout.size.bytes();
                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
                        // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
                        let size_as_align = align.max(size).trailing_zeros();
                        let size_as_align = if largest_niche_size > 0 {
                            match niche_bias {
                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
                                // array to the front in the first case (for aligned loads) but keep
                                // the bool in front in the second case for its niches.
                                NicheBias::Start => {
                                    max_field_align.trailing_zeros().min(size_as_align)
                                }
                                // When moving niches towards the end of the struct then for
                                // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
                                // in the align-1 group because its bool can be moved closer to the end.
                                NicheBias::End if niche_size == largest_niche_size => {
                                    align.trailing_zeros()
                                }
                                NicheBias::End => size_as_align,
                            }
                        } else {
                            size_as_align
                        };
                        size_as_align as u64
                    }
                };
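
                // Concrete key values (illustrative, assuming no
                // `repr(packed)`): `[u8; 4]` (align 1, size 4) yields
                // `log2(4) = 2`, the same group as a `u32` (align 4), so it
                // does not break up aligned fields; a lone `bool` (align 1,
                // size 1) yields 0 and sorts into the smallest-alignment
                // group.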

                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        // Currently `LayoutData` only exposes a single niche, so sorting is usually
                        // sufficient to get one niche into the preferred position. If it ever
                        // supported multiple niches then a more advanced pick-and-pack approach could
                        // provide better results. But even for the single-niche case it's not
                        // optimal. E.g. for `A(u32, (bool, u8), u16)` it would be possible to move
                        // the bool to the front, but it would require packing the tuple together
                        // with the u16 to build a 4-byte group so that the u32 can be placed after
                        // it without padding. This kind of packing can't be achieved by sorting.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let field_size = f.size.bytes();
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            let niche_size_key = match niche_bias {
                                // Large niches first.
                                NicheBias::Start => !niche_size,
                                // Large niches last.
                                NicheBias::End => niche_size,
                            };
                            let inner_niche_offset_key = match niche_bias {
                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
                                NicheBias::End => f.largest_niche.map_or(0, |n| {
                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
                                }),
                            };

                            (
                                // Place the largest alignments first.
                                cmp::Reverse(alignment_group_key(f)),
                                // Then prioritize niche placement within an alignment group
                                // according to `niche_bias`.
                                niche_size_key,
                                // Then among fields with equally-sized niches prefer the ones
                                // closer to the start/end of the field.
                                inner_niche_offset_key,
                            )
                        });
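                        // A worked example (illustrative): for `S(u8, u32, u16)` with
                        // `NicheBias::Start` and no niches, the alignment-group keys
                        // are 0, 2 and 1, so the sort above yields the memory order
                        // [u32, u16, u8], packing `S` into 8 bytes where declaration
                        // order would need 12.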
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix.
                        // Also place fields with larger niches later within each
                        // alignment group, so that the largest niche can be used as
                        // the discriminant in jagged enums.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (alignment_group_key(f), niche_size)
                        });
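                        // A worked example (illustrative, no niches in play): behind a
                        // 1-byte tag prefix, the ascending sort lays out `(u8, u32)`
                        // as [u8, u32]: the `u8` sits right after the tag and the
                        // `u32` still lands on a 4-byte boundary, while the descending
                        // order would insert 3 bytes of padding after the tag.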
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
        let mut unsized_field = None::<&F>;
        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }
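        // For example (illustrative): a `Prefixed` layout for a 1-byte tag with 1-byte
        // alignment starts the field cursor at offset 1; every field below is then
        // aligned up from that cursor.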
        for &i in &inverse_memory_index {
            let field = &fields[i];
            // Only the last field may be unsized; seeing another field after an
            // unsized one is an error.
            if let Some(unsized_field) = unsized_field {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
            }

            if field.is_unsized() {
                if let StructKind::MaybeUnsized = kind {
                    unsized_field = Some(field);
                } else {
                    return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
                }
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);
            max_repr_align = max_repr_align.max(field.max_repr_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                // Pick up larger niches.
                let prefer_new_niche = match niche_bias {
                    NicheBias::Start => available > largest_niche_available,
                    // If there are several niches of the same size, pick the last one.
                    NicheBias::End => available >= largest_niche_available,
                };
                if prefer_new_niche {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }
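            // For example (illustrative): with two `bool` fields and `NicheBias::End`,
            // the `>=` comparison lets the later field's equally-sized niche win, so
            // `largest_niche` ends up pointing at the field placed closest to the end.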

            offset =
                offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
        }
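        // Continuing the illustrative [u32, u16, u8] example from above: the loop
        // assigns offsets 0, 4 and 6 and leaves `offset == 7`, which becomes
        // `min_size` below and is then rounded up to a final size of 8.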

        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align.abi;
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }
        // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider: if field 5 has offset 0, then inverse_memory_index[0]
        // is 5 and memory_index[5] must be 0.
        // Note: if we didn't optimize, the mapping is the identity and is already correct.
        let memory_index = if optimize_field_order {
            inverse_memory_index.invert_bijective_mapping()
        } else {
            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
            inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
        };
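        // For example (illustrative): inverse_memory_index [2, 0, 1] (field 2 first in
        // memory, then field 0, then field 1) inverts to memory_index [1, 2, 0].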
        let size = min_size.align_to(align.abi);
        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }
        let mut layout_of_single_non_zst_field = None;
        let sized = unsized_field.is_none();
        let mut abi = BackendRepr::Memory { sized };

        let optimize_abi = !repr.inhibit_newtype_abi_optimization();

        // Try to make this a Scalar/ScalarPair.
        if sized && size.bytes() > 0 {
            // We skip *all* ZSTs here and later check if we are good in terms of alignment.
            // This lets us handle some cases involving aligned ZSTs.
            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    layout_of_single_non_zst_field = Some(field);

                    // The field fills the struct and has a scalar or scalar-pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.backend_repr {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. }
                                if optimize_abi =>
                            {
                                abi = field.backend_repr;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            BackendRepr::ScalarPair(..) => {
                                abi = field.backend_repr;
                            }
                            _ => {}
                        }
                    }
                }
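                // For example (illustrative): `struct Wrapper(u32)` with the default
                // repr has its single non-ZST field at offset 0 with matching size and
                // alignment, so it inherits `BackendRepr::Scalar`; under `#[repr(C)]`
                // the `optimize_abi` guard keeps the aggregate `Memory` repr instead.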

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.backend_repr, b.backend_repr) {
                        (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair =
                                LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index.raw, [0, 1]);
                                    offsets
                                }
                                FieldsShape::Primitive
                                | FieldsShape::Array { .. }
                                | FieldsShape::Union(..) => {
                                    panic!("encountered a non-arbitrary layout during univariant layout")
                                }
                            };
                            if offsets[i] == pair_offsets[FieldIdx::new(0)]
                                && offsets[j] == pair_offsets[FieldIdx::new(1)]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.backend_repr;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
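        // For example (illustrative): for `struct S(u32, f32)` the computed offsets
        // (0 and 4), alignment (4) and size (8) match the canonical `scalar_pair`
        // layout, so `S` gets a `ScalarPair` repr; per the check above this holds even
        // under `#[repr(C)]`, because the pair matches the already computed layout.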
        let uninhabited = fields.iter().any(|f| f.is_uninhabited());

        let unadjusted_abi_align = if repr.transparent() {
            match layout_of_single_non_zst_field {
                Some(l) => l.unadjusted_abi_align,
                None => {
                    // `repr(transparent)` with all ZST fields.
                    align.abi
                }
            }
        } else {
            unadjusted_abi_align
        };
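        // For example (illustrative): `#[repr(transparent)] struct W(f64)` forwards
        // `f64`'s unadjusted ABI alignment, so ABI-sensitive code can treat `W`
        // exactly like a bare `f64`.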

        let seed = field_seed.wrapping_add(repr.field_shuffle_seed);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            backend_repr: abi,
            largest_niche,
            uninhabited,
            align,
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: seed,
        })
    }

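    /// Formats a compact per-field summary for debug logging: one
    /// `[o<offset>a<align>s<size>]` cell per field, in increasing-offset order, with
    /// ` n<offset>b<bits>s<size>` appended when the field has a niche. For example
    /// (illustrative): a `u16` at offset 0 followed by a `bool` at offset 2 renders as
    /// `[o0a2s2] [o2a1s1 n0b7s1] `, since the `bool` has 254 spare values and
    /// `254_u128.ilog2() == 7`.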
    fn format_field_niches<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        layout: &LayoutData<FieldIdx, VariantIdx>,
        fields: &IndexSlice<FieldIdx, F>,
    ) -> String {
        let dl = self.cx.data_layout();
        let mut s = String::new();
        for i in layout.fields.index_by_increasing_offset() {
            let offset = layout.fields.offset(i);
            let f = &fields[FieldIdx::new(i)];
            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
            if let Some(n) = f.largest_niche {
                write!(
                    s,
                    " n{}b{}s{}",
                    n.offset.bytes(),
                    n.available(dl).ilog2(),
                    n.value.size(dl).bytes()
                )
                .unwrap();
            }
            write!(s, "] ").unwrap();
        }
        s
    }
1507}