// rustc_abi/layout.rs

use std::fmt::{self, Write};
use std::ops::{Bound, Deref};
use std::{cmp, iter};

use rustc_hashes::Hash64;
use rustc_index::Idx;
use tracing::debug;

use crate::{
    AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
    LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
    Variants, WrappingRange,
};

#[cfg(feature = "nightly")]
mod ty;

#[cfg(feature = "nightly")]
pub use ty::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};

// A variant is absent if it's uninhabited and only has ZST fields.
// Present uninhabited variants only require space for their fields,
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
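//
// A hypothetical illustration of the distinction:
//
//     enum E { A(u8), B(!) }        // `B` is absent: uninhabited, only 1-ZST fields
//     enum F { A(u8), B(!, u32) }   // `B` is uninhabited but *not* absent:
//                                   // space for the `u32` must still be reserved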
fn absent<'a, FieldIdx, VariantIdx, F>(fields: &IndexSlice<FieldIdx, F>) -> bool
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let uninhabited = fields.iter().any(|f| f.is_uninhabited());
    // We cannot ignore alignment; that might lead us to entirely discard a variant and
    // produce an enum that is less aligned than it should be!
    let is_1zst = fields.iter().all(|f| f.is_1zst());
    uninhabited && is_1zst
}

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
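///
/// E.g. (illustrative) when laying out a variant containing a `bool`, a start
/// bias tries to move the `bool`'s niche towards offset 0, while an end bias
/// tries to move it towards the last byte so that smaller variants of an enum
/// can be packed into the space before it.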
enum NicheBias {
    Start,
    End,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LayoutCalculatorError<F> {
    /// An unsized type was found in a location where a sized type was expected.
    ///
    /// This is not always a compile error, for example if there is a `[T]: Sized`
    /// bound in a where clause.
    ///
    /// Contains the field that was unexpectedly unsized.
    UnexpectedUnsized(F),

    /// A type was too large for the target platform.
    SizeOverflow,

    /// A union had no fields.
    EmptyUnion,

    /// The fields or variants have irreconcilable reprs.
    ReprConflict,
}

impl<F> LayoutCalculatorError<F> {
    pub fn without_payload(&self) -> LayoutCalculatorError<()> {
        match self {
            LayoutCalculatorError::UnexpectedUnsized(_) => {
                LayoutCalculatorError::UnexpectedUnsized(())
            }
            LayoutCalculatorError::SizeOverflow => LayoutCalculatorError::SizeOverflow,
            LayoutCalculatorError::EmptyUnion => LayoutCalculatorError::EmptyUnion,
            LayoutCalculatorError::ReprConflict => LayoutCalculatorError::ReprConflict,
        }
    }

    /// Format an untranslated diagnostic for this type.
    ///
    /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
    pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            LayoutCalculatorError::UnexpectedUnsized(_) => {
                "an unsized type was found where a sized type was expected"
            }
            LayoutCalculatorError::SizeOverflow => "size overflow",
            LayoutCalculatorError::EmptyUnion => "type is a union with no fields",
            LayoutCalculatorError::ReprConflict => "type has an invalid repr",
        })
    }
}

type LayoutCalculatorResult<FieldIdx, VariantIdx, F> =
    Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>>;

#[derive(Clone, Copy, Debug)]
pub struct LayoutCalculator<Cx> {
    pub cx: Cx,
}

impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
    pub fn new(cx: Cx) -> Self {
        Self { cx }
    }

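    /// Lays out two scalars back to back: `a` at offset 0 and `b` at the next
    /// offset aligned for `b`. A sketch of the arithmetic (illustrative, and
    /// assuming the target's aggregate alignment does not exceed 4 bytes): for
    /// `a = i32`, `b = i8` this yields `b_offset = 4`, `align = 4`, and
    /// `size = (4 + 1).align_to(4) = 8`.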
    pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
        &self,
        a: Scalar,
        b: Scalar,
    ) -> LayoutData<FieldIdx, VariantIdx> {
        let dl = self.cx.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        let combined_seed = a.size(&self.cx).bytes().wrapping_add(b.size(&self.cx).bytes());

        LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO, b_offset].into(),
                memory_index: [0, 1].into(),
            },
            backend_repr: BackendRepr::ScalarPair(a, b),
            largest_niche,
            align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: align.abi,
            randomization_seed: Hash64::new(combined_seed),
        }
    }

    pub fn univariant<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front then we do a second
        // run and bias niches to the right and then check which one is closer to one of the
        // struct's edges.
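        // A hypothetical illustration: if the start-biased run leaves the only
        // niche (say, a `bool`) in the middle of the struct, the end-biased run
        // may push it towards the last byte; the alternative layout is preferred
        // below only if the free space before its niche beats both edges of the
        // default layout.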
        if let Ok(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
            if !matches!(kind, StructKind::MaybeUnsized) {
                if let Some(niche) = layout.largest_niche {
                    let head_space = niche.offset.bytes();
                    let niche_len = niche.value.size(dl).bytes();
                    let tail_space = layout.size.bytes() - head_space - niche_len;

                    // This may end up doing redundant work if the niche is already in the last
                    // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                    // to get the unpadded size so we try anyway.
                    if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                        let alt_layout = self
                            .univariant_biased(fields, repr, kind, NicheBias::End)
                            .expect("alt layout should always work");
                        let alt_niche = alt_layout
                            .largest_niche
                            .expect("alt layout should have a niche like the regular one");
                        let alt_head_space = alt_niche.offset.bytes();
                        let alt_niche_len = alt_niche.value.size(dl).bytes();
                        let alt_tail_space =
                            alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                        debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

                        let prefer_alt_layout =
                            alt_head_space > head_space && alt_head_space > tail_space;

                        debug!(
                            "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                            layout: {}\n\
                            alt_layout: {}\n",
                            layout.size.bytes(),
                            head_space,
                            niche_len,
                            tail_space,
                            alt_head_space,
                            alt_niche_len,
                            alt_tail_space,
                            layout.fields.count(),
                            prefer_alt_layout,
                            self.format_field_niches(layout, fields),
                            self.format_field_niches(&alt_layout, fields),
                        );

                        if prefer_alt_layout {
                            return Ok(alt_layout);
                        }
                    }
                }
            }
        }
        layout
    }

    pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
        &self,
    ) -> LayoutData<FieldIdx, VariantIdx> {
        let dl = self.cx.data_layout();
        // This is also used for uninhabited enums, so we use `Variants::Empty`.
        LayoutData {
            variants: Variants::Empty,
            fields: FieldsShape::Primitive,
            backend_repr: BackendRepr::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
            max_repr_align: None,
            unadjusted_abi_align: dl.i8_align.abi,
            randomization_seed: Hash64::ZERO,
        }
    }

    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_unsafe_cell: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        dont_niche_optimize_enum: bool,
        always_sized: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if !repr.c() && absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Ok(self.layout_of_never_type());
            }
            // If it's a struct, still compute a layout so that we can
            // compute the field offsets.
            None => VariantIdx::new(0),
        };
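        // E.g. (hypothetical) `enum Void { V(!) }` has no present variants and
        // takes the never-type layout above, while `struct S(!);` still gets a
        // full field layout so that the offset of its field remains defined.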

        // take the struct path if it is an actual struct
        if !is_enum ||
            // or for optimizing univariant enums
            (present_second.is_none() && !repr.inhibit_enum_layout_opt())
        {
            self.layout_of_struct(
                repr,
                variants,
                is_enum,
                is_unsafe_cell,
                scalar_valid_range,
                always_sized,
                present_first,
            )
        } else {
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(is_enum);
            self.layout_of_enum(
                repr,
                variants,
                discr_range_of_repr,
                discriminants,
                dont_niche_optimize_enum,
            )
        }
    }

    pub fn layout_of_union<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;

        // If all the non-ZST fields have the same ABI and union ABI optimizations aren't
        // disabled, we can use that common ABI for the union as a whole.
        struct AbiMismatch;
        let mut common_non_zst_abi_and_align = if repr.inhibits_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };
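        // Illustrative example: `union U { a: u32, b: u32 }` can keep a scalar
        // ABI (with the valid range widened to "any bits, possibly uninit"),
        // while `union V { a: u32, b: f32 }` mixes Int and Float scalars, so it
        // hits the `AbiMismatch` path below and gets the Memory ABI.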

        let mut size = Size::ZERO;
        let only_variant_idx = VariantIdx::new(0);
        let only_variant = &variants[only_variant_idx];
        for field in only_variant {
            if field.is_unsized() {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
            }

            align = align.max(field.align);
            max_repr_align = max_repr_align.max(field.max_repr_align);
            size = cmp::max(size, field.size);

            if field.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_abi_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.backend_repr.to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_abi_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                            assert_eq!(
                                common_align, field.align.abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_abi_and_align = Ok(Some((field_abi, field.align.abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(AbiAndPrefAlign::new(pack));
        }
        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutS::unadjusted_abi_align`.
        let unadjusted_abi_align = align.abi;
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }
        // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let abi = match common_non_zst_abi_and_align {
            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
            Ok(Some((abi, _))) => {
                if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
                    // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                    BackendRepr::Memory { sized: true }
                } else {
                    abi
                }
            }
        };

        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
            return Err(LayoutCalculatorError::EmptyUnion);
        };

        let combined_seed = only_variant
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        Ok(LayoutData {
            variants: Variants::Single { index: only_variant_idx },
            fields: FieldsShape::Union(union_field_count),
            backend_repr: abi,
            largest_niche: None,
            align,
            size: size.align_to(align.abi),
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        })
    }

    /// single-variant enums are just structs, if you think about it
    fn layout_of_struct<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_unsafe_cell: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        always_sized: bool,
        present_first: VariantIdx,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Struct, or univariant enum equivalent to a struct.
        // (Typechecking will reject discriminant-sizing attrs.)

        let dl = self.cx.data_layout();
        let v = present_first;
        let kind = if is_enum || variants[v].is_empty() || always_sized {
            StructKind::AlwaysSized
        } else {
            StructKind::MaybeUnsized
        };

        let mut st = self.univariant(&variants[v], repr, kind)?;
        st.variants = Variants::Single { index: v };

        if is_unsafe_cell {
            let hide_niches = |scalar: &mut _| match scalar {
                Scalar::Initialized { value, valid_range } => {
                    *valid_range = WrappingRange::full(value.size(dl))
                }
                // Already doesn't have any niches
                Scalar::Union { .. } => {}
            };
            match &mut st.backend_repr {
                BackendRepr::Uninhabited => {}
                BackendRepr::Scalar(scalar) => hide_niches(scalar),
                BackendRepr::ScalarPair(a, b) => {
                    hide_niches(a);
                    hide_niches(b);
                }
                BackendRepr::Vector { element, count: _ } => hide_niches(element),
                BackendRepr::Memory { sized: _ } => {}
            }
            st.largest_niche = None;
            return Ok(st);
        }

        let (start, end) = scalar_valid_range;
        match st.backend_repr {
            BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
                // Enlarging validity ranges would result in missed
                // optimizations, *not* wrongly assuming the inner
                // value is valid. e.g. unions already enlarge validity ranges,
                // because the values may be uninitialized.
                //
                // Because of that we only check that the start and end
                // of the range are representable with this scalar type.

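                // E.g. `NonZeroU32` is declared with
                // `#[rustc_layout_scalar_valid_range_start(1)]`; the code below
                // narrows its scalar's valid range to `1..=u32::MAX`, creating
                // the niche that lets `Option<NonZeroU32>` stay 4 bytes.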
                let max_value = scalar.size(dl).unsigned_int_max();
                if let Bound::Included(start) = start {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(start <= max_value, "{start} > {max_value}");
                    scalar.valid_range_mut().start = start;
                }
                if let Bound::Included(end) = end {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(end <= max_value, "{end} > {max_value}");
                    scalar.valid_range_mut().end = end;
                }

                // Update `largest_niche` if we have introduced a larger niche.
                let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                if let Some(niche) = niche {
                    match st.largest_niche {
                        Some(largest_niche) => {
                            // Replace the existing niche even if they're equal,
                            // because this one is at a lower offset.
                            if largest_niche.available(dl) <= niche.available(dl) {
                                st.largest_niche = Some(niche);
                            }
                        }
                        None => st.largest_niche = Some(niche),
                    }
                }
            }
            _ => assert!(
                start == Bound::Unbounded && end == Bound::Unbounded,
                "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
            ),
        }

        Ok(st)
    }

    fn layout_of_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        dont_niche_optimize_enum: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Until we've decided whether to use the tagged or
        // niche filling LayoutS, we don't want to intern the
        // variant layouts, so we can't store them in the
        // overall LayoutS. Store the overall LayoutS
        // and the variant LayoutSs here until then.
        struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
            layout: LayoutData<FieldIdx, VariantIdx>,
            variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
        }

        let dl = self.cx.data_layout();
        // bail if the enum has an incoherent repr that cannot be computed
        if repr.packed() {
            return Err(LayoutCalculatorError::ReprConflict);
        }

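        // Illustrative example of niche filling: for `Option<&u8>` the largest
        // variant `Some(&u8)` has a pointer niche (null is never a valid `&u8`),
        // so `None` can be encoded as the all-zero pointer value and the enum
        // stays pointer-sized, with no separate tag field.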
        let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
            if dont_niche_optimize_enum {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut max_repr_align = repr.align;
            let mut unadjusted_abi_align = align.abi;

            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align);
                    max_repr_align = max_repr_align.max(st.max_repr_align);
                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count =
                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;

            // Use the largest niche in the largest variant.
            let niche = variant_layouts[largest_variant_index].largest_niche?;
            let (niche_start, niche_scalar) = niche.reserve(dl, count)?;
            let niche_offset = niche.offset;
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for offset in offsets.iter_mut() {
                            *offset += this_offset;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.is_uninhabited() {
                    layout.backend_repr = BackendRepr::Memory { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align;

            let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) {
                BackendRepr::Uninhabited
            } else if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].backend_repr {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
                    BackendRepr::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => BackendRepr::Memory { sized: true },
                }
            } else {
                BackendRepr::Memory { sized: true }
            };

            let combined_seed = variant_layouts
                .iter()
                .map(|v| v.randomization_seed)
                .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

            let layout = LayoutData {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    memory_index: [0].into(),
                },
                backend_repr: abi,
                largest_niche,
                size,
                align,
                max_repr_align,
                unadjusted_abi_align,
                randomization_seed: combined_seed,
            };

            Some(TmpLayout { layout, variants: variant_layouts })
        };

        let niche_filling_layout = calculate_niche_filling_layout();

        let (mut min, mut max) = (i128::MAX, i128::MIN);
        let discr_type = repr.discr_type();
        let bits = Integer::from_attr(dl, discr_type).size().bits();
        for (i, mut val) in discriminants {
            if !repr.c() && variants[i].iter().any(|f| f.is_uninhabited()) {
                continue;
            }
            if discr_type.is_signed() {
                // sign extend the raw representation to be an i128
                val = (val << (128 - bits)) >> (128 - bits);
            }
            if val < min {
                min = val;
            }
            if val > max {
                max = val;
            }
        }
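        // E.g. (illustrative) for `#[repr(i8)] enum E { A = -1, B = 1 }` the raw
        // `-1` arrives as `0xff`; the shift pair above sign-extends it so that
        // `(min, max)` becomes `(-1, 1)` rather than `(1, 255)`.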
        // We might have no inhabited variants, so pretend there's at least one.
        if (min, max) == (i128::MAX, i128::MIN) {
            min = 0;
            max = 0;
        }
        assert!(min <= max, "discriminant range is {min}...{max}");
        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);

        let mut align = dl.aggregate_align;
        let mut max_repr_align = repr.align;
        let mut unadjusted_abi_align = align.abi;

        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::new(field_idx)];
                    if !field.is_1zst() {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align);
                max_repr_align = max_repr_align.max(st.max_repr_align);
                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                Ok(st)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align.abi);

        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if layout decided on a greater discriminant size than typeck at
            // this point (based on the values the discriminant can take on), mostly because
            // this discriminant will be loaded and then stored into a variable of the type
            // calculated by typeck. Consider such a case (a bug): typeck decided on a
            // byte-sized discriminant, but layout thinks we need 16 bits to store all
            // discriminant values. Then, in codegen, storing this 16-bit discriminant into
            // an 8-bit sized temporary would have to discard some of the space necessary to
            // represent it (or layout is wrong in thinking it needs 16 bits).
            panic!(
                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
            );
            // However, it is fine to make the discriminant type larger than typeck's (as an
            // optimisation) after this point; we'll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.

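        // A hypothetical illustration: `enum E { A(u32), B }` only needs a
        // one-byte tag, but `A`'s first field is 4-byte aligned, so the tag can
        // be widened to 4 bytes for free instead of leaving padding whose
        // contents LLVM would have to treat as unknown.
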
        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("encountered a non-arbitrary layout during enum layout")
                    }
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Primitive::Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
        let mut abi = BackendRepr::Memory { sized: true };

        if layout_variants.iter().all(|v| v.is_uninhabited()) {
            abi = BackendRepr::Uninhabited;
        } else if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = BackendRepr::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            // That's possible only if we can find a common primitive type for all variants.
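            // E.g. (illustrative) `enum E { A(u32), B(u32) }`: both variants put a
            // `u32` at the same offset after the tag, so the whole enum can be a
            // `ScalarPair(tag, u32)` and thus be passed around by value.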
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!("encountered a non-arbitrary layout during enum layout");
                };
                // We skip *all* ZST here and later check if we are good in terms of alignment.
                // This lets us handle some cases involving aligned ZST.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some((old_prim, common_offset)) = common_prim {
                    // All variants must be at the same offset
                    if offset != common_offset {
                        common_prim = None;
                        break;
                    }
                    // This is pretty conservative. We could go fancier
                    // by realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    let new_prim = match (old_prim, prim) {
                        // Allow all identical primitives.
                        (x, y) if x == y => x,
                        // Allow integers of the same size with differing signedness.
                        // We arbitrarily choose the signedness of the first variant.
                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
                        // Allow integers mixed with pointers of the same layout.
                        // We must represent this using a pointer, to avoid
                        // roundtripping pointers through ptrtoint/inttoptr.
                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
                        {
                            p
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    // We may be updating the primitive here, for example from int->ptr.
                    common_prim = Some((new_prim, common_offset));
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    let size = prim.size(dl);
                    assert!(size.bits() <= 128);
                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                        assert_eq!(memory_index.raw, [0, 1]);
                        offsets
                    }
                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
                };
                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
                    && pair_offsets[FieldIdx::new(1)] == *offset
                    && align == pair.align
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.backend_repr;
                }
            }
        }

        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0
                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
                {
                    variant.backend_repr = abi;
                    // Also need to bump up the size and alignment, so that the entire value fits
                    // in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let combined_seed = layout_variants
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        let tagged_layout = LayoutData {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: 0,
                variants: IndexVec::new(),
            },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                memory_index: [0].into(),
            },
            largest_niche,
            backend_repr: abi,
            align,
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        };

        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };

        let mut best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
                    tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
                };
                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        // Now we can intern the variant layouts and store them in the enum layout.
        best_layout.layout.variants = match best_layout.layout.variants {
            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
            }
            Variants::Single { .. } | Variants::Empty => {
                panic!("encountered a single-variant or empty enum during multi-variant layout")
            }
        };
        Ok(best_layout.layout)
    }

    fn univariant_biased<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
        niche_bias: NicheBias,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let pack = repr.pack;
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;
        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
        let optimize_field_order = !repr.inhibit_struct_field_reordering();
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index.raw[..end];
        let fields_excluding_tail = &fields.raw[..end];
        // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
        let field_seed = fields_excluding_tail
            .iter()
            .fold(Hash64::ZERO, |acc, f| acc.wrapping_add(f.randomization_seed));

        if optimize_field_order && fields.len() > 1 {
            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
                #[cfg(feature = "randomize")]
                {
                    use rand::SeedableRng;
                    use rand::seq::SliceRandom;
                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
                    // ordering.
                    let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64(
                        field_seed.wrapping_add(repr.field_shuffle_seed).as_u64(),
                    );

                    // Shuffle the ordering of the fields.
                    optimizing.shuffle(&mut rng);
                }
                // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
                // not depend on the layout of the tail.
                let max_field_align =
                    fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
                let largest_niche_size = fields_excluding_tail
                    .iter()
                    .filter_map(|f| f.largest_niche)
                    .map(|n| n.available(dl))
                    .max()
                    .unwrap_or(0);

                // Calculates a sort key to group fields by their alignment or possibly some
                // size-derived pseudo-alignment.
                let alignment_group_key = |layout: &F| {
                    // The two branches here return values that cannot be meaningfully compared with
                    // each other. However, we know that consistently for all executions of
                    // `alignment_group_key`, one or the other branch will be taken, so this is okay.
                    if let Some(pack) = pack {
                        // Return the packed alignment in bytes.
                        layout.align.abi.min(pack).bytes()
                    } else {
                        // Returns `log2(effective-align)`. The calculation assumes that size is an
                        // integer multiple of align, except for ZSTs.
                        let align = layout.align.abi.bytes();
                        let size = layout.size.bytes();
                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
                        // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
                        let size_as_align = align.max(size).trailing_zeros();
                        let size_as_align = if largest_niche_size > 0 {
                            match niche_bias {
                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
                                // array to the front in the first case (for aligned loads) but keep
                                // the bool in front in the second case for its niches.
                                NicheBias::Start => {
                                    max_field_align.trailing_zeros().min(size_as_align)
                                }
                                // When moving niches towards the end of the struct then for
                                // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
                                // in the align-1 group because its bool can be moved closer to the end.
                                NicheBias::End if niche_size == largest_niche_size => {
                                    align.trailing_zeros()
                                }
                                NicheBias::End => size_as_align,
                            }
                        } else {
                            size_as_align
                        };
                        size_as_align as u64
                    }
                };

                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        // Currently `LayoutS` only exposes a single niche so sorting is usually
                        // sufficient to get one niche into the preferred position. If it ever
                        // supported multiple niches then a more advanced pick-and-pack approach could
                        // provide better results. But even for the single-niche case it's not
                        // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
                        // bool to the front but it would require packing the tuple together with the
                        // u16 to build a 4-byte group so that the u32 can be placed after it without
                        // padding. This kind of packing can't be achieved by sorting.
1145                        optimizing.sort_by_key(|&x| {
1146                            let f = &fields[x];
1147                            let field_size = f.size.bytes();
1148                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
1149                            let niche_size_key = match niche_bias {
1150                                // large niche first
1151                                NicheBias::Start => !niche_size,
1152                                // large niche last
1153                                NicheBias::End => niche_size,
1154                            };
1155                            let inner_niche_offset_key = match niche_bias {
1156                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
1157                                NicheBias::End => f.largest_niche.map_or(0, |n| {
1158                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
1159                                }),
1160                            };
1161
1162                            (
1163                                // Then place largest alignments first.
1164                                cmp::Reverse(alignment_group_key(f)),
1165                                // Then prioritize niche placement within alignment group according to
1166                                // `niche_bias_start`.
1167                                niche_size_key,
1168                                // Then among fields with equally-sized niches prefer the ones
1169                                // closer to the start/end of the field.
1170                                inner_niche_offset_key,
1171                            )
1172                        });
1173                    }
1174
1175                    StructKind::Prefixed(..) => {
1176                        // Sort in ascending alignment so that the layout stays optimal
1177                        // regardless of the prefix.
1178                        // And put the largest niche in an alignment group at the end
1179                        // so it can be used as discriminant in jagged enums
1180                        optimizing.sort_by_key(|&x| {
1181                            let f = &fields[x];
1182                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
1183                            (alignment_group_key(f), niche_size)
1184                        });
1185                    }
1186                }
1187
1188                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
1189                //                 regardless of the status of `-Z randomize-layout`
1190            }
1191        }
        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
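        // Illustrative sketch (hypothetical three-field struct): if
        // inverse_memory_index is [2, 0, 1], field 2 is placed first in memory;
        // inverting yields memory_index = [1, 2, 0], i.e. field 0 sits second,
        // field 1 third, and field 2 first.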
        let mut unsized_field = None::<&F>;
        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }
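        // Illustrative numbers (hypothetical prefix): with prefix_size = 1 and
        // prefix_align = 4, fields start at offset 1.align_to(4) = 4, and the
        // struct's alignment is raised to at least 4 (unless `pack` caps the
        // prefix alignment first).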
        for &i in &inverse_memory_index {
            let field = &fields[i];
            if let Some(unsized_field) = unsized_field {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
            }

            if field.is_unsized() {
                if let StructKind::MaybeUnsized = kind {
                    unsized_field = Some(field);
                } else {
                    return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
                }
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);
            max_repr_align = max_repr_align.max(field.max_repr_align);
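            // Illustrative numbers (hypothetical repr): under #[repr(packed(2))],
            // a u32 field's alignment is capped at 2, so after a u16 it is placed
            // at offset 2 instead of 4.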

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                // Pick up larger niches.
                let prefer_new_niche = match niche_bias {
                    NicheBias::Start => available > largest_niche_available,
                    // if there are several niches of the same size then pick the last one
                    NicheBias::End => available >= largest_niche_available,
                };
                if prefer_new_niche {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }
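            // Illustrative sketch (hypothetical fields): in (u16, bool) the bool's
            // niche starts at offset 0 within the field; once the field is placed
            // at struct offset 2, `niche.offset += offset` records the niche at
            // offset 2 of the whole struct.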

            offset =
                offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
        }

        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align.abi;
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }
        // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
        let align = align;
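        // Illustrative sketch (hypothetical repr): for #[repr(align(8))] struct
        // S(u32), `unadjusted_abi_align` stays 4 (the field's natural ABI
        // alignment) while `align.abi` is raised to 8 by the repr.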

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5] should be 0.
        // In general, memory_index[inverse_memory_index[i]] = i.
        // Note: if we didn't optimize, it's already right.
        let memory_index = if optimize_field_order {
            inverse_memory_index.invert_bijective_mapping()
        } else {
            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
            inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
        };
        let size = min_size.align_to(align.abi);
        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }
        let mut layout_of_single_non_zst_field = None;
        let sized = unsized_field.is_none();
        let mut abi = BackendRepr::Memory { sized };

        let optimize_abi = !repr.inhibit_newtype_abi_optimization();

        // Try to make this a Scalar/ScalarPair.
        if sized && size.bytes() > 0 {
            // We skip *all* ZSTs here and later check if we are good in terms of alignment.
            // This lets us handle some cases involving aligned ZSTs.
            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    layout_of_single_non_zst_field = Some(field);

                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.backend_repr {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => {
                                abi = field.backend_repr;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            BackendRepr::ScalarPair(..) => {
                                abi = field.backend_repr;
                            }
                            _ => {}
                        }
                    }
                }
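                // Illustrative sketch (hypothetical newtype): struct Wrap(f64) has
                // a single non-ZST field at offset 0 with matching size and
                // alignment, so it inherits the f64's Scalar ABI, unless a repr
                // such as #[repr(C)] inhibits the newtype optimization.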

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.backend_repr, b.backend_repr) {
                        (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair::<FieldIdx, VariantIdx>(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index.raw, [0, 1]);
                                    offsets
                                }
                                FieldsShape::Primitive
                                | FieldsShape::Array { .. }
                                | FieldsShape::Union(..) => {
                                    panic!("encountered a non-arbitrary layout during univariant layout")
                                }
                            };
                            if offsets[i] == pair_offsets[FieldIdx::new(0)]
                                && offsets[j] == pair_offsets[FieldIdx::new(1)]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.backend_repr;
                            }
                        }
                        _ => {}
                    }
                }
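                // Illustrative sketch (hypothetical pair): for (bool, u32), field
                // ordering may place the u32 first; the synthesized scalar pair
                // then matches offsets 0 and 4, align 4, and size 8, so the
                // ScalarPair ABI is adopted.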

                _ => {}
            }
        }
        if fields.iter().any(|f| f.is_uninhabited()) {
            abi = BackendRepr::Uninhabited;
        }

        let unadjusted_abi_align = if repr.transparent() {
            match layout_of_single_non_zst_field {
                Some(l) => l.unadjusted_abi_align,
                None => {
                    // `repr(transparent)` with all ZST fields.
                    align.abi
                }
            }
        } else {
            unadjusted_abi_align
        };
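        // Illustrative sketch (hypothetical wrapper): #[repr(transparent)] struct
        // T(u32) forwards the u32's unadjusted ABI alignment of 4, so T is passed
        // like a bare u32 at ABI boundaries.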

        let seed = field_seed.wrapping_add(repr.field_shuffle_seed);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            backend_repr: abi,
            largest_niche,
            align,
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: seed,
        })
    }

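    /// Formats the per-field niche summary used in layout debug output.
    ///
    /// Sketch of the format, read off the `write!` calls below: each field
    /// prints as `[o<offset>a<align>s<size>]`, optionally extended with
    /// ` n<niche offset>b<log2 of available values>s<niche value size>` when the
    /// field has a niche, e.g. `[o0a4s4] [o4a1s1 n0b7s1] ` for a (u32, bool)
    /// pair (hypothetical example).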
    fn format_field_niches<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        layout: &LayoutData<FieldIdx, VariantIdx>,
        fields: &IndexSlice<FieldIdx, F>,
    ) -> String {
        let dl = self.cx.data_layout();
        let mut s = String::new();
        for i in layout.fields.index_by_increasing_offset() {
            let offset = layout.fields.offset(i);
            let f = &fields[FieldIdx::new(i)];
            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
            if let Some(n) = f.largest_niche {
                write!(
                    s,
                    " n{}b{}s{}",
                    n.offset.bytes(),
                    n.available(dl).ilog2(),
                    n.value.size(dl).bytes()
                )
                .unwrap();
            }
            write!(s, "] ").unwrap();
        }
        s
    }
}