// rustc_abi/layout.rs

use std::collections::BTreeSet;
use std::fmt::{self, Write};
use std::ops::{Bound, Deref};
use std::{cmp, iter};

use rustc_hashes::Hash64;
use rustc_index::Idx;
use rustc_index::bit_set::BitMatrix;
use tracing::{debug, trace};

use crate::{
    AbiAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
    LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
    TargetDataLayout, Variants, WrappingRange,
};

mod coroutine;
mod simple;

#[cfg(feature = "nightly")]
mod ty;

#[cfg(feature = "nightly")]
pub use ty::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};

// A variant is absent if it's uninhabited and only has 1-ZST fields (zero-sized,
// alignment 1). Present uninhabited variants only require space for their fields,
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
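//
// Illustrative example (added for exposition): in `enum E { A(!), B(u32) }`,
// variant `A` is uninhabited and its only field is a 1-ZST, so `A` is absent
// and needs no tag value; in `enum F { A(!, u32), B(u32) }`, `A` stays present
// because its `u32` field may be partially initialized and must keep its space.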
fn absent<'a, FieldIdx, VariantIdx, F>(fields: &IndexSlice<FieldIdx, F>) -> bool
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let uninhabited = fields.iter().any(|f| f.is_uninhabited());
    // We cannot ignore alignment; that might lead us to entirely discard a variant and
    // produce an enum that is less aligned than it should be!
    let is_1zst = fields.iter().all(|f| f.is_1zst());
    uninhabited && is_1zst
}

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
    Start,
    End,
}
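
// Illustrative example (added for exposition): for fields `u8` and `bool` in the
// same alignment group, `Start` sorts the niche-carrying `bool` to the lowest
// offset, while `End` sorts it last so enum layout can pack other variants'
// data before the niche.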

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LayoutCalculatorError<F> {
    /// An unsized type was found in a location where a sized type was expected.
    ///
    /// This is not always a compile error, for example if there is a `[T]: Sized`
    /// bound in a where clause.
    ///
    /// Contains the field that was unexpectedly unsized.
    UnexpectedUnsized(F),

    /// A type was too large for the target platform.
    SizeOverflow,

    /// A union had no fields.
    EmptyUnion,

    /// The fields or variants have irreconcilable reprs.
    ReprConflict,

    /// The length of a SIMD type is zero.
    ZeroLengthSimdType,

    /// The length of a SIMD type exceeds the maximum number of lanes.
    OversizedSimdType { max_lanes: u64 },

    /// An element type of a SIMD type isn't a primitive.
    NonPrimitiveSimdType(F),
}

impl<F> LayoutCalculatorError<F> {
    pub fn without_payload(&self) -> LayoutCalculatorError<()> {
        use LayoutCalculatorError::*;
        match *self {
            UnexpectedUnsized(_) => UnexpectedUnsized(()),
            SizeOverflow => SizeOverflow,
            EmptyUnion => EmptyUnion,
            ReprConflict => ReprConflict,
            ZeroLengthSimdType => ZeroLengthSimdType,
            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
        }
    }

    /// Format an untranslated diagnostic for this type
    ///
    /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
    pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use LayoutCalculatorError::*;
        f.write_str(match self {
            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
            SizeOverflow => "size overflow",
            EmptyUnion => "type is a union with no fields",
            ReprConflict => "type has an invalid repr",
            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
                "invalid simd type definition"
            }
        })
    }
}

type LayoutCalculatorResult<FieldIdx, VariantIdx, F> =
    Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>>;

#[derive(Clone, Copy, Debug)]
pub struct LayoutCalculator<Cx> {
    pub cx: Cx,
}

impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
    pub fn new(cx: Cx) -> Self {
        Self { cx }
    }

    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
        &self,
        element: &LayoutData<FieldIdx, VariantIdx>,
        count_if_sized: Option<u64>, // None for slices
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let count = count_if_sized.unwrap_or(0);
        let size =
            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: element.size, count },
            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
            largest_niche: element.largest_niche.filter(|_| count != 0),
            uninhabited: element.uninhabited && count != 0,
            align: element.align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: element.align.abi,
            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
        })
    }

    pub fn scalable_vector_type<FieldIdx, VariantIdx, F>(
        &self,
        element: F,
        count: u64,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
    where
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    {
        vector_type_layout(VectorKind::Scalable, self.cx.data_layout(), element, count)
    }

    pub fn simd_type<FieldIdx, VariantIdx, F>(
        &self,
        element: F,
        count: u64,
        repr_packed: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
    where
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    {
        let kind = if repr_packed { VectorKind::PackedFixed } else { VectorKind::Fixed };
        vector_type_layout(kind, self.cx.data_layout(), element, count)
    }

    /// Compute the layout for a coroutine.
    ///
    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
    pub fn coroutine<
        'a,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
        VariantIdx: Idx,
        FieldIdx: Idx,
        LocalIdx: Idx,
    >(
        &self,
        local_layouts: &IndexSlice<LocalIdx, F>,
        prefix_layouts: IndexVec<FieldIdx, F>,
        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
        tag_to_layout: impl Fn(Scalar) -> F,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        coroutine::layout(
            self,
            local_layouts,
            prefix_layouts,
            variant_fields,
            storage_conflicts,
            tag_to_layout,
        )
    }

    pub fn univariant<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front then we do a second
        // run and bias niches to the right and then check which one is closer to one of the
        // struct's edges.
        if let Ok(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
            if !matches!(kind, StructKind::MaybeUnsized) {
                if let Some(niche) = layout.largest_niche {
                    let head_space = niche.offset.bytes();
                    let niche_len = niche.value.size(dl).bytes();
                    let tail_space = layout.size.bytes() - head_space - niche_len;

                    // This may end up doing redundant work if the niche is already in the last
                    // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                    // to get the unpadded size so we try anyway.
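                    //
                    // Illustrative example (values assumed for exposition): for a
                    // layout of size 8 with a 1-byte niche at offset 4, head_space
                    // is 4 and tail_space is 8 - 4 - 1 = 3; if the end-biased run
                    // moves the niche to offset 7 (alt_head_space = 7, tail = 0),
                    // it wins because 7 > 4 and 7 > 3.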
                    if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                        let alt_layout = self
                            .univariant_biased(fields, repr, kind, NicheBias::End)
                            .expect("alt layout should always work");
                        let alt_niche = alt_layout
                            .largest_niche
                            .expect("alt layout should have a niche like the regular one");
                        let alt_head_space = alt_niche.offset.bytes();
                        let alt_niche_len = alt_niche.value.size(dl).bytes();
                        let alt_tail_space =
                            alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                        debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

                        let prefer_alt_layout =
                            alt_head_space > head_space && alt_head_space > tail_space;

                        debug!(
                            "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                            layout: {}\n\
                            alt_layout: {}\n",
                            layout.size.bytes(),
                            head_space,
                            niche_len,
                            tail_space,
                            alt_head_space,
                            alt_niche_len,
                            alt_tail_space,
                            layout.fields.count(),
                            prefer_alt_layout,
                            self.format_field_niches(layout, fields),
                            self.format_field_niches(&alt_layout, fields),
                        );

                        if prefer_alt_layout {
                            return Ok(alt_layout);
                        }
                    }
                }
            }
        }
        layout
    }

    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        always_sized: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if !repr.c() && absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Ok(LayoutData::never_type(&self.cx));
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => VariantIdx::new(0),
        };

        // take the struct path if it is an actual struct
        if !is_enum ||
            // or for optimizing univariant enums
            (present_second.is_none() && !repr.inhibit_enum_layout_opt())
        {
            self.layout_of_struct(
                repr,
                variants,
                is_enum,
                is_special_no_niche,
                scalar_valid_range,
                always_sized,
                present_first,
            )
        } else {
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(is_enum);
            self.layout_of_enum(repr, variants, discr_range_of_repr, discriminants)
        }
    }

    pub fn layout_of_union<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;

        // If all the non-ZST fields have the same repr and union repr optimizations aren't
        // disabled, we can use that common repr for the union as a whole.
        struct AbiMismatch;
        let mut common_non_zst_repr_and_align = if repr.inhibits_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };
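
        // Illustrative example (added for exposition): in
        // `union U { a: u32, b: core::num::NonZeroU32 }`, both non-ZST fields
        // lower to a 32-bit integer scalar; `to_union()` below discards the
        // `NonZeroU32` validity range, the reprs then match, and `U` can keep a
        // scalar ABI (with undef allowed) instead of falling back to `Memory`.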

        let mut size = Size::ZERO;
        let only_variant_idx = VariantIdx::new(0);
        let only_variant = &variants[only_variant_idx];
        for field in only_variant {
            if field.is_unsized() {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
            }

            align = align.max(field.align.abi);
            max_repr_align = max_repr_align.max(field.max_repr_align);
            size = cmp::max(size, field.size);

            if field.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_repr_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.backend_repr.to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_repr_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                            assert_eq!(
                                common_align, field.align.abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_repr_and_align = Ok(Some((field_abi, field.align.abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(pack);
        }
        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align;
        if let Some(repr_align) = repr.align {
            align = align.max(repr_align);
        }
        // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
        let align = align;
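
        // E.g. (illustrative, added for exposition): in a `#[repr(packed(2))]`
        // union with a `u64` field, `align` is capped at 2 and
        // `unadjusted_abi_align` is 2 as well; with `#[repr(align(16))]` instead,
        // `unadjusted_abi_align` keeps the field alignment of 8 while `align` is
        // raised to 16.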

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let backend_repr = match common_non_zst_repr_and_align {
            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
            Ok(Some((repr, _))) => match repr {
                // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _)
                    if repr.scalar_align(dl).unwrap() != align =>
                {
                    BackendRepr::Memory { sized: true }
                }
                // Vectors require at least element alignment, else disable the opt
                BackendRepr::SimdVector { element, count: _ } if element.align(dl).abi > align => {
                    BackendRepr::Memory { sized: true }
                }
                // the alignment tests passed and we can use this
                BackendRepr::Scalar(..)
                | BackendRepr::ScalarPair(..)
                | BackendRepr::SimdVector { .. }
                | BackendRepr::ScalableVector { .. }
                | BackendRepr::Memory { .. } => repr,
            },
        };

        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
            return Err(LayoutCalculatorError::EmptyUnion);
        };

        let combined_seed = only_variant
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        Ok(LayoutData {
            variants: Variants::Single { index: only_variant_idx },
            fields: FieldsShape::Union(union_field_count),
            backend_repr,
            largest_niche: None,
            uninhabited: false,
            align: AbiAlign::new(align),
            size: size.align_to(align),
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        })
    }

    /// single-variant enums are just structs, if you think about it
    fn layout_of_struct<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        always_sized: bool,
        present_first: VariantIdx,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Struct, or univariant enum equivalent to a struct.
        // (Typechecking will reject discriminant-sizing attrs.)

        let dl = self.cx.data_layout();
        let v = present_first;
        let kind = if is_enum || variants[v].is_empty() || always_sized {
            StructKind::AlwaysSized
        } else {
            StructKind::MaybeUnsized
        };

        let mut st = self.univariant(&variants[v], repr, kind)?;
        st.variants = Variants::Single { index: v };

        if is_special_no_niche {
            let hide_niches = |scalar: &mut _| match scalar {
                Scalar::Initialized { value, valid_range } => {
                    *valid_range = WrappingRange::full(value.size(dl))
                }
                // Already doesn't have any niches
                Scalar::Union { .. } => {}
            };
            match &mut st.backend_repr {
                BackendRepr::Scalar(scalar) => hide_niches(scalar),
                BackendRepr::ScalarPair(a, b) => {
                    hide_niches(a);
                    hide_niches(b);
                }
                BackendRepr::SimdVector { element, .. }
                | BackendRepr::ScalableVector { element, .. } => hide_niches(element),
                BackendRepr::Memory { sized: _ } => {}
            }
            st.largest_niche = None;
            return Ok(st);
        }

        let (start, end) = scalar_valid_range;
        match st.backend_repr {
            BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
                // Enlarging validity ranges would result in missed
                // optimizations, *not* wrongly assuming the inner
                // value is valid. e.g. unions already enlarge validity ranges,
                // because the values may be uninitialized.
                //
                // Because of that we only check that the start and end
                // of the range is representable with this scalar type.
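                //
                // Illustrative example: `#[rustc_layout_scalar_valid_range_start(1)]`
                // on a struct wrapping a `u32` (how `NonZeroU32` has historically
                // been defined) narrows the scalar's valid range to `1..=u32::MAX`,
                // creating the niche that keeps `Option<NonZeroU32>` 4 bytes.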

                let max_value = scalar.size(dl).unsigned_int_max();
                if let Bound::Included(start) = start {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(start <= max_value, "{start} > {max_value}");
                    scalar.valid_range_mut().start = start;
                }
                if let Bound::Included(end) = end {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(end <= max_value, "{end} > {max_value}");
                    scalar.valid_range_mut().end = end;
                }

                // Update `largest_niche` if we have introduced a larger niche.
                let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                if let Some(niche) = niche {
                    match st.largest_niche {
                        Some(largest_niche) => {
                            // Replace the existing niche even if they're equal,
                            // because this one is at a lower offset.
                            if largest_niche.available(dl) <= niche.available(dl) {
                                st.largest_niche = Some(niche);
                            }
                        }
                        None => st.largest_niche = Some(niche),
                    }
                }
            }
            _ => assert!(
                start == Bound::Unbounded && end == Bound::Unbounded,
                "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
            ),
        }

        Ok(st)
    }

    fn layout_of_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        // bail if the enum has an incoherent repr that cannot be computed
        if repr.packed() {
            return Err(LayoutCalculatorError::ReprConflict);
        }

        let calculate_niche_filling_layout = || -> Option<LayoutData<FieldIdx, VariantIdx>> {
            if repr.inhibit_enum_layout_opt() {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut max_repr_align = repr.align;
            let mut unadjusted_abi_align = align;

            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align.abi);
                    max_repr_align = max_repr_align.max(st.max_repr_align);
                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count =
                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;
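
            // Illustrative example (added for exposition): for
            // `enum E { A(&'static u8), B, C }`, the largest variant `A` carries a
            // pointer niche (null is invalid), so `B` and `C` are encoded as two
            // reserved niche values and `E` stays pointer-sized with no extra tag.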

            // Use the largest niche in the largest variant.
            let niche = variant_layouts[largest_variant_index].largest_niche?;
            let (niche_start, niche_scalar) = niche.reserve(dl, count)?;
            let niche_offset = niche.offset;
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for offset in offsets.iter_mut() {
                            *offset += this_offset;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.is_uninhabited() {
                    layout.backend_repr = BackendRepr::Memory { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align.abi;

            let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited());
            let abi = if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].backend_repr {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
                    BackendRepr::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => BackendRepr::Memory { sized: true },
                }
            } else {
                BackendRepr::Memory { sized: true }
            };

            let combined_seed = variant_layouts
                .iter()
                .map(|v| v.randomization_seed)
                .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

            let layout = LayoutData {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: FieldIdx::new(0),
                    variants: variant_layouts,
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    in_memory_order: [FieldIdx::new(0)].into(),
                },
                backend_repr: abi,
                largest_niche,
                uninhabited,
                size,
                align: AbiAlign::new(align),
                max_repr_align,
                unadjusted_abi_align,
                randomization_seed: combined_seed,
            };

            Some(layout)
        };

        let niche_filling_layout = calculate_niche_filling_layout();

        let discr_type = repr.discr_type();
        let discr_int = Integer::from_attr(dl, discr_type);
        // Because we can only represent one range of valid values, we'll look for the
        // largest range of invalid values and pick everything else as the range of valid
        // values.

        // First we need to sort the possible discriminant values so that we can look for the largest gap:
        let valid_discriminants: BTreeSet<i128> = discriminants
            .filter(|&(i, _)| repr.c() || variants[i].iter().all(|f| !f.is_uninhabited()))
            .map(|(_, val)| {
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
                    // FIXME: do this at the discriminant iterator creation sites
                    discr_int.size().sign_extend(val as u128)
                } else {
                    val
                }
            })
            .collect();
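        // Illustrative example (added for exposition): with a `u8` repr and valid
        // discriminants {0, 1, 2}, the pairs built below are (0,1), (1,2), (2,0);
        // the wrap-around pair (2,0) has the largest gap (255 - 2 = 253 invalid
        // values), so the valid range becomes 0..=2 and 3..=255 is the niche.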
        trace!(?valid_discriminants);
        let discriminants = valid_discriminants.iter().copied();
        let next_discriminants =
            discriminants.clone().chain(valid_discriminants.first().copied()).skip(1);
        // Iterate over pairs of each discriminant together with the next one.
        // Since they were sorted, we can now compute the niche sizes and pick the largest.
        let discriminants = discriminants.zip(next_discriminants);
        let largest_niche = discriminants.max_by_key(|&(start, end)| {
            trace!(?start, ?end);
            // If this is a wraparound range, the niche size is `MAX - abs(diff)`, as the diff between
            // the two end points is actually the size of the valid discriminants.
            let dist = if start > end {
                // Overflow can happen for 128 bit discriminants if `end` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                let dist = start.wrapping_sub(end);
                if discr_type.is_signed() {
                    discr_int.signed_max().wrapping_sub(dist) as u128
                } else {
                    discr_int.size().unsigned_int_max() - dist as u128
                }
            } else {
                // Overflow can happen for 128 bit discriminants if `start` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                end.wrapping_sub(start) as u128
            };
            trace!(?dist);
            dist
        });
        trace!(?largest_niche);

        // `max` is the last valid discriminant before the largest niche
        // `min` is the first valid discriminant after the largest niche
        let (max, min) = largest_niche
            // We might have no inhabited variants, so pretend there's at least one.
            .unwrap_or((0, 0));
        let (min_ity, signed) = discr_range_of_repr(min, max);

        let mut align = dl.aggregate_align;
        let mut max_repr_align = repr.align;
        let mut unadjusted_abi_align = align;

        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::new(field_idx)];
                    if !field.is_1zst() {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align.abi);
                max_repr_align = max_repr_align.max(st.max_repr_align);
                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                Ok(st)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align);

        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck for
            // some reason at this point (based on the values the discriminant can take on).
            // Mostly because this discriminant will be loaded, and then stored into a variable
            // of the type calculated by typeck. Consider such a case (a bug): typeck decided
            // on a byte-sized discriminant, but layout thinks we need 16 bits to store all
            // the discriminant values. That would be a bug, because then, in codegen, in
            // order to store this 16-bit discriminant into an 8-bit sized temporary, some of
            // the space necessary to represent it would have to be discarded (or layout is
            // wrong in thinking it needs 16 bits).
            panic!(
                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
            );
            // However, it is fine to make the discriminant type larger (as an optimisation)
            // after this point – we'll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.
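        //
        // Illustrative example (added for exposition): if every variant starts
        // with a 4-byte-aligned field, a 1-byte tag leaves 3 padding bytes before
        // it; widening the tag to `u32` (absent an explicit repr) turns that
        // padding into defined tag bytes.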

        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("encountered a non-arbitrary layout during enum layout")
                    }
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Primitive::Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
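        // Illustrative example (added for exposition): for
        // `#[repr(i8)] enum E { A = -1, B = 0 }`, the gap search yields min = -1
        // and max = 0, so the tag's wrapping valid range is 0xff..=0x00, i.e.
        // exactly the two used values are valid.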
        let mut abi = BackendRepr::Memory { sized: true };

        let uninhabited = layout_variants.iter().all(|v| v.is_uninhabited());
        if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = BackendRepr::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            // That's possible only if we can find a common primitive type for all variants.
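            // Illustrative example (added for exposition): in
            // `enum E { A(u32), B(i32) }`, each variant has one non-ZST field and
            // both are 32-bit integers at the same offset after the tag, so the
            // enum can be lowered as a ScalarPair(tag, 32-bit scalar) instead of
            // plain Memory.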
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!("encountered a non-arbitrary layout during enum layout");
                };
                // We skip *all* ZST here and later check if we are good in terms of alignment.
                // This lets us handle some cases involving aligned ZST.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some((old_prim, common_offset)) = common_prim {
                    // All variants must be at the same offset
                    if offset != common_offset {
                        common_prim = None;
                        break;
                    }
                    // This is pretty conservative. We could go fancier
                    // by realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    let new_prim = match (old_prim, prim) {
                        // Allow all identical primitives.
                        (x, y) if x == y => x,
                        // Allow integers of the same size with differing signedness.
                        // We arbitrarily choose the signedness of the first variant.
                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
                        // Allow integers mixed with pointers of the same layout.
                        // We must represent this using a pointer, to avoid
                        // roundtripping pointers through ptrtoint/inttoptr.
                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
                        {
                            p
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    // We may be updating the primitive here, for example from int->ptr.
                    common_prim = Some((new_prim, common_offset));
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    let size = prim.size(dl);
                    assert!(size.bits() <= 128);
                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair =
                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
                        assert_eq!(in_memory_order.raw, [FieldIdx::new(0), FieldIdx::new(1)]);
                        offsets
                    }
                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
                };
                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
                    && pair_offsets[FieldIdx::new(1)] == *offset
                    && align == pair.align.abi
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.backend_repr;
                }
            }
        }

        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0
                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
                {
                    variant.backend_repr = abi;
                    // Also need to bump up the size and alignment, so that the entire value fits
                    // in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let combined_seed = layout_variants
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        let tagged_layout = LayoutData {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: FieldIdx::new(0),
                variants: layout_variants,
            },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                in_memory_order: [FieldIdx::new(0)].into(),
            },
            largest_niche,
            uninhabited,
            backend_repr: abi,
            align: AbiAlign::new(align),
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        };

        let best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size = |l: &LayoutData<FieldIdx, VariantIdx>| {
                    l.largest_niche.map_or(0, |n| n.available(dl))
                };
                match (tl.size.cmp(&nl.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        Ok(best_layout)
    }

    fn univariant_biased<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
        niche_bias: NicheBias,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let pack = repr.pack;
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;
        let mut in_memory_order: IndexVec<u32, FieldIdx> = fields.indices().collect();
        let optimize_field_order = !repr.inhibit_struct_field_reordering();
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut in_memory_order.raw[..end];
        let fields_excluding_tail = &fields.raw[..end];
        // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
        let field_seed = fields_excluding_tail
            .iter()
            .fold(Hash64::ZERO, |acc, f| acc.wrapping_add(f.randomization_seed));

        if optimize_field_order && fields.len() > 1 {
            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
                #[cfg(feature = "randomize")]
                {
                    use rand::SeedableRng;
                    use rand::seq::SliceRandom;
                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
                    // ordering.
                    let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64(
                        field_seed.wrapping_add(repr.field_shuffle_seed).as_u64(),
                    );

                    // Shuffle the ordering of the fields.
                    optimizing.shuffle(&mut rng);
                }
                // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
                // not depend on the layout of the tail.
                let max_field_align =
                    fields_excluding_tail.iter().map(|f| f.align.bytes()).max().unwrap_or(1);
                let largest_niche_size = fields_excluding_tail
                    .iter()
                    .filter_map(|f| f.largest_niche)
                    .map(|n| n.available(dl))
                    .max()
                    .unwrap_or(0);

                // Calculates a sort key to group fields by their alignment or possibly some
                // size-derived pseudo-alignment.
                let alignment_group_key = |layout: &F| {
                    // The two branches here return values that cannot be meaningfully compared with
                    // each other. However, we know that consistently for all executions of
                    // `alignment_group_key`, one or the other branch will be taken, so this is okay.
                    if let Some(pack) = pack {
                        // Return the packed alignment in bytes.
                        layout.align.abi.min(pack).bytes()
                    } else {
                        // Returns `log2(effective-align)`. The calculation assumes that size is an
                        // integer multiple of align, except for ZSTs.
                        let align = layout.align.bytes();
                        let size = layout.size.bytes();
                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
                        // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
                        let size_as_align = align.max(size).trailing_zeros();
                        let size_as_align = if largest_niche_size > 0 {
                            match niche_bias {
                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
                                // array to the front in the first case (for aligned loads) but keep
                                // the bool in front in the second case for its niches.
                                NicheBias::Start => {
                                    max_field_align.trailing_zeros().min(size_as_align)
                                }
                                // When moving niches towards the end of the struct then for
                                // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
                                // in the align-1 group because its bool can be moved closer to the end.
                                NicheBias::End if niche_size == largest_niche_size => {
                                    align.trailing_zeros()
                                }
                                NicheBias::End => size_as_align,
                            }
                        } else {
                            size_as_align
                        };
                        size_as_align as u64
                    }
                };
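
                // Worked example (added for exposition, niche-free fields): a
                // `u64` field yields log2(max(align 8, size 8)) = 3, while
                // `[u8; 4]` yields log2(max(1, 4)) = 2, so the array groups with
                // genuinely align-4 fields rather than with align-1 ones.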
1193
1194                match kind {
1195                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        // Currently `LayoutData` only exposes a single niche, so sorting is usually
                        // sufficient to get one niche into the preferred position. If it ever
                        // supported multiple niches then a more advanced pick-and-pack approach could
                        // provide better results. But even for the single-niche case it's not
                        // optimal. E.g. for `A(u32, (bool, u8), u16)` it would be possible to move the
                        // bool to the front, but it would require packing the tuple together with the
                        // u16 to build a 4-byte group so that the u32 can be placed after it without
                        // padding. This kind of packing can't be achieved by sorting.
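                        // For example (illustrative): for `struct S(u16, u32, bool)` with
                        // `NicheBias::Start`, the alignment-group keys are 2 (`u32`),
                        // 1 (`u16`) and 0 (`bool`), so after the descending sort the
                        // fields are laid out as `u32, u16, bool` at offsets 0, 4, 6
                        // with no interior padding.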
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let field_size = f.size.bytes();
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            let niche_size_key = match niche_bias {
                                // Large niche first.
                                NicheBias::Start => !niche_size,
                                // Large niche last.
                                NicheBias::End => niche_size,
                            };
                            let inner_niche_offset_key = match niche_bias {
                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
                                NicheBias::End => f.largest_niche.map_or(0, |n| {
                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
                                }),
                            };

                            (
                                // Place the largest alignment groups first.
                                cmp::Reverse(alignment_group_key(f)),
                                // Then prioritize niche placement within an alignment group
                                // according to `niche_bias`.
                                niche_size_key,
                                // Then, among fields with equally-sized niches, prefer those
                                // whose niche sits closer to the start/end of the field.
                                inner_niche_offset_key,
                            )
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix.
                        // Also put the largest niche of each alignment group at the end,
                        // so it can be used as a discriminant in jagged enums.
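                        // For example (illustrative): with a 1-byte prefix and fields
                        // sorted ascending as `(u8, u16, u32)`, the variant packs as
                        // prefix at offset 0, `u8` at 1, `u16` at 2, `u32` at 4 with no
                        // padding, whereas a descending sort would waste 3 bytes after
                        // the prefix.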
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (alignment_group_key(f), niche_size)
                        });
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
        // `in_memory_order` holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of `in_memory_order` is 5.
        // We now write each field's offset to the corresponding slot;
        // field 5 with offset 0 puts 0 in `offsets[5]`.
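        // For example (illustrative): `in_memory_order = [2, 0, 1]` lays field 2 out
        // first, so after the loop below `offsets[2]` holds the smallest offset.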
        let mut unsized_field = None::<&F>;
        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(prefix_align);
            offset = prefix_size.align_to(prefix_align);
        }
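        // For example (illustrative): a 1-byte prefix with `prefix_align` of 4 places
        // the first field at offset 4; with `prefix_align` of 1 it starts at offset 1.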
        for &i in &in_memory_order {
            let field = &fields[i];
            if let Some(unsized_field) = unsized_field {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
            }

            if field.is_unsized() {
                if let StructKind::MaybeUnsized = kind {
                    unsized_field = Some(field);
                } else {
                    return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
                }
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align.abi);
            max_repr_align = max_repr_align.max(field.max_repr_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                // Pick up larger niches.
                let prefer_new_niche = match niche_bias {
                    NicheBias::Start => available > largest_niche_available,
                    // If there are several niches of the same size then pick the last one.
                    NicheBias::End => available >= largest_niche_available,
                };
                if prefer_new_niche {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset =
                offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
        }
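        // Worked example (illustrative): for fields `u32, u16, u8` in memory order
        // without `repr(packed)`, this loop assigns offsets 0, 4 and 6 and leaves
        // `offset` at 7; aligning to the struct alignment of 4 below yields size 8.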

        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align;
        if let Some(repr_align) = repr.align {
            align = align.max(repr_align);
        }
        // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        let size = min_size.align_to(align);
        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }
        let mut layout_of_single_non_zst_field = None;
        let sized = unsized_field.is_none();
        let mut abi = BackendRepr::Memory { sized };

        let optimize_abi = !repr.inhibit_newtype_abi_optimization();

        // Try to make this a Scalar/ScalarPair.
        if sized && size.bytes() > 0 {
            // We skip *all* ZSTs here and later check if we are good in terms of alignment.
            // This lets us handle some cases involving aligned ZSTs.
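            // For example (illustrative): `struct S(u32, ());` can still get a `Scalar`
            // ABI because the `()` field is skipped, while an over-aligned ZST such as
            // `[u64; 0]` raises the struct alignment, fails the
            // `align == field.align.abi` check below, and keeps the `Memory` ABI.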
            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    layout_of_single_non_zst_field = Some(field);

                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align == field.align.abi && size == field.size {
                        match field.backend_repr {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. }
                                if optimize_abi =>
                            {
                                abi = field.backend_repr;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            BackendRepr::ScalarPair(..) => {
                                abi = field.backend_repr;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
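                // For example (illustrative): `struct S(u32, bool)` takes this path and
                // can be promoted to `ScalarPair` when its field offsets match the
                // canonical scalar-pair layout computed below.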
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.backend_repr, b.backend_repr) {
                        (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair =
                                LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
                                    assert_eq!(
                                        in_memory_order.raw,
                                        [FieldIdx::new(0), FieldIdx::new(1)]
                                    );
                                    offsets
                                }
                                FieldsShape::Primitive
                                | FieldsShape::Array { .. }
                                | FieldsShape::Union(..) => {
                                    panic!("encountered a non-arbitrary layout during struct layout")
                                }
                            };
                            if offsets[i] == pair_offsets[FieldIdx::new(0)]
                                && offsets[j] == pair_offsets[FieldIdx::new(1)]
                                && align == pair.align.abi
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.backend_repr;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
        let uninhabited = fields.iter().any(|f| f.is_uninhabited());

        let unadjusted_abi_align = if repr.transparent() {
            match layout_of_single_non_zst_field {
                Some(l) => l.unadjusted_abi_align,
                None => {
                    // `repr(transparent)` with all ZST fields.
                    align
                }
            }
        } else {
            unadjusted_abi_align
        };
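        // For example (illustrative): `#[repr(transparent)] struct W(T);` forwards `T`'s
        // unadjusted ABI alignment, so ABI code that consults it treats `W` like `T`.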

        let seed = field_seed.wrapping_add(repr.field_shuffle_seed);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, in_memory_order },
            backend_repr: abi,
            largest_niche,
            uninhabited,
            align: AbiAlign::new(align),
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: seed,
        })
    }

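    /// Renders a compact per-field summary of `layout` in increasing-offset order:
    /// `[o<offset>a<align>s<size>]` for each field, plus ` n<offset>b<log2 of available
    /// values>s<size>` when the field has a niche.
    ///
    /// For example (illustrative): a `u32` followed by a `bool` renders as
    /// `[o0a4s4] [o4a1s1 n0b7s1] `.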
    fn format_field_niches<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        layout: &LayoutData<FieldIdx, VariantIdx>,
        fields: &IndexSlice<FieldIdx, F>,
    ) -> String {
        let dl = self.cx.data_layout();
        let mut s = String::new();
        for i in layout.fields.index_by_increasing_offset() {
            let offset = layout.fields.offset(i);
            let f = &fields[FieldIdx::new(i)];
            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.bytes(), f.size.bytes()).unwrap();
            if let Some(n) = f.largest_niche {
                write!(
                    s,
                    " n{}b{}s{}",
                    n.offset.bytes(),
                    n.available(dl).ilog2(),
                    n.value.size(dl).bytes()
                )
                .unwrap();
            }
            write!(s, "] ").unwrap();
        }
        s
    }
}

enum VectorKind {
    /// `#[rustc_scalable_vector]`
    Scalable,
    /// `#[repr(simd, packed)]`
    PackedFixed,
    /// `#[repr(simd)]`
    Fixed,
}

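/// Computes the layout of a SIMD vector type from its element layout and lane count.
///
/// For example (illustrative): with `VectorKind::Fixed`, an `f32` element and
/// `count == 4` (i.e. `#[repr(simd)] struct f32x4([f32; 4]);`), this returns a
/// 16-byte `SimdVector` layout with the target's LLVM-like vector alignment.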
fn vector_type_layout<FieldIdx, VariantIdx, F>(
    kind: VectorKind,
    dl: &TargetDataLayout,
    element: F,
    count: u64,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let elt = element.as_ref();
    if count == 0 {
        return Err(LayoutCalculatorError::ZeroLengthSimdType);
    } else if count > crate::MAX_SIMD_LANES {
        return Err(LayoutCalculatorError::OversizedSimdType { max_lanes: crate::MAX_SIMD_LANES });
    }

    let BackendRepr::Scalar(element) = elt.backend_repr else {
        return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
    };

    // Compute the size and alignment of the vector.
    let size = elt.size.checked_mul(count, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
    let (repr, align) = match kind {
        VectorKind::Scalable => {
            (BackendRepr::ScalableVector { element, count }, dl.llvmlike_vector_align(size))
        }
        // Non-power-of-two vectors have padding up to the next power of two.
        // If we're a packed repr, remove the padding while keeping the alignment as close
        // to a vector as possible.
        VectorKind::PackedFixed if !count.is_power_of_two() => {
            (BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size))
        }
        VectorKind::PackedFixed | VectorKind::Fixed => {
            (BackendRepr::SimdVector { element, count }, dl.llvmlike_vector_align(size))
        }
    };
    let size = size.align_to(align);

    Ok(LayoutData {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: [Size::ZERO].into(),
            in_memory_order: [FieldIdx::new(0)].into(),
        },
        backend_repr: repr,
        largest_niche: elt.largest_niche,
        uninhabited: false,
        size,
        align: AbiAlign::new(align),
        max_repr_align: None,
        unadjusted_abi_align: elt.align.abi,
        randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
    })
}