use std::collections::BTreeSet;
use std::fmt::{self, Write};
use std::ops::{Bound, Deref};
use std::{cmp, iter};

use rustc_hashes::Hash64;
use rustc_index::Idx;
use rustc_index::bit_set::BitMatrix;
use tracing::{debug, trace};

use crate::{
    AbiAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
    LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
    TargetDataLayout, Variants, WrappingRange,
};

mod coroutine;
mod simple;

#[cfg(feature = "nightly")]
mod ty;

#[cfg(feature = "nightly")]
pub use ty::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};

// A variant is absent if it's uninhabited and only has ZST fields.
// Present uninhabited variants only require space for their fields,
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
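// Illustrative example (not from the original source): in
//
//     enum E { A(std::convert::Infallible), B(u32) }
//
// variant `A` is uninhabited and its only field is a 1-ZST, so it is absent:
// no tag value needs to be reserved for it, and only `B` must be encoded.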
fn absent<'a, FieldIdx, VariantIdx, F>(fields: &IndexSlice<FieldIdx, F>) -> bool
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let uninhabited = fields.iter().any(|f| f.is_uninhabited());
    // We cannot ignore alignment; that might lead us to entirely discard a variant and
    // produce an enum that is less aligned than it should be!
    let is_1zst = fields.iter().all(|f| f.is_1zst());
    uninhabited && is_1zst
}

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
    Start,
    End,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LayoutCalculatorError<F> {
    /// An unsized type was found in a location where a sized type was expected.
    ///
    /// This is not always a compile error, for example if there is a `[T]: Sized`
    /// bound in a where clause.
    ///
    /// Contains the field that was unexpectedly unsized.
    UnexpectedUnsized(F),

    /// A type was too large for the target platform.
    SizeOverflow,

    /// A union had no fields.
    EmptyUnion,

    /// The fields or variants have irreconcilable reprs.
    ReprConflict,

    /// The length of a SIMD type is zero.
    ZeroLengthSimdType,

    /// The length of a SIMD type exceeds the maximum number of lanes.
    OversizedSimdType { max_lanes: u64 },

    /// An element type of a SIMD type isn't a primitive.
    NonPrimitiveSimdType(F),
}

impl<F> LayoutCalculatorError<F> {
    pub fn without_payload(&self) -> LayoutCalculatorError<()> {
        use LayoutCalculatorError::*;
        match *self {
            UnexpectedUnsized(_) => UnexpectedUnsized(()),
            SizeOverflow => SizeOverflow,
            EmptyUnion => EmptyUnion,
            ReprConflict => ReprConflict,
            ZeroLengthSimdType => ZeroLengthSimdType,
            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
        }
    }

    /// Format an untranslated diagnostic for this type.
    ///
    /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
    pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use LayoutCalculatorError::*;
        f.write_str(match self {
            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
            SizeOverflow => "size overflow",
            EmptyUnion => "type is a union with no fields",
            ReprConflict => "type has an invalid repr",
            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
                "invalid simd type definition"
            }
        })
    }
}

type LayoutCalculatorResult<FieldIdx, VariantIdx, F> =
    Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>>;

#[derive(Clone, Copy, Debug)]
pub struct LayoutCalculator<Cx> {
    pub cx: Cx,
}

impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
    pub fn new(cx: Cx) -> Self {
        Self { cx }
    }
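
    /// Lays out an array of `count_if_sized` elements, or a slice tail when `None`.
    ///
    /// Illustrative usage sketch (not from the original source; `cx` and
    /// `u32_layout` are assumed to be a layout context and the element layout):
    ///
    /// ```ignore (illustrative)
    /// let calc = LayoutCalculator::new(cx);
    /// // `[u32; 4]`: stride 4 bytes, total size 16, element alignment kept.
    /// let arr = calc.array_like::<_, _, ()>(&u32_layout, Some(4))?;
    /// assert_eq!(arr.size.bytes(), 16);
    /// ```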
    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
        &self,
        element: &LayoutData<FieldIdx, VariantIdx>,
        count_if_sized: Option<u64>, // None for slices
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let count = count_if_sized.unwrap_or(0);
        let size =
            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: element.size, count },
            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
            largest_niche: element.largest_niche.filter(|_| count != 0),
            uninhabited: element.uninhabited && count != 0,
            align: element.align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: element.align.abi,
            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
        })
    }

    pub fn scalable_vector_type<FieldIdx, VariantIdx, F>(
        &self,
        element: F,
        count: u64,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
    where
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    {
        vector_type_layout(VectorKind::Scalable, self.cx.data_layout(), element, count)
    }

    pub fn simd_type<FieldIdx, VariantIdx, F>(
        &self,
        element: F,
        count: u64,
        repr_packed: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
    where
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    {
        let kind = if repr_packed { VectorKind::PackedFixed } else { VectorKind::Fixed };
        vector_type_layout(kind, self.cx.data_layout(), element, count)
    }

    /// Compute the layout for a coroutine.
    ///
    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
    pub fn coroutine<
        'a,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
        VariantIdx: Idx,
        FieldIdx: Idx,
        LocalIdx: Idx,
    >(
        &self,
        local_layouts: &IndexSlice<LocalIdx, F>,
        prefix_layouts: IndexVec<FieldIdx, F>,
        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
        tag_to_layout: impl Fn(Scalar) -> F,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        coroutine::layout(
            self,
            local_layouts,
            prefix_layouts,
            variant_fields,
            storage_conflicts,
            tag_to_layout,
        )
    }

    pub fn univariant<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front then we do a second
        // run and bias niches to the right and then check which one is closer to one of the
        // struct's edges.
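        // Illustrative example (not from the original source): if the start-biased run
        // leaves a `bool`'s niche in the middle of the struct (say at offset 2 of 6),
        // the end-biased run may move it into the trailing byte; per the check below,
        // the alternative is preferred when it puts strictly more free space in front
        // of the niche than the default leaves on either side of its own niche.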
        if let Ok(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
            if !matches!(kind, StructKind::MaybeUnsized) {
                if let Some(niche) = layout.largest_niche {
                    let head_space = niche.offset.bytes();
                    let niche_len = niche.value.size(dl).bytes();
                    let tail_space = layout.size.bytes() - head_space - niche_len;

                    // This may end up doing redundant work if the niche is already in the last
                    // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                    // to get the unpadded size so we try anyway.
                    if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                        let alt_layout = self
                            .univariant_biased(fields, repr, kind, NicheBias::End)
                            .expect("alt layout should always work");
                        let alt_niche = alt_layout
                            .largest_niche
                            .expect("alt layout should have a niche like the regular one");
                        let alt_head_space = alt_niche.offset.bytes();
                        let alt_niche_len = alt_niche.value.size(dl).bytes();
                        let alt_tail_space =
                            alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                        debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

                        let prefer_alt_layout =
                            alt_head_space > head_space && alt_head_space > tail_space;

                        debug!(
                            "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                             layout: {}\n\
                             alt_layout: {}\n",
                            layout.size.bytes(),
                            head_space,
                            niche_len,
                            tail_space,
                            alt_head_space,
                            alt_niche_len,
                            alt_tail_space,
                            layout.fields.count(),
                            prefer_alt_layout,
                            self.format_field_niches(layout, fields),
                            self.format_field_niches(&alt_layout, fields),
                        );

                        if prefer_alt_layout {
                            return Ok(alt_layout);
                        }
                    }
                }
            }
        }
        layout
    }

    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        always_sized: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if !repr.c() && absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Ok(LayoutData::never_type(&self.cx));
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => VariantIdx::new(0),
        };

        // take the struct path if it is an actual struct
        if !is_enum ||
            // or for optimizing univariant enums
            (present_second.is_none() && !repr.inhibit_enum_layout_opt())
        {
            self.layout_of_struct(
                repr,
                variants,
                is_enum,
                is_special_no_niche,
                scalar_valid_range,
                always_sized,
                present_first,
            )
        } else {
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(is_enum);
            self.layout_of_enum(repr, variants, discr_range_of_repr, discriminants)
        }
    }

    pub fn layout_of_union<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;

        // If all the non-ZST fields have the same repr and union repr optimizations aren't
        // disabled, we can use that common repr for the union as a whole.
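        // Illustrative example (not from the original source): in
        // `union U { a: u32, b: core::num::NonZeroU32 }` both fields lower to the same
        // 4-byte integer scalar once validity ranges are discarded, so `U` itself can
        // keep the scalar repr; adding a `[u8; 4]` field (a `Memory` repr) would not
        // match and the logic below falls back to `BackendRepr::Memory`.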
        struct AbiMismatch;
        let mut common_non_zst_repr_and_align = if repr.inhibits_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };

        let mut size = Size::ZERO;
        let only_variant_idx = VariantIdx::new(0);
        let only_variant = &variants[only_variant_idx];
        for field in only_variant {
            if field.is_unsized() {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
            }

            align = align.max(field.align.abi);
            max_repr_align = max_repr_align.max(field.max_repr_align);
            size = cmp::max(size, field.size);

            if field.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_repr_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.backend_repr.to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_repr_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                            assert_eq!(
                                common_align, field.align.abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_repr_and_align = Ok(Some((field_abi, field.align.abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(pack);
        }
        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align;
        if let Some(repr_align) = repr.align {
            align = align.max(repr_align);
        }
        // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let backend_repr = match common_non_zst_repr_and_align {
            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
            Ok(Some((repr, _))) => match repr {
                // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _)
                    if repr.scalar_align(dl).unwrap() != align =>
                {
                    BackendRepr::Memory { sized: true }
                }
                // Vectors require at least element alignment, else disable the opt
                BackendRepr::SimdVector { element, count: _ } if element.align(dl).abi > align => {
                    BackendRepr::Memory { sized: true }
                }
                // the alignment tests passed and we can use this
                BackendRepr::Scalar(..)
                | BackendRepr::ScalarPair(..)
                | BackendRepr::SimdVector { .. }
                | BackendRepr::ScalableVector { .. }
                | BackendRepr::Memory { .. } => repr,
            },
        };

        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
            return Err(LayoutCalculatorError::EmptyUnion);
        };

        let combined_seed = only_variant
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        Ok(LayoutData {
            variants: Variants::Single { index: only_variant_idx },
            fields: FieldsShape::Union(union_field_count),
            backend_repr,
            largest_niche: None,
            uninhabited: false,
            align: AbiAlign::new(align),
            size: size.align_to(align),
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        })
    }

    /// single-variant enums are just structs, if you think about it
    fn layout_of_struct<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        always_sized: bool,
        present_first: VariantIdx,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Struct, or univariant enum equivalent to a struct.
        // (Typechecking will reject discriminant-sizing attrs.)

        let dl = self.cx.data_layout();
        let v = present_first;
        let kind = if is_enum || variants[v].is_empty() || always_sized {
            StructKind::AlwaysSized
        } else {
            StructKind::MaybeUnsized
        };

        let mut st = self.univariant(&variants[v], repr, kind)?;
        st.variants = Variants::Single { index: v };

        if is_special_no_niche {
            let hide_niches = |scalar: &mut _| match scalar {
                Scalar::Initialized { value, valid_range } => {
                    *valid_range = WrappingRange::full(value.size(dl))
                }
                // Already doesn't have any niches
                Scalar::Union { .. } => {}
            };
            match &mut st.backend_repr {
                BackendRepr::Scalar(scalar) => hide_niches(scalar),
                BackendRepr::ScalarPair(a, b) => {
                    hide_niches(a);
                    hide_niches(b);
                }
                BackendRepr::SimdVector { element, .. }
                | BackendRepr::ScalableVector { element, .. } => hide_niches(element),
                BackendRepr::Memory { sized: _ } => {}
            }
            st.largest_niche = None;
            return Ok(st);
        }

        let (start, end) = scalar_valid_range;
        match st.backend_repr {
            BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
                // Enlarging validity ranges would result in missed
                // optimizations, *not* wrongly assuming the inner
                // value is valid. e.g. unions already enlarge validity ranges,
                // because the values may be uninitialized.
                //
                // Because of that we only check that the start and end
                // of the range is representable with this scalar type.
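                // Illustrative example (not from the original source): a `NonZeroU8`-style
                // wrapper annotated with `#[rustc_layout_scalar_valid_range_start(1)]`
                // arrives here as `(Included(1), Unbounded)`; shrinking the range to
                // `1..=255` turns the value `0` into a niche usable by `Option`.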

                let max_value = scalar.size(dl).unsigned_int_max();
                if let Bound::Included(start) = start {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(start <= max_value, "{start} > {max_value}");
                    scalar.valid_range_mut().start = start;
                }
                if let Bound::Included(end) = end {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(end <= max_value, "{end} > {max_value}");
                    scalar.valid_range_mut().end = end;
                }

                // Update `largest_niche` if we have introduced a larger niche.
                let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                if let Some(niche) = niche {
                    match st.largest_niche {
                        Some(largest_niche) => {
                            // Replace the existing niche even if they're equal,
                            // because this one is at a lower offset.
                            if largest_niche.available(dl) <= niche.available(dl) {
                                st.largest_niche = Some(niche);
                            }
                        }
                        None => st.largest_niche = Some(niche),
                    }
                }
            }
            _ => assert!(
                start == Bound::Unbounded && end == Bound::Unbounded,
                "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
            ),
        }

        Ok(st)
    }

    fn layout_of_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        // bail if the enum has an incoherent repr that cannot be computed
        if repr.packed() {
            return Err(LayoutCalculatorError::ReprConflict);
        }

        let calculate_niche_filling_layout = || -> Option<LayoutData<FieldIdx, VariantIdx>> {
            if repr.inhibit_enum_layout_opt() {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut max_repr_align = repr.align;
            let mut unadjusted_abi_align = align;

            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align.abi);
                    max_repr_align = max_repr_align.max(st.max_repr_align);
                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count =
                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;

            // Use the largest niche in the largest variant.
            let niche = variant_layouts[largest_variant_index].largest_niche?;
            let (niche_start, niche_scalar) = niche.reserve(dl, count)?;
            let niche_offset = niche.offset;
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for offset in offsets.iter_mut() {
                            *offset += this_offset;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.is_uninhabited() {
                    layout.backend_repr = BackendRepr::Memory { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align.abi;

            let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited());
            let abi = if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].backend_repr {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
                    BackendRepr::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => BackendRepr::Memory { sized: true },
                }
            } else {
                BackendRepr::Memory { sized: true }
            };

            let combined_seed = variant_layouts
                .iter()
                .map(|v| v.randomization_seed)
                .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));
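
            // Illustrative example (not from the original source): for
            // `Option<core::num::NonZeroU8>` the largest variant `Some` stays untagged,
            // `None` is the single niche variant, and `niche_start` is `0`: the enum is
            // one byte, with the value `0` meaning `None`.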
            let layout = LayoutData {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: FieldIdx::new(0),
                    variants: variant_layouts,
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    in_memory_order: [FieldIdx::new(0)].into(),
                },
                backend_repr: abi,
                largest_niche,
                uninhabited,
                size,
                align: AbiAlign::new(align),
                max_repr_align,
                unadjusted_abi_align,
                randomization_seed: combined_seed,
            };

            Some(layout)
        };

        let niche_filling_layout = calculate_niche_filling_layout();

        let discr_type = repr.discr_type();
        let discr_int = Integer::from_attr(dl, discr_type);
        // Because we can only represent one range of valid values, we'll look for the
        // largest range of invalid values and pick everything else as the range of valid
        // values.

        // First we need to sort the possible discriminant values so that we can look for the largest gap:
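        // Illustrative example (not from the original source): with `u8` discriminants
        // `{0, 1, 200}`, the gaps are `2..=199` (198 values) and the wrap-around
        // `201..=255` (55 values); the first is larger, so the valid range becomes the
        // wrapping `200..=1` and `2..=199` is left over as the niche.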
        let valid_discriminants: BTreeSet<i128> = discriminants
            .filter(|&(i, _)| repr.c() || variants[i].iter().all(|f| !f.is_uninhabited()))
            .map(|(_, val)| {
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
                    // FIXME: do this at the discriminant iterator creation sites
                    discr_int.size().sign_extend(val as u128)
                } else {
                    val
                }
            })
            .collect();
        trace!(?valid_discriminants);
        let discriminants = valid_discriminants.iter().copied();
        // let next_discriminants = discriminants.clone().cycle().skip(1);
        let next_discriminants =
            discriminants.clone().chain(valid_discriminants.first().copied()).skip(1);
        // Iterate over pairs of each discriminant together with the next one.
        // Since they were sorted, we can now compute the niche sizes and pick the largest.
        let discriminants = discriminants.zip(next_discriminants);
        let largest_niche = discriminants.max_by_key(|&(start, end)| {
            trace!(?start, ?end);
            // If this is a wraparound range, the niche size is `MAX - abs(diff)`, as the diff
            // between the two end points is actually the size of the valid discriminants.
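            // Illustrative worked example (not from the original source): with a `u8`
            // repr and the wrapping pair `(start, end) = (200, 0)`, the niche size is
            // `255 - (200 - 0) = 55`, i.e. the invalid values `201..=255`.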
            let dist = if start > end {
                // Overflow can happen for 128 bit discriminants if `end` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                let dist = start.wrapping_sub(end);
                if discr_type.is_signed() {
                    discr_int.signed_max().wrapping_sub(dist) as u128
                } else {
                    discr_int.size().unsigned_int_max() - dist as u128
                }
            } else {
                // Overflow can happen for 128 bit discriminants if `start` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                end.wrapping_sub(start) as u128
            };
            trace!(?dist);
            dist
        });
        trace!(?largest_niche);

        // `max` is the last valid discriminant before the largest niche
        // `min` is the first valid discriminant after the largest niche
        let (max, min) = largest_niche
            // We might have no inhabited variants, so pretend there's at least one.
            .unwrap_or((0, 0));
        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::discr_range_of_repr(tcx, ty, &repr, min, max);

        let mut align = dl.aggregate_align;
        let mut max_repr_align = repr.align;
        let mut unadjusted_abi_align = align;

        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::new(field_idx)];
                    if !field.is_1zst() {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align.abi);
                max_repr_align = max_repr_align.max(st.max_repr_align);
                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                Ok(st)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align);

        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck for
            // some reason at this point (based on values discriminant can take on). Mostly
            // because this discriminant will be loaded, and then stored into variable of
            // type calculated by typeck. Consider such case (a bug): typeck decided on
            // byte-sized discriminant, but layout thinks we need a 16-bit to store all
            // discriminant values. That would be a bug, because then, in codegen, in order
            // to store this 16-bit discriminant into 8-bit sized temporary some of the
            // space necessary to represent would have to be discarded (or layout is wrong
            // on thinking it needs 16 bits)
            panic!(
                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
            );
            // However, it is fine to make discr type however large (as an optimisation)
            // after this point – we’ll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.
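        // Illustrative example (not from the original source): for
        // `enum E { A(u64), B }` a one-byte tag suffices, but the first non-1-ZST field
        // of every inhabited variant is 8-aligned, so the tag below can be widened to a
        // 64-bit integer without growing the enum.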

        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("encountered a non-arbitrary layout during enum layout")
                    }
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Primitive::Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
        let mut abi = BackendRepr::Memory { sized: true };

        let uninhabited = layout_variants.iter().all(|v| v.is_uninhabited());
        if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = BackendRepr::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            // That's possible only if we can find a common primitive type for all variants.
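            // Illustrative example (not from the original source): in
            // `enum E { A(u32), B(i32) }` both payloads are 4-byte integers at the same
            // offset, so the enum can be a `ScalarPair` of tag and integer; a variant
            // mix like `A(u32)`/`B(f64)` has no common primitive and stays `Memory`.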
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!("encountered a non-arbitrary layout during enum layout");
                };
                // We skip *all* ZST here and later check if we are good in terms of alignment.
                // This lets us handle some cases involving aligned ZST.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some((old_prim, common_offset)) = common_prim {
                    // All variants must be at the same offset
                    if offset != common_offset {
                        common_prim = None;
                        break;
                    }
                    // This is pretty conservative. We could go fancier
                    // by realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    let new_prim = match (old_prim, prim) {
                        // Allow all identical primitives.
                        (x, y) if x == y => x,
                        // Allow integers of the same size with differing signedness.
                        // We arbitrarily choose the signedness of the first variant.
                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
                        // Allow integers mixed with pointers of the same layout.
                        // We must represent this using a pointer, to avoid
                        // roundtripping pointers through ptrtoint/inttoptr.
                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
                        {
                            p
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    // We may be updating the primitive here, for example from int->ptr.
                    common_prim = Some((new_prim, common_offset));
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    let size = prim.size(dl);
                    assert!(size.bits() <= 128);
                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair =
                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
                        assert_eq!(in_memory_order.raw, [FieldIdx::new(0), FieldIdx::new(1)]);
                        offsets
                    }
                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
                };
                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
                    && pair_offsets[FieldIdx::new(1)] == *offset
                    && align == pair.align.abi
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.backend_repr;
                }
            }
        }

        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0
                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
                {
                    variant.backend_repr = abi;
                    // Also need to bump up the size and alignment, so that the entire value fits
                    // in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let combined_seed = layout_variants
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        let tagged_layout = LayoutData {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: FieldIdx::new(0),
                variants: layout_variants,
            },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                in_memory_order: [FieldIdx::new(0)].into(),
            },
            largest_niche,
            uninhabited,
            backend_repr: abi,
            align: AbiAlign::new(align),
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        };

        let best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size = |l: &LayoutData<FieldIdx, VariantIdx>| {
                    l.largest_niche.map_or(0, |n| n.available(dl))
                };
                match (tl.size.cmp(&nl.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        Ok(best_layout)
    }

    fn univariant_biased<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
        niche_bias: NicheBias,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let pack = repr.pack;
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;
        let mut in_memory_order: IndexVec<u32, FieldIdx> = fields.indices().collect();
        let optimize_field_order = !repr.inhibit_struct_field_reordering();
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut in_memory_order.raw[..end];
        let fields_excluding_tail = &fields.raw[..end];
        // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
        let field_seed = fields_excluding_tail
            .iter()
            .fold(Hash64::ZERO, |acc, f| acc.wrapping_add(f.randomization_seed));

        if optimize_field_order && fields.len() > 1 {
            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
                #[cfg(feature = "randomize")]
                {
                    use rand::SeedableRng;
                    use rand::seq::SliceRandom;
                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
                    // ordering.
                    let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64(
                        field_seed.wrapping_add(repr.field_shuffle_seed).as_u64(),
                    );

                    // Shuffle the ordering of the fields.
                    optimizing.shuffle(&mut rng);
                }
                // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
                // not depend on the layout of the tail.
                let max_field_align =
                    fields_excluding_tail.iter().map(|f| f.align.bytes()).max().unwrap_or(1);
                let largest_niche_size = fields_excluding_tail
                    .iter()
                    .filter_map(|f| f.largest_niche)
                    .map(|n| n.available(dl))
                    .max()
                    .unwrap_or(0);

                // Calculates a sort key to group fields by their alignment or possibly some
                // size-derived pseudo-alignment.
                let alignment_group_key = |layout: &F| {
                    // The two branches here return values that cannot be meaningfully compared
                    // with each other. However, we know that consistently for all executions of
                    // `alignment_group_key`, one or the other branch will be taken, so this is
                    // okay.
                    if let Some(pack) = pack {
                        // Return the packed alignment in bytes.
                        layout.align.abi.min(pack).bytes()
                    } else {
                        // Returns `log2(effective-align)`. The calculation assumes that size is
                        // an integer multiple of align, except for ZSTs.
                        let align = layout.align.bytes();
                        let size = layout.size.bytes();
                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
                        // Group `[u8; 4]` with align-4 or `[u8; 6]` with align-2 fields.
                        let size_as_align = align.max(size).trailing_zeros();
                        let size_as_align = if largest_niche_size > 0 {
                            match niche_bias {
                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to
                                // bump the array to the front in the first case (for aligned
                                // loads) but keep the bool in front in the second case for its
                                // niches.
                                NicheBias::Start => {
                                    max_field_align.trailing_zeros().min(size_as_align)
                                }
                                // When moving niches towards the end of the struct then for
                                // `A((u8, u8, u8, bool), (u8, bool, u8))` we want to keep the
                                // first tuple in the align-1 group because its bool can be
                                // moved closer to the end.
                                NicheBias::End if niche_size == largest_niche_size => {
                                    align.trailing_zeros()
                                }
                                NicheBias::End => size_as_align,
                            }
                        } else {
                            size_as_align
                        };
                        size_as_align as u64
                    }
                };
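                // For example (illustrative, no `pack` and no niches in play): a `u64` field
                // yields key `log2(8) = 3`, while a `[u8; 6]` field (align 1, size 6) yields
                // `trailing_zeros(max(1, 6)) = 1`, grouping it with the align-2 fields.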

                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        // Currently `LayoutData` only exposes a single niche so sorting is
                        // usually sufficient to get one niche into the preferred position. If
                        // it ever supported multiple niches then a more advanced pick-and-pack
                        // approach could provide better results. But even for the single-niche
                        // case it's not optimal. E.g. for `A(u32, (bool, u8), u16)` it would be
                        // possible to move the bool to the front, but that would require
                        // packing the tuple together with the u16 into a 4-byte group so the
                        // u32 could be placed after it without padding. This kind of packing
                        // can't be achieved by sorting.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let field_size = f.size.bytes();
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            let niche_size_key = match niche_bias {
                                // Large niche first.
                                NicheBias::Start => !niche_size,
                                // Large niche last.
                                NicheBias::End => niche_size,
                            };
                            let inner_niche_offset_key = match niche_bias {
                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
                                NicheBias::End => f.largest_niche.map_or(0, |n| {
                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
                                }),
                            };

                            (
                                // Place the largest alignment groups first.
                                cmp::Reverse(alignment_group_key(f)),
                                // Then prioritize niche placement within an alignment group
                                // according to `niche_bias`.
                                niche_size_key,
                                // Then among fields with equally-sized niches prefer the ones
                                // closer to the start/end of the field.
                                inner_niche_offset_key,
                            )
                        });
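                        // For example (illustrative): with `NicheBias::Start` and no niches,
                        // `struct S { a: u8, b: u32, c: u16 }` sorts into memory order
                        // `b, c, a`, i.e. offsets b: 0, c: 4, a: 6, for a total size of 8
                        // after tail padding.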
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix, and put the largest niche in an alignment
                        // group at the end so it can be used as the discriminant in jagged
                        // enums.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (alignment_group_key(f), niche_size)
                        });
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                // regardless of the status of `-Z randomize-layout`.
            }
        }
        // `in_memory_order` holds field indices by increasing memory offset. That is, if
        // field 5 has offset 0, the first element of `in_memory_order` is 5. We now write
        // each field's offset to the corresponding slot: field 5 with offset 0 puts 0 in
        // `offsets[5]`.
        let mut unsized_field = None::<&F>;
        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(prefix_align);
            offset = prefix_size.align_to(prefix_align);
        }
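        // For example (illustrative): a 3-byte prefix with 4-byte alignment starts the first
        // field at offset `3.align_to(4) = 4` and raises the struct alignment to at least 4.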
        for &i in &in_memory_order {
            let field = &fields[i];
            if let Some(unsized_field) = unsized_field {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
            }

            if field.is_unsized() {
                if let StructKind::MaybeUnsized = kind {
                    unsized_field = Some(field);
                } else {
                    return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
                }
            }

            // Invariant: offset < dl.obj_size_bound() <= 1 << 61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align.abi);
            max_repr_align = max_repr_align.max(field.max_repr_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                // Pick up larger niches.
                let prefer_new_niche = match niche_bias {
                    NicheBias::Start => available > largest_niche_available,
                    // If there are several niches of the same size then pick the last one.
                    NicheBias::End => available >= largest_niche_available,
                };
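                // E.g. (illustrative) if two fields each expose a 254-value `bool` niche,
                // `NicheBias::Start` keeps the first one encountered (strict `>`) while
                // `NicheBias::End` replaces it with the later one (`>=`).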
                if prefer_new_niche {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset =
                offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
        }

        // The unadjusted ABI alignment does not include `repr(align)`, but does include
        // `repr(packed)`. See the documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align;
        if let Some(repr_align) = repr.align {
            align = align.max(repr_align);
        }
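        // E.g. (illustrative, typical targets) `#[repr(align(8))] struct S(u8);` ends up
        // with `align == 8` here while `unadjusted_abi_align` stays 1.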
        // `align` must not be modified after this point, or `unadjusted_abi_align` could be
        // inaccurate.
        let align = align;

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        let size = min_size.align_to(align);
        // FIXME(oli-obk): deduplicate and harden these checks.
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }
        let mut layout_of_single_non_zst_field = None;
        let sized = unsized_field.is_none();
        let mut abi = BackendRepr::Memory { sized };

        let optimize_abi = !repr.inhibit_newtype_abi_optimization();

        // Try to make this a Scalar/ScalarPair.
        if sized && size.bytes() > 0 {
            // We skip *all* ZSTs here and later check that the alignment still works out.
            // This lets us handle some cases involving aligned ZSTs.
            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());
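            // E.g. (illustrative) `(u32, ())` still counts as a single non-ZST field and can
            // get `Scalar` ABI below, but `(u8, [u64; 0])` cannot: the align-8 ZST raises the
            // struct's alignment above the `u8`'s, failing the alignment check.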

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    layout_of_single_non_zst_field = Some(field);

                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align == field.align.abi && size == field.size {
                        match field.backend_repr {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. }
                                if optimize_abi =>
                            {
                                abi = field.backend_repr;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            BackendRepr::ScalarPair(..) => {
                                abi = field.backend_repr;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
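                // E.g. (illustrative) `(u32, u16)` computes offsets 0 and 4 and size 8, which
                // matches the canonical scalar-pair layout, so it is promoted to `ScalarPair`;
                // a `#[repr(packed)]` version whose size or alignment deviates keeps `Memory`.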
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.backend_repr, b.backend_repr) {
                        (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                            // Order by memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair =
                                LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
                                    assert_eq!(
                                        in_memory_order.raw,
                                        [FieldIdx::new(0), FieldIdx::new(1)]
                                    );
                                    offsets
                                }
                                FieldsShape::Primitive
                                | FieldsShape::Array { .. }
                                | FieldsShape::Union(..) => {
                                    panic!("encountered a non-arbitrary layout during enum layout")
                                }
                            };
                            if offsets[i] == pair_offsets[FieldIdx::new(0)]
                                && offsets[j] == pair_offsets[FieldIdx::new(1)]
                                && align == pair.align.abi
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.backend_repr;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
        let uninhabited = fields.iter().any(|f| f.is_uninhabited());

        let unadjusted_abi_align = if repr.transparent() {
            match layout_of_single_non_zst_field {
                Some(l) => l.unadjusted_abi_align,
                None => {
                    // `repr(transparent)` with all ZST fields.
                    align
                }
            }
        } else {
            unadjusted_abi_align
        };
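        // E.g. (illustrative) `#[repr(transparent)] struct W(u64);` forwards the `u64`'s
        // unadjusted ABI alignment rather than recomputing its own.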

        let seed = field_seed.wrapping_add(repr.field_shuffle_seed);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, in_memory_order },
            backend_repr: abi,
            largest_niche,
            uninhabited,
            align: AbiAlign::new(align),
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: seed,
        })
    }
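
    /// Renders, per field in increasing-offset order, the field's offset/align/size and (if
    /// present) its largest niche as `[o<offset>a<align>s<size> n<offset>b<log2 count>s<size>] `.
    /// E.g. (illustrative) a `bool` at byte offset 4 renders as `[o4a1s1 n0b7s1] `.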
    fn format_field_niches<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        layout: &LayoutData<FieldIdx, VariantIdx>,
        fields: &IndexSlice<FieldIdx, F>,
    ) -> String {
        let dl = self.cx.data_layout();
        let mut s = String::new();
        for i in layout.fields.index_by_increasing_offset() {
            let offset = layout.fields.offset(i);
            let f = &fields[FieldIdx::new(i)];
            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.bytes(), f.size.bytes()).unwrap();
            if let Some(n) = f.largest_niche {
                write!(
                    s,
                    " n{}b{}s{}",
                    n.offset.bytes(),
                    n.available(dl).ilog2(),
                    n.value.size(dl).bytes()
                )
                .unwrap();
            }
            write!(s, "] ").unwrap();
        }
        s
    }
}

enum VectorKind {
    /// `#[rustc_scalable_vector]`
    Scalable,
    /// `#[repr(simd, packed)]`
    PackedFixed,
    /// `#[repr(simd)]`
    Fixed,
}

fn vector_type_layout<FieldIdx, VariantIdx, F>(
    kind: VectorKind,
    dl: &TargetDataLayout,
    element: F,
    count: u64,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let elt = element.as_ref();
    if count == 0 {
        return Err(LayoutCalculatorError::ZeroLengthSimdType);
    } else if count > crate::MAX_SIMD_LANES {
        return Err(LayoutCalculatorError::OversizedSimdType { max_lanes: crate::MAX_SIMD_LANES });
    }

    let BackendRepr::Scalar(element) = elt.backend_repr else {
        return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
    };

    // Compute the size and alignment of the vector.
    let size =
        elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
    let (repr, align) = match kind {
        VectorKind::Scalable => {
            (BackendRepr::ScalableVector { element, count }, dl.llvmlike_vector_align(size))
        }
        // Non-power-of-two vectors have padding up to the next power of two.
        // If we're a packed repr, remove the padding while keeping the alignment as close
        // to that of a vector as possible.
        VectorKind::PackedFixed if !count.is_power_of_two() => {
            (BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size))
        }
        VectorKind::PackedFixed | VectorKind::Fixed => {
            (BackendRepr::SimdVector { element, count }, dl.llvmlike_vector_align(size))
        }
    };
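    // E.g. (illustrative, typical targets): a 3-lane `f32` vector has raw size 12;
    // `#[repr(simd)]` typically pads it to a 16-byte, vector-aligned layout below, while
    // `#[repr(simd, packed)]` keeps size 12 with `max_aligned_factor(12) = 4` alignment.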
    let size = size.align_to(align);

    Ok(LayoutData {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: [Size::ZERO].into(),
            in_memory_order: [FieldIdx::new(0)].into(),
        },
        backend_repr: repr,
        largest_niche: elt.largest_niche,
        uninhabited: false,
        size,
        align: AbiAlign::new(align),
        max_repr_align: None,
        unadjusted_abi_align: elt.align.abi,
        randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
    })
}