rustc_abi/lib.rs

// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", doc(rust_logo))]
#![cfg_attr(feature = "nightly", feature(assert_matches))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
// tidy-alphabetical-end

/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format for e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
Despite `rustc_abi` being about calling conventions, it is good to remember these usages exist.
You will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of:
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention

## Crate Goal

ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
It should contain traits and types that other crates then use in their implementation.
For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`,
but `rustc_abi` contains the types for calculating layout and describing register-passing.
This makes it easier to describe things in the same way across targets, codegen backends, and
even other Rust compilers, such as rust-analyzer!

*/

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};

mod callconv;
mod layout;
#[cfg(test)]
mod tests;

mod extern_abi;

pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use extern_abi::{ExternAbi, all_names};
#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
pub use layout::{LayoutCalculator, LayoutCalculatorError};

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
#[cfg(feature = "nightly")]
pub trait HashStableContext {}

#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprFlags(u8);

bitflags! {
    impl ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        // Internal only for now. If true, don't reorder fields.
        // On its own it does not prevent ABI optimizations.
        const IS_LINEAR          = 1 << 3;
        // If true, the type's crate has opted into layout randomization.
        // Other flags can still inhibit reordering and thus randomization.
        // The seed is stored in `ReprOptions.field_shuffle_seed`.
        const RANDOMIZE_LAYOUT   = 1 << 4;
        // Any of these flags being set prevents the field reordering optimization.
        const FIELD_ORDER_UNOPTIMIZABLE   = ReprFlags::IS_C.bits()
                                 | ReprFlags::IS_SIMD.bits()
                                 | ReprFlags::IS_LINEAR.bits();
        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
    }
}

// This is the same as `rustc_data_structures::external_bitflags_debug`, but without the
// `rustc_data_structures` dependency, so that it builds on stable.
impl std::fmt::Debug for ReprFlags {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
    /// `Pointer(true)` means `isize`.
    Pointer(bool),
    /// Fixed-size integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// The seed to be used for randomizing a type's layout.
    ///
    /// Note: This could technically be a `u128` which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff; a 64-bit seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: Hash64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
    }

    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
    /// e.g. `repr(C)` or `repr(<int>)`.
    pub fn inhibit_struct_field_reordering(&self) -> bool {
        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
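    ///
    /// A minimal sketch of how `#[repr(C)]` interacts with these queries
    /// (illustrative only; `ReprFlags::IS_C` stands in for a parsed `#[repr(C)]`):
    ///
    /// ```ignore (illustrative)
    /// let repr = ReprOptions { flags: ReprFlags::IS_C, ..Default::default() };
    /// assert!(repr.c() && repr.inhibit_struct_field_reordering());
    /// assert!(!repr.can_randomize_type_layout());
    /// ```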
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimizations.
    pub fn inhibits_union_abi_opt(&self) -> bool {
        self.c()
    }
}

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f16_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub f128_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default `c_int::BITS`, usually 32).
    /// Note: This isn't in LLVM's data layout string; it corresponds to `short_enum`,
    /// so the only valid spec for LLVM is `c_int::BITS` or 8.
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f16_align: AbiAndPrefAlign::new(align(16)),
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            f128_align: AbiAndPrefAlign::new(align(128)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
    InvalidBitsSize { err: String },
}

impl TargetDataLayout {
    /// Parses the data layout from an
    /// [LLVM data layout string](https://llvm.org/docs/LangRef.html#data-layout).
    ///
    /// This function doesn't fill `c_enum_min_size`; it will always be `I32`, since it cannot
    /// be determined from the LLVM string.
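    ///
    /// A hedged sketch of the expected behavior on a typical little-endian 64-bit spec:
    ///
    /// ```ignore (illustrative)
    /// let dl = match TargetDataLayout::parse_from_llvm_datalayout_string("e-p:64:64-i64:64") {
    ///     Ok(dl) => dl,
    ///     Err(_) => unreachable!(),
    /// };
    /// assert_eq!(dl.endian, Endian::Little);
    /// assert_eq!(dl.pointer_size.bits(), 64);
    /// ```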
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let parse_size =
            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let parse_align = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", a @ ..] => dl.aggregate_align = parse_align(a, "a")?,
                ["f16", a @ ..] => dl.f16_align = parse_align(a, "f16")?,
                ["f32", a @ ..] => dl.f32_align = parse_align(a, "f32")?,
                ["f64", a @ ..] => dl.f64_align = parse_align(a, "f64")?,
                ["f128", a @ ..] => dl.f128_align = parse_align(a, "f128")?,
                // FIXME(erikdesjardins): we should be parsing nonzero address spaces
                // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
                // with e.g. `fn pointer_size_in(AddressSpace)`
                [p @ "p", s, a @ ..] | [p @ "p0", s, a @ ..] => {
                    dl.pointer_size = parse_size(s, p)?;
                    dl.pointer_align = parse_align(a, p)?;
                }
                [s, a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        parse_size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = parse_align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, a @ ..] if s.starts_with('v') => {
                    let v_size = parse_size(&s[1..], "v")?;
                    let a = parse_align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }
        Ok(dl)
    }

    /// Returns the **exclusive** upper bound on object size in bytes.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits* (not bytes), and we must
    /// stay within that limit, so the byte-based bound here is correspondingly more constrained.
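    ///
    /// For example, with 64-bit pointers the bound is `1 << 61` bytes, i.e. `2^64` bits:
    ///
    /// ```ignore (illustrative)
    /// let dl = TargetDataLayout::default(); // defaults to 64-bit pointers
    /// assert_eq!(dl.obj_size_bound(), 1 << 61);
    /// ```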
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        use Integer::*;
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    /// psABI-mandated alignment for a vector type, if any.
    #[inline]
    fn cabi_vector_align(&self, vec_size: Size) -> Option<AbiAndPrefAlign> {
        self.vector_align
            .iter()
            .find(|(size, _align)| *size == vec_size)
            .map(|(_size, align)| *align)
    }

    /// An alignment resembling the one LLVM would pick for a vector.
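    ///
    /// A sketch: exact psABI entries win; otherwise the size is rounded up to a power of two.
    ///
    /// ```ignore (illustrative)
    /// let dl = TargetDataLayout::default(); // has a 128-bit vector entry
    /// assert_eq!(dl.llvmlike_vector_align(Size::from_bits(128)).abi.bytes(), 16);
    /// assert_eq!(dl.llvmlike_vector_align(Size::from_bytes(24)).abi.bytes(), 32);
    /// ```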
    #[inline]
    pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        self.cabi_vector_align(vec_size).unwrap_or(AbiAndPrefAlign::new(
            Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(),
        ))
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// used by rust-analyzer
impl HasDataLayout for &TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        (**self).data_layout()
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{s}""#)),
        }
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Size {
    raw: u64,
}

#[cfg(feature = "nightly")]
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
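    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(1).bytes(), 1);  // 1 bit rounds up to a whole byte
    /// assert_eq!(Size::from_bits(12).bytes(), 2); // 12 bits round up to 2 bytes
    /// assert_eq!(Size::from_bits(16).bytes(), 2); // already a multiple of 8
    /// ```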
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

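    /// Rounds `self` up to the nearest multiple of `align`. A small sketch:
    ///
    /// ```ignore (illustrative)
    /// let four = Align::from_bytes(4).unwrap();
    /// assert_eq!(Size::from_bytes(5).align_to(four).bytes(), 8);
    /// assert!(Size::from_bytes(8).is_aligned(four));
    /// ```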
    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
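    ///
    /// ```ignore (illustrative)
    /// let s8 = Size::from_bits(8);
    /// assert_eq!(s8.sign_extend(0xFF), -1);   // 0xFF reinterpreted as i8 is -1
    /// assert_eq!(s8.sign_extend(0x17F), 127); // truncated to 0x7F first, then extended
    /// ```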
    #[inline]
    pub fn sign_extend(self, value: u128) -> i128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        ((value << shift) as i128) >> shift
    }

    /// Truncates `value` to `self` bits.
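    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(8).truncate(0x1FF), 0xFF); // only the low 8 bits survive
    /// ```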
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

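    /// The smallest signed value representable in `self` bits. A sketch for 16 bits,
    /// alongside its sibling methods below:
    ///
    /// ```ignore (illustrative)
    /// let s16 = Size::from_bits(16);
    /// assert_eq!(s16.signed_int_min(), -32768);
    /// assert_eq!(s16.signed_int_max(), 32767);
    /// assert_eq!(s16.unsigned_int_max(), 65535);
    /// ```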
    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1))
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}

impl AlignFromBytesError {
    pub fn diag_ident(self) -> &'static str {
        match self {
            Self::NotPowerOfTwo(_) => "not_power_of_two",
            Self::TooLarge(_) => "too_large",
        }
    }

    pub fn align(self) -> u64 {
        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
        align
    }
}

impl fmt::Debug for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl fmt::Display for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "`{align}` is not a power of 2"),
            AlignFromBytesError::TooLarge(align) => write!(f, "`{align}` is too large"),
        }
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const EIGHT: Align = Align { pow2: 3 };
    // LLVM has a maximal supported alignment of 2^29; we inherit that.
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        const fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub const fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub const fn bits(self) -> u64 {
        self.bytes() * 8
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Obtains the greatest factor of `size` that is an alignment
    /// (the largest power of two the `Size` is a multiple of).
    ///
    /// Note that all numbers are factors of 0.
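    ///
    /// ```ignore (illustrative)
    /// // 24 = 0b11000, so the largest power-of-two factor is 8.
    /// assert_eq!(Align::max_aligned_factor(Size::from_bytes(24)).bytes(), 8);
    /// ```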
    #[inline]
    pub fn max_aligned_factor(size: Size) -> Align {
        Align { pow2: size.bytes().trailing_zeros() as u8 }
    }

    /// Reduces `Align` to an aligned factor of `size`.
    #[inline]
    pub fn restrict_for_offset(self, size: Size) -> Align {
        self.min(Align::max_aligned_factor(size))
    }
}

/// A pair of alignments, ABI-mandated and preferred.
///
/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
/// it is not exposed semantically to programmers nor can they meaningfully affect it.
/// The only concern for us is that preferred alignment must not be less than the mandated
/// alignment, and thus in practice the two values are almost always identical.
///
/// An example of a rare thing actually affected by preferred alignment is the alignment of
/// statics. It is of effectively no consequence for layout in structs and on the stack.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn int_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "i8",
            I16 => "i16",
            I32 => "i32",
            I64 => "i64",
            I128 => "i128",
        }
    }

    pub fn uint_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "u8",
            I16 => "u16",
            I32 => "u32",
            I64 => "u64",
            I128 => "u128",
        }
    }

    #[inline]
    pub fn size(self) -> Size {
        use Integer::*;
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the `Integer` type from an `IntegerType`.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        use Integer::*;
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Returns the largest signed value that can be represented by this `Integer`.
    #[inline]
    pub fn signed_max(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MAX as i128,
            I16 => i16::MAX as i128,
            I32 => i32::MAX as i128,
            I64 => i64::MAX as i128,
            I128 => i128::MAX,
        }
    }

    /// Finds the smallest `Integer` type which can represent the signed value.
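    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Integer::fit_signed(-128), Integer::I8); // fits i8 exactly
    /// assert_eq!(Integer::fit_signed(128), Integer::I16); // one past i8::MAX
    /// ```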
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        use Integer::*;
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest `Integer` type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        use Integer::*;
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
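    ///
    /// A sketch against the built-in `Default` data layout:
    ///
    /// ```ignore (illustrative)
    /// let dl = TargetDataLayout::default();
    /// let two = Align::from_bytes(2).unwrap();
    /// assert_eq!(Integer::for_align(&dl, two), Some(Integer::I16));
    /// ```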
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        use Integer::*;
        let dl = cx.data_layout();

        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
        })
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        use Integer::*;
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}

impl Float {
    pub fn size(self) -> Size {
        use Float::*;

        match self {
            F16 => Size::from_bits(16),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            F128 => Size::from_bits(128),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        use Float::*;
        let dl = cx.data_layout();

        match self {
            F16 => dl.f16_align,
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            F128 => dl.f128_align,
        }
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    Float(Float),
    Pointer(AddressSpace),
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            Float(f) => f.size(),
            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
            // different address spaces can have different sizes
            // (but TargetDataLayout doesn't currently parse that part of the DL string)
            Pointer(_) => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            Float(f) => f.align(dl),
            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
            // different address spaces can have different alignments
            // (but TargetDataLayout doesn't currently parse that part of the DL string)
            Pointer(_) => dl.pointer_align,
        }
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
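    ///
    /// ```ignore (illustrative)
    /// // A wrap-around range on an 8-bit scalar: 254, 255, 0, 1, 2.
    /// let r = WrappingRange { start: 254, end: 2 };
    /// assert!(r.contains(255) && r.contains(0));
    /// assert!(!r.contains(100));
    /// ```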
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `self` with replaced `start`.
    #[inline(always)]
    fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`.
    #[inline(always)]
    fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    #[inline]
    fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
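///
/// For example, `bool` is modeled as an initialized `u8`-shaped scalar whose valid
/// range is `0..=1`; a sketch:
///
/// ```ignore (illustrative)
/// let b = Scalar::Initialized {
///     value: Primitive::Int(Integer::I8, false),
///     valid_range: WrappingRange { start: 0, end: 1 },
/// };
/// assert!(b.is_bool());
/// assert!(!b.is_always_valid(&TargetDataLayout::default())); // 2..=255 are invalid
/// ```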
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        use Integer::*;
        matches!(
            self,
            Scalar::Initialized {
                value: Primitive::Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Gets the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    #[inline]
    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }

    #[inline]
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }

    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
    /// union.
    #[inline]
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }

    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole
    /// layout.
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this type can be left uninit.
    #[inline]
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this is a signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self.primitive() {
            Primitive::Int(_, signed) => signed,
            _ => false,
        }
    }
}

// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum FieldsShape<FieldIdx: Idx> {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: IndexVec<FieldIdx, Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: IndexVec<FieldIdx, u32>,
    },
}

impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

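    /// Returns the offset of field `i` (in source order). An array sketch, borrowing
    /// the nightly-gated `FieldIdx` from this crate's `layout` module:
    ///
    /// ```ignore (illustrative)
    /// let arr: FieldsShape<FieldIdx> = FieldsShape::Array {
    ///     stride: Size::from_bytes(4),
    ///     count: 3,
    /// };
    /// assert_eq!(arr.offset(2), Size::from_bytes(8)); // 2 * stride
    /// ```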
    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count, "tried to access field {i} of array with {count} fields");
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => {
                memory_index[FieldIdx::new(i)].try_into().unwrap()
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = IndexVec::new();
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
                    inverse_small[mem_idx as usize] = field_idx.index() as u8;
                }
            } else {
                inverse_big = memory_index.invert_bijective_mapping();
            }
        }

        // Primitives don't really have fields in the way that structs do,
        // but having this return an empty iterator for them is unhelpful
        // since that makes them look kinda like ZSTs, which they're not.
        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };

        (0..pseudofield_count).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i as u32].index()
                }
            }
        })
    }
}

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// The way we represent values to the backend.
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
/// In reality, this implies little about that, but is mostly used to describe the syntactic form
/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
/// how the value will be lowered to the calling convention, in itself.
///
/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
/// and larger values will usually prefer to be represented as memory.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum BackendRepr {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    SimdVector {
        element: Scalar,
        count: u64,
    },
    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
    Memory {
        /// If true, the size is exact; otherwise it's only a lower bound.
        sized: bool,
    },
}

impl BackendRepr {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    /// Sanity check: panics if this is not a scalar type (see PR #70189).
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            BackendRepr::Scalar(scal) => scal.is_signed(),
            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
        }
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(_))
    }

    /// Returns `true` if this is a bool.
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
    }

    /// The psABI alignment for a `Scalar` or `ScalarPair`.
    ///
    /// `None` for other variants.
    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
        match *self {
            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
            // The align of a Vector can vary in surprising ways
            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
        }
    }

    /// The psABI size for a `Scalar` or `ScalarPair`.
    ///
    /// `None` for other variants.
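    ///
    /// A sketch for a `(u8, u32)`-shaped pair under the `Default` data layout
    /// (using `Scalar::Union` here only to avoid spelling out valid ranges):
    ///
    /// ```ignore (illustrative)
    /// let dl = TargetDataLayout::default();
    /// let u8s = Scalar::Union { value: Primitive::Int(Integer::I8, false) };
    /// let u32s = Scalar::Union { value: Primitive::Int(Integer::I32, false) };
    /// let pair = BackendRepr::ScalarPair(u8s, u32s);
    /// // u8 at offset 0, u32 at offset 4 (after padding), total 8 bytes:
    /// assert_eq!(pair.scalar_size(&dl), Some(Size::from_bytes(8)));
    /// ```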
1502    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
1503        match *self {
1504            // No padding in scalars.
1505            BackendRepr::Scalar(s) => Some(s.size(cx)),
1506            // May have some padding between the pair.
1507            BackendRepr::ScalarPair(s1, s2) => {
1508                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
1509                let size = (field2_offset + s2.size(cx)).align_to(
1510                    self.scalar_align(cx)
1511                        // We absolutely must have an answer here or everything is FUBAR.
1512                        .unwrap(),
1513                );
1514                Some(size)
1515            }
1516            // The size of a Vector can vary in surprising ways
1517            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
1518        }
1519    }

    /// Discard validity range information and allow undef.
    pub fn to_union(&self) -> Self {
        match *self {
            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
            BackendRepr::ScalarPair(s1, s2) => {
                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
            }
            BackendRepr::SimdVector { element, count } => {
                BackendRepr::SimdVector { element: element.to_union(), count }
            }
            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
        }
    }
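    // For example, `to_union` turns the `bool` scalar (a `u8` that is only valid in
    // `0..=1`) into a `u8` scalar covering the full `0..=255` range and additionally
    // permitting uninitialized bytes, since unions impose no validity on their
    // contents.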

    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
        match (self, other) {
            // Scalar, ScalarPair, and Vector all contain `Scalar`s, whose validity ranges
            // we ignore when comparing. We do *not* ignore the sign, since it matters for
            // some ABIs (e.g. s390x).
            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
            (
                BackendRepr::SimdVector { element: element_l, count: count_l },
                BackendRepr::SimdVector { element: element_r, count: count_r },
            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
            }
            // Everything else must be strictly identical.
            _ => self == other,
        }
    }
}
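// For example, the reprs of `bool` and `u8` are `eq_up_to_validity`: both are
// scalars of primitive `Int(I8, false)` and differ only in their valid ranges
// (`0..=1` vs `0..=255`). The reprs of `u8` and `i8` are *not*, since their
// primitives differ in signedness.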

// NOTE: This enum is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
    /// A type with no valid variants. Must be uninhabited.
    Empty,

    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single {
        /// Always `0` for types that cannot have multiple variants.
        index: VariantIdx,
    },

    /// Enum-likes with more than one variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<VariantIdx>,
        tag_field: usize,
        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
    },
}
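// For example, `struct S(u32)` has `Variants::Single { index: 0 }`, while a
// two-variant enum such as `enum E { A(u32), B }` has `Variants::Multiple` with a
// tag scalar, one layout per variant, and (absent a usable niche in `u32`)
// `TagEncoding::Direct`.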

// NOTE: This enum is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding<VariantIdx: Idx> {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`
    /// (this is wrapping arithmetic using the type of the niche field).
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    ///
    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
    /// range cannot be represented; they must be uninhabited.
    Niche {
        untagged_variant: VariantIdx,
        /// This range *may* contain `untagged_variant`; that is then just a "dead value" and
        /// not used to encode anything.
        niche_variants: RangeInclusive<VariantIdx>,
        /// This is inbounds of the type of the niche field
        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
        niche_start: u128,
    },
}
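// Worked example of the niche encoding above: for `Option<NonZeroU8>`,
// `untagged_variant` is `Some`, `niche_variants` covers only `None`, and
// `niche_start` is 0, so `None` (discriminant 0) is stored as
// `(0 - 0).wrapping_add(0) = 0`: the single byte value that is invalid for
// `NonZeroU8`. No separate tag field is needed.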

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }
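    // Worked example: for the `bool` scalar (a `u8` valid in `0..=1`), the invalid
    // values form the wrapping range `2..0`, so `available` returns
    // `0u128.wrapping_sub(2) & 0xff = 254`.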

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values by moving either the `v.start` or the `v.end` bound.
        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy
        // the niche of zero. This is accomplished by preferring enums with 2 variants
        // (`count == 1`) and always taking the shortest path to niche zero. Having `None` in
        // niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select the bound closest to zero, given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}
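// Worked example for `reserve`: starting from the `bool` niche (`u8` with
// `valid_range` `0..=1`) and `count == 1`, niche zero cannot be claimed because
// `count > v.start`, so the end bound moves instead: the reserved values start at
// 2 and the returned scalar's `valid_range` becomes `0..=2`. This is how
// `Option<bool>` ends up encoding `None` as the byte 2.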

// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape<FieldIdx>,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. coroutines can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<FieldIdx, VariantIdx>,

    /// The `backend_repr` defines how this data will be represented to the codegen backend,
    /// and encodes value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `BackendRepr::ScalarPair`! So, even with a non-`Memory` `backend_repr`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub backend_repr: BackendRepr,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,
    /// Is this type known to be uninhabited?
    ///
    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
    pub uninhabited: bool,

    pub align: AbiAndPrefAlign,
    pub size: Size,

    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
    /// requested, even if the requested alignment is equal to the natural alignment.
    pub max_repr_align: Option<Align>,

    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
    /// in some cases.
    pub unadjusted_abi_align: Align,

    /// The randomization seed based on this type's own repr and its fields.
    ///
    /// Since randomization is toggled on a per-crate basis, even crates that do not have
    /// randomization enabled should still calculate a seed so that downstream uses can rely on
    /// it to distinguish different types.
    ///
    /// For every `T` and `U` for which we do not guarantee that a `repr(Rust)` `Foo<T>` can be
    /// coerced or transmuted to `Foo<U>`, we aim to create probabilistically distinct seeds so
    /// that `Foo` can choose to reorder its fields based on that information. The current
    /// implementation is a conservative approximation of this goal.
    pub randomization_seed: Hash64,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if this is an aggregate type (including a ScalarPair!)
    pub fn is_aggregate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
        }
    }

    /// Returns `true` if this is an uninhabited type
    pub fn is_uninhabited(&self) -> bool {
        self.uninhabited
    }
}

impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
where
    FieldsShape<FieldIdx>: fmt::Debug,
    Variants<FieldIdx, VariantIdx>: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutS>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutData {
            size,
            align,
            backend_repr,
            fields,
            largest_niche,
            uninhabited,
            variants,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed,
        } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("backend_repr", backend_repr)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("uninhabited", uninhabited)
            .field("variants", variants)
            .field("max_repr_align", max_repr_align)
            .field("unadjusted_abi_align", unadjusted_abi_align)
            .field("randomization_seed", randomization_seed)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}
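// Illustrative mapping: `&T` where `T: Freeze` yields `SharedRef { frozen: true }`,
// `&mut T` where `T: Unpin` yields `MutableRef { unpin: true }`, and a `Box<T>`
// using the global allocator yields `Box { unpin: .., global: true }`.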

/// Encodes extra information we have about a pointer.
/// Note that this information is advisory only, and backends are free to ignore it:
/// if the information is wrong, that can cause UB, but if the information is absent,
/// that must always be okay.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    /// If this is `None`, then this is a raw pointer, so size and alignment are not guaranteed to
    /// be reliable.
    pub safe: Option<PointerKind>,
    /// If `safe` is `Some`, then the pointer is either null or dereferenceable for this many bytes.
    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire
    /// duration of this function call", i.e. it is UB to free the memory this pointer points to
    /// while the function is still running.
    /// The size can be zero if the pointer is not dereferenceable.
    pub size: Size,
    /// If `safe` is `Some`, then the pointer is aligned as indicated.
    pub align: Align,
}
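// For example, an LLVM-based backend may lower a `PointeeInfo` with
// `safe: Some(..)` into `dereferenceable`/`align` parameter attributes, while a
// backend that ignores the information entirely remains correct, just less
// optimized.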

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        self.backend_repr.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.backend_repr.is_sized()
    }

    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
    pub fn is_1zst(&self) -> bool {
        self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
    }

    /// Returns `true` if the type is a ZST and not unsized.
    ///
    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
    pub fn is_zst(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
        }
    }
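    // For example, `()` and `[u8; 0]` are 1-ZSTs, while `[u16; 0]` is a ZST with
    // alignment 2 and therefore not a 1-ZST.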

    /// Checks if these two `Layout` are equal enough to be considered "the same for all
    /// function call ABIs". Note however that real ABIs depend on more details that are not
    /// reflected in the `Layout`; the `PassMode` needs to be compared as well. Also note that
    /// we assume aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more
    /// strict checks would otherwise be required.
    pub fn eq_abi(&self, other: &Self) -> bool {
        // The one thing that we are not capturing here is that for unsized types, the metadata must
        // also have the same ABI, and moreover that the same metadata leads to the same size. The
        // 2nd point is quite hard to check though.
        self.size == other.size
            && self.is_sized() == other.is_sized()
            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
            && self.align.abi == other.align.abi
            && self.max_repr_align == other.max_repr_align
            && self.unadjusted_abi_align == other.unadjusted_abi_align
    }
}
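// Note that `eq_abi` deliberately distinguishes `bool` from `u8` even though their
// reprs are equal up to validity: backends may pass `bool` specially (e.g. LLVM
// represents it as an `i1`), so the `is_bool` comparison above keeps such layouts
// unequal.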

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}