// rustc_abi/lib.rs

// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", feature(assert_matches))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
// tidy-alphabetical-end

/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format for e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
Despite `rustc_abi` being mostly about calling conventions, it is good to remember these other
usages exist. You will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention

## Crate Goal

ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
It should contain traits and types that other crates then use in their implementation.
For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`
but `rustc_abi` contains the types for calculating layout and describing register-passing.
This makes it easier to describe things in the same way across targets, codegen backends, and
even other Rust compilers, such as rust-analyzer!

*/

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};

mod callconv;
mod canon_abi;
mod extern_abi;
mod layout;
#[cfg(test)]
mod tests;

pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
#[cfg(feature = "nightly")]
pub use extern_abi::CVariadicStatus;
pub use extern_abi::{ExternAbi, all_names};
#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
pub use layout::{LayoutCalculator, LayoutCalculatorError};

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
#[cfg(feature = "nightly")]
pub trait HashStableContext {}

#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprFlags(u8);

bitflags! {
    impl ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        /// Internal only for now. If true, don't reorder fields.
        /// On its own it does not prevent ABI optimizations.
        const IS_LINEAR          = 1 << 3;
        /// If true, the type's crate has opted into layout randomization.
        /// Other flags can still inhibit reordering and thus randomization.
        /// The seed is stored in `ReprOptions.field_shuffle_seed`.
        const RANDOMIZE_LAYOUT   = 1 << 4;
        /// If true, the type is always passed indirectly by non-Rustic ABIs.
        /// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.
        const PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS = 1 << 5;
        const IS_SCALABLE        = 1 << 6;
        // Any of these flags being set prevents the field reordering optimization.
        const FIELD_ORDER_UNOPTIMIZABLE = ReprFlags::IS_C.bits()
                                 | ReprFlags::IS_SIMD.bits()
                                 | ReprFlags::IS_SCALABLE.bits()
                                 | ReprFlags::IS_LINEAR.bits();
        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
    }
}

// This is the same as `rustc_data_structures::external_bitflags_debug`, but without the
// `rustc_data_structures` dependency, to make it build on stable.
impl std::fmt::Debug for ReprFlags {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
    /// `Pointer(true)` means `isize`.
    Pointer(bool),
    /// Fixed-size integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum ScalableElt {
    /// `N` in `rustc_scalable_vector(N)` - the element count of the scalable vector
    ElementCount(u16),
    /// `rustc_scalable_vector` without `N`, used for tuple types of scalable vectors that only
    /// contain other scalable vectors
    Container,
}

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// `#[rustc_scalable_vector]`
    pub scalable: Option<ScalableElt>,
    /// The seed to be used for randomizing a type's layout.
    ///
    /// Note: this could technically be a `u128`, which would be the "most accurate" hash
    /// as it'd encompass the item and crate hash without loss, but it does pay the price
    /// of being larger. Everything's a tradeoff: a 64-bit seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: Hash64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn scalable(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SCALABLE)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    ///
    /// This is the "typeck type" of the discriminant, which is effectively the maximum size:
    /// discriminant values will be wrapped to fit (with a lint). Layout can later decide to use a
    /// smaller type for the tag that stores the discriminant at runtime, and that will work just
    /// fine; it just induces casts when getting/setting the discriminant.
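    ///
    /// # Example
    ///
    /// A minimal illustrative sketch: with no explicit `repr(<int>)`, the
    /// discriminant's typeck type defaults to `isize`.
    ///
    /// ```ignore (illustrative)
    /// let repr = ReprOptions::default();
    /// assert_eq!(repr.discr_type(), IntegerType::Pointer(true)); // i.e. `isize`
    /// ```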
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
    }

    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
    /// e.g. `repr(C)` or `repr(<int>)`.
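    ///
    /// A minimal illustrative sketch: adding `IS_C` to otherwise-default repr
    /// options fixes the field order.
    ///
    /// ```ignore (illustrative)
    /// let mut repr = ReprOptions::default();
    /// assert!(!repr.inhibit_struct_field_reordering());
    /// repr.flags.insert(ReprFlags::IS_C);
    /// assert!(repr.inhibit_struct_field_reordering());
    /// ```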
    pub fn inhibit_struct_field_reordering(&self) -> bool {
        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimizations.
    pub fn inhibits_union_abi_opt(&self) -> bool {
        self.c()
    }
}

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer, so
///   the largest representable lane count is `1 << 15`.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

/// How pointers are represented in a given address space.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct PointerSpec {
    /// The size of the bitwise representation of the pointer.
    pointer_size: Size,
    /// The alignment of pointers for this address space.
    pointer_align: Align,
    /// The size of the value a pointer can be offset by in this address space.
    pointer_offset: Size,
    /// Pointers into this address space contain extra metadata.
    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
    _is_fat: bool,
}

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: Align,
    pub i8_align: Align,
    pub i16_align: Align,
    pub i32_align: Align,
    pub i64_align: Align,
    pub i128_align: Align,
    pub f16_align: Align,
    pub f32_align: Align,
    pub f64_align: Align,
    pub f128_align: Align,
    pub aggregate_align: Align,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, Align)>,

    pub default_address_space: AddressSpace,
    pub default_address_space_pointer_spec: PointerSpec,

    /// Address space information of all known address spaces.
    ///
    /// # Note
    ///
    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
    /// which instead lives in [`Self::default_address_space_pointer_spec`].
    address_space_info: Vec<(AddressSpace, PointerSpec)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default: `c_int::BITS`, usually 32).
    /// Note: this isn't in LLVM's data layout string; it corresponds to `short_enum`,
    /// so the only valid specs for LLVM are `c_int::BITS` or 8.
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: align(8),
            i8_align: align(8),
            i16_align: align(16),
            i32_align: align(32),
            i64_align: align(32),
            i128_align: align(32),
            f16_align: align(16),
            f32_align: align(32),
            f64_align: align(64),
            f128_align: align(128),
            aggregate_align: align(8),
            vector_align: vec![
                (Size::from_bits(64), align(64)),
                (Size::from_bits(128), align(128)),
            ],
            default_address_space: AddressSpace::ZERO,
            default_address_space_pointer_spec: PointerSpec {
                pointer_size: Size::from_bits(64),
                pointer_align: align(64),
                pointer_offset: Size::from_bits(64),
                _is_fat: false,
            },
            address_space_info: vec![],
            instruction_address_space: AddressSpace::ZERO,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },
    InvalidBitsSize { err: String },
    UnknownPointerSpecification { err: String },
}

impl TargetDataLayout {
    /// Parses the data layout from an
    /// [LLVM data layout string](https://llvm.org/docs/LangRef.html#data-layout).
    ///
    /// This function doesn't fill in `c_enum_min_size`; it will always be `I32`, since it cannot
    /// be determined from the LLVM string.
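    ///
    /// # Example
    ///
    /// A minimal illustrative sketch; the layout string below is a typical
    /// x86_64-style spec, not one taken from this crate:
    ///
    /// ```ignore (illustrative)
    /// let dl = TargetDataLayout::parse_from_llvm_datalayout_string(
    ///     "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n8:16:32:64-S128",
    ///     AddressSpace::ZERO,
    /// )
    /// .ok()
    /// .unwrap();
    /// assert_eq!(dl.endian, Endian::Little);
    /// // No plain `p` spec above, so the default pointer spec (64-bit) is kept.
    /// assert_eq!(dl.pointer_size().bits(), 64);
    /// ```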
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
        default_address_space: AddressSpace,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let parse_size =
            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let parse_align_str = |s: &'a str, cause: &'a str| {
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s, "alignment", cause)?;
            Ok(align_from_bits(abi)?)
        };

        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
        // ignoring the secondary alignment specifications.
        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            parse_align_str(s[0], cause)
        };

        let mut dl = TargetDataLayout::default();
        dl.default_address_space = default_address_space;

        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
                [p, s, a @ ..] if p.starts_with("p") => {
                    let mut p = p.strip_prefix('p').unwrap();
                    let mut _is_fat = false;

                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
                    if p.starts_with('f') {
                        p = p.strip_prefix('f').unwrap();
                        _is_fat = true;
                    }

                    // However, we currently don't take into account further specifications:
                    // an error is emitted instead.
                    if p.starts_with(char::is_alphabetic) {
                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
                            err: p.to_string(),
                        });
                    }

                    let addr_space = if !p.is_empty() {
                        parse_address_space(p, "p-")?
                    } else {
                        AddressSpace::ZERO
                    };

                    let pointer_size = parse_size(s, "p-")?;
                    let pointer_align = parse_align_seq(a, "p-")?;
                    let info = PointerSpec {
                        pointer_offset: pointer_size,
                        pointer_size,
                        pointer_align,
                        _is_fat,
                    };
                    if addr_space == default_address_space {
                        dl.default_address_space_pointer_spec = info;
                    } else {
                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
                            Some(e) => e.1 = info,
                            None => {
                                dl.address_space_info.push((addr_space, info));
                            }
                        }
                    }
                }
                [p, s, a, _pr, i] if p.starts_with("p") => {
                    let mut p = p.strip_prefix('p').unwrap();
                    let mut _is_fat = false;

                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
                    if p.starts_with('f') {
                        p = p.strip_prefix('f').unwrap();
                        _is_fat = true;
                    }

                    // However, we currently don't take into account further specifications:
                    // an error is emitted instead.
                    if p.starts_with(char::is_alphabetic) {
                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
                            err: p.to_string(),
                        });
                    }

                    let addr_space = if !p.is_empty() {
                        parse_address_space(p, "p")?
                    } else {
                        AddressSpace::ZERO
                    };

                    let info = PointerSpec {
                        pointer_size: parse_size(s, "p-")?,
                        pointer_align: parse_align_str(a, "p-")?,
                        pointer_offset: parse_size(i, "p-")?,
                        _is_fat,
                    };

                    if addr_space == default_address_space {
                        dl.default_address_space_pointer_spec = info;
                    } else {
                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
                            Some(e) => e.1 = info,
                            None => {
                                dl.address_space_info.push((addr_space, info));
                            }
                        }
                    }
                }

                [s, a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        parse_size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = parse_align_seq(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, a @ ..] if s.starts_with('v') => {
                    let v_size = parse_size(&s[1..], "v")?;
                    let a = parse_align_seq(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Inherit, if not explicitly given, the pointer spec of the instruction address space
        // from the default data address space.
        if (dl.instruction_address_space != dl.default_address_space)
            && dl
                .address_space_info
                .iter()
                .find(|(a, _)| *a == dl.instruction_address_space)
                .is_none()
        {
            dl.address_space_info.push((
                dl.instruction_address_space,
                dl.default_address_space_pointer_spec.clone(),
            ));
        }

        Ok(dl)
    }

    /// Returns the **exclusive** upper bound on object size in bytes, in the default data address
    /// space.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits* (not bytes), so we adopt an
    /// even more constrained bound: on 64-bit targets the limit is `1 << 61` bytes, i.e.
    /// `1 << 64` bits.
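    ///
    /// A minimal illustrative sketch, using the built-in default layout
    /// (which has 64-bit pointers):
    ///
    /// ```ignore (illustrative)
    /// let dl = TargetDataLayout::default();
    /// assert_eq!(dl.obj_size_bound(), 1 << 61);
    /// ```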
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size().bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    /// Returns the **exclusive** upper bound on object size in bytes, in the given address space.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits* (not bytes), so we adopt an
    /// even more constrained bound: for 64-bit pointers the limit is `1 << 61` bytes, i.e.
    /// `1 << 64` bits.
    #[inline]
    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
        match self.pointer_size_in(address_space).bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        use Integer::*;
        match self.pointer_offset().bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
        use Integer::*;
        match self.pointer_offset_in(address_space).bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    /// The psABI-mandated alignment for a vector type, if any.
    #[inline]
    fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> {
        self.vector_align
            .iter()
            .find(|(size, _align)| *size == vec_size)
            .map(|(_size, align)| *align)
    }

    /// An alignment resembling the one LLVM would pick for a vector.
    #[inline]
    pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {
        self.cabi_vector_align(vec_size)
            .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }

    /// Get the pointer size in the default data address space.
    #[inline]
    pub fn pointer_size(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_size
    }

    /// Get the pointer size in a specific address space.
    #[inline]
    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_size;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_size
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }

    /// Get the pointer offset (LLVM's "index size") in the default data address space.
    #[inline]
    pub fn pointer_offset(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_offset
    }

    /// Get the pointer offset (LLVM's "index size") in a specific address space.
    #[inline]
    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_offset;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_offset
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }

    /// Get the pointer alignment in the default data address space.
    #[inline]
    pub fn pointer_align(&self) -> AbiAlign {
        AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)
    }

    /// Get the pointer alignment in a specific address space.
    #[inline]
    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
        AbiAlign::new(if c == self.default_address_space {
            self.default_address_space_pointer_spec.pointer_align
        } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_align
        } else {
            panic!("Use of unknown address space {c:?}");
        })
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// used by rust-analyzer
impl HasDataLayout for &TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        (**self).data_layout()
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{s}""#)),
        }
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Size {
    raw: u64,
}

#[cfg(feature = "nightly")]
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values, and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
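    ///
    /// A minimal illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(12).bytes(), 2); // 12 bits round up to 2 bytes
    /// assert_eq!(Size::from_bits(16).bytes(), 2);
    /// ```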
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        Size { raw: bits.div_ceil(8) }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

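    /// Rounds `self` up to the nearest multiple of `align`.
    ///
    /// A minimal illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// let size = Size::from_bytes(5);
    /// let align = Align::from_bytes(4).unwrap();
    /// assert_eq!(size.align_to(align).bytes(), 8);
    /// ```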
    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
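    ///
    /// A minimal illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// let i8_size = Size::from_bits(8);
    /// assert_eq!(i8_size.sign_extend(0xFF), -1);
    /// assert_eq!(i8_size.sign_extend(0x7F), 127);
    /// ```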
    #[inline]
    pub fn sign_extend(self, value: u128) -> i128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        ((value << shift) as i128) >> shift
    }

    /// Truncates `value` to `self` bits.
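    ///
    /// A minimal illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(8).truncate(0x1FF), 0xFF);
    /// ```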
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1))
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid these during layout computation; return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}

impl AlignFromBytesError {
    pub fn diag_ident(self) -> &'static str {
        match self {
            Self::NotPowerOfTwo(_) => "not_power_of_two",
            Self::TooLarge(_) => "too_large",
        }
    }

    pub fn align(self) -> u64 {
        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
        align
    }
}

impl fmt::Debug for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl fmt::Display for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "`{align}` is not a power of 2"),
            AlignFromBytesError::TooLarge(align) => write!(f, "`{align}` is too large"),
        }
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const EIGHT: Align = Align { pow2: 3 };
    // LLVM has a maximal supported alignment of 2^29; we inherit that.
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        const fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub const fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub const fn bits(self) -> u64 {
        self.bytes() * 8
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Obtain the greatest factor of `size` that is an alignment
    /// (the largest power of two the Size is a multiple of).
    ///
    /// Note that all numbers are factors of 0.
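    ///
    /// A minimal illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Align::max_aligned_factor(Size::from_bytes(24)).bytes(), 8);
    /// ```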
    #[inline]
    pub fn max_aligned_factor(size: Size) -> Align {
        Align { pow2: size.bytes().trailing_zeros() as u8 }
    }

    /// Reduces `Align` to an aligned factor of `size`.
    #[inline]
    pub fn restrict_for_offset(self, size: Size) -> Align {
        self.min(Align::max_aligned_factor(size))
    }
}

/// A type's ABI-mandated alignment.
///
/// This is distinct from LLVM's "preferred" alignment, a concept that is virtually meaningless
/// to Rust code: it is not exposed semantically to programmers nor can they meaningfully affect
/// it. The only concern for us is that preferred alignment must not be less than the mandated
/// alignment and thus in practice the two values are almost always identical.
///
/// An example of a rare thing actually affected by preferred alignment is aligning of statics.
/// It is of effectively no consequence for layout in structs and on the stack.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAlign {
    pub abi: Align,
}

impl AbiAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAlign {
        AbiAlign { abi: align }
    }

    #[inline]
    pub fn min(self, other: AbiAlign) -> AbiAlign {
        AbiAlign { abi: self.abi.min(other.abi) }
    }

    #[inline]
    pub fn max(self, other: AbiAlign) -> AbiAlign {
        AbiAlign { abi: self.abi.max(other.abi) }
    }
}

impl Deref for AbiAlign {
    type Target = Align;

    fn deref(&self) -> &Self::Target {
        &self.abi
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn int_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "i8",
            I16 => "i16",
            I32 => "i32",
            I64 => "i64",
            I128 => "i128",
        }
    }

    pub fn uint_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "u8",
            I16 => "u16",
            I32 => "u32",
            I64 => "u64",
            I128 => "u128",
        }
    }

    #[inline]
    pub fn size(self) -> Size {
        use Integer::*;
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the Integer type from an IntegerType.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Integer::*;
        let dl = cx.data_layout();

        AbiAlign::new(match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        })
    }

    /// Returns the largest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_max(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MAX as i128,
            I16 => i16::MAX as i128,
            I32 => i32::MAX as i128,
            I64 => i64::MAX as i128,
            I128 => i128::MAX,
        }
    }

    /// Returns the smallest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_min(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MIN as i128,
            I16 => i16::MIN as i128,
            I32 => i32::MIN as i128,
            I64 => i64::MIN as i128,
            I128 => i128::MIN,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
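    ///
    /// A minimal illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Integer::fit_signed(127), Integer::I8);
    /// assert_eq!(Integer::fit_signed(128), Integer::I16);
    /// assert_eq!(Integer::fit_signed(-129), Integer::I16);
    /// ```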
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        use Integer::*;
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
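    ///
    /// A minimal illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Integer::fit_unsigned(255), Integer::I8);
    /// assert_eq!(Integer::fit_unsigned(256), Integer::I16);
    /// ```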
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        use Integer::*;
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        use Integer::*;
        let dl = cx.data_layout();

        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
        })
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        use Integer::*;
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}

impl Float {
    pub fn size(self) -> Size {
        use Float::*;

        match self {
            F16 => Size::from_bits(16),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            F128 => Size::from_bits(128),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Float::*;
        let dl = cx.data_layout();

        AbiAlign::new(match self {
            F16 => dl.f16_align,
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            F128 => dl.f128_align,
        })
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    Float(Float),
    Pointer(AddressSpace),
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            Float(f) => f.size(),
            Pointer(a) => dl.pointer_size_in(a),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            Float(f) => f.align(dl),
            Pointer(a) => dl.pointer_align_in(a),
        }
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
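///
/// # Example
///
/// A minimal illustrative sketch: for a `u8`-sized range, `254..=2` covers
/// 254, 255, 0, 1, 2.
///
/// ```ignore (illustrative)
/// let r = WrappingRange { start: 254, end: 2 };
/// assert!(r.contains(255) && r.contains(1));
/// assert!(!r.contains(100));
/// ```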
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `true` if all the values in `other` are contained in this range,
    /// when the values are considered as having width `size`.
    #[inline(always)]
    pub fn contains_range(&self, other: Self, size: Size) -> bool {
        if self.is_full_for(size) {
            true
        } else {
            let trunc = |x| size.truncate(x);

            let delta = self.start;
            let max = trunc(self.end.wrapping_sub(delta));

            let other_start = trunc(other.start.wrapping_sub(delta));
            let other_end = trunc(other.end.wrapping_sub(delta));

            // Having shifted both input ranges by `delta`, now we only need to check
            // whether `0..=max` contains `other_start..=other_end`, which can only
            // happen if the other doesn't wrap since `self` isn't everything.
            (other_start <= other_end) && (other_end <= max)
        }
    }

    /// Returns `self` with `start` replaced.
    #[inline(always)]
    fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with `end` replaced.
    #[inline(always)]
    fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    ///
    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.
    /// Niche calculations can produce full ranges which are not the canonical one;
    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.
    #[inline]
    fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }

    /// Checks whether this range is considered non-wrapping when the values are
    /// interpreted as *unsigned* numbers of width `size`.
    ///
    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
    /// and `Err(..)` if the range is full, so it depends on how you think about it.
    #[inline]
    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
        if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
    }

    /// Checks whether this range is considered non-wrapping when the values are
    /// interpreted as *signed* numbers of width `size`.
    ///
    /// This is heavily dependent on the `size`, as `100..=200` does wrap when
    /// interpreted as `i8`, but doesn't when interpreted as `i16`.
    ///
    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
    /// and `Err(..)` if the range is full, so it depends on how you think about it.
    #[inline]
    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
        if self.is_full_for(size) {
            Err(..)
        } else {
            let start: i128 = size.sign_extend(self.start);
            let end: i128 = size.sign_extend(self.end);
            Ok(start <= end)
        }
    }
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        use Integer::*;
        matches!(
            self,
            Scalar::Initialized {
                value: Primitive::Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Get the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

1559    #[inline]
1560    pub fn to_union(&self) -> Self {
1561        Self::Union { value: self.primitive() }
1562    }
1563
1564    #[inline]
1565    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
1566        match *self {
1567            Scalar::Initialized { valid_range, .. } => valid_range,
1568            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
1569        }
1570    }
1571
1572    #[inline]
1573    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
1574    /// union.
1575    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
1576        match self {
1577            Scalar::Initialized { valid_range, .. } => valid_range,
1578            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
1579        }
1580    }
1581
1582    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
1583    /// layout.
1584    #[inline]
1585    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
1586        match *self {
1587            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
1588            Scalar::Union { .. } => true,
1589        }
1590    }
1591
1592    /// Returns `true` if this type can be left uninit.
1593    #[inline]
1594    pub fn is_uninit_valid(&self) -> bool {
1595        match *self {
1596            Scalar::Initialized { .. } => false,
1597            Scalar::Union { .. } => true,
1598        }
1599    }
1600
1601    /// Returns `true` if this is a signed integer scalar
1602    #[inline]
1603    pub fn is_signed(&self) -> bool {
1604        match self.primitive() {
1605            Primitive::Int(_, signed) => signed,
1606            _ => false,
1607        }
1608    }
1609}
1610
1611// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
1612/// Describes how the fields of a type are located in memory.
1613#[derive(PartialEq, Eq, Hash, Clone, Debug)]
1614#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1615pub enum FieldsShape<FieldIdx: Idx> {
1616    /// Scalar primitives and `!`, which never have fields.
1617    Primitive,
1618
1619    /// All fields start at no offset. The `usize` is the field count.
1620    Union(NonZeroUsize),
1621
1622    /// Array/vector-like placement, with all fields of identical types.
1623    Array { stride: Size, count: u64 },
1624
1625    /// Struct-like placement, with precomputed offsets.
1626    ///
1627    /// Fields are guaranteed to not overlap, but note that gaps
1628    /// before, between and after all the fields are NOT always
1629    /// padding, and as such their contents may not be discarded.
1630    /// For example, enum variants leave a gap at the start,
1631    /// where the discriminant field in the enum layout goes.
1632    Arbitrary {
1633        /// Offsets for the first byte of each field,
1634        /// ordered to match the source definition order.
1635        /// This vector does not go in increasing order.
1636        // FIXME(eddyb) use small vector optimization for the common case.
1637        offsets: IndexVec<FieldIdx, Size>,
1638
1639        /// Maps memory order field indices to source order indices,
1640        /// depending on how the fields were reordered (if at all).
1641        /// This is a permutation, with both the source order and the
1642        /// memory order using the same (0..n) index ranges.
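        ///
        /// For example, if `struct S(u8, u32)` is laid out with the `u32`
        /// first (illustrative; the actual reordering is layout-dependent),
        /// then `offsets` is `[4, 0]` (indexed by source order) and
        /// `in_memory_order` is `[1, 0]` (memory order back to source order).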
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        in_memory_order: IndexVec<u32, FieldIdx>,
    },
}

impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

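    /// An illustrative sketch of `offset` for the `Array` shape, where the
    /// offset of field `i` is simply `stride * i`:
    ///
    /// ```ignore (illustrative)
    /// // `F` stands in for any field index type.
    /// let shape: FieldsShape<F> = FieldsShape::Array { stride: Size::from_bytes(4), count: 3 };
    /// assert_eq!(shape.offset(2), Size::from_bytes(8)); // fields start at 0, 4, 8
    /// ```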
    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count, "tried to access field {i} of array with {count} fields");
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
        }
    }

    /// Gets source indices of the fields by increasing offsets.
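    ///
    /// Reusing the illustrative `struct S(u8, u32)` permutation from the
    /// `Arbitrary` docs above (`in_memory_order == [1, 0]`), this yields the
    /// source indices `[1, 0]`: the `u32` (source field 1) comes first in
    /// memory, followed by the `u8` (source field 0).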
    #[inline]
    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
        // Primitives don't really have fields in the way that structs do,
        // but having this return an empty iterator for them is unhelpful
        // since that makes them look kinda like ZSTs, which they're not.
        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };

        (0..pseudofield_count).map(move |i| match self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
        })
    }
}

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// LLVM's `0` address space.
    pub const ZERO: Self = AddressSpace(0);
}

/// The way we represent values to the backend
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
/// In reality, this implies little about that, but is mostly used to describe the syntactic form
/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
/// The psABI may need consideration in doing so, but this enum does not, in itself, constitute a
/// promise for how the value will be lowered to the calling convention.
///
/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
/// while larger values are usually represented as memory.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum BackendRepr {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    ScalableVector {
        element: Scalar,
        count: u64,
    },
    SimdVector {
        element: Scalar,
        count: u64,
    },
    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
    Memory {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl BackendRepr {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            // FIXME(rustc_scalable_vector): Scalable vectors are `Sized` while the
            // `sized_hierarchy` feature is not yet fully implemented. After `sized_hierarchy` is
            // fully implemented, scalable vectors will remain `Sized`, they just won't be
            // `const Sized` - whether `is_unsized` continues to return `false` at that point will
            // need to be revisited and will depend on what `is_unsized` is used for.
            | BackendRepr::ScalableVector { .. }
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    /// Sanity check: panics if this is not a scalar type (see PR #70189).
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            BackendRepr::Scalar(scal) => scal.is_signed(),
            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
        }
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(_))
    }

    /// Returns `true` if this is a bool.
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
    }

    /// The psABI alignment for a `Scalar` or `ScalarPair`.
    ///
    /// `None` for other variants.
    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
        match *self {
            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
            // The align of a Vector can vary in surprising ways
            BackendRepr::SimdVector { .. }
            | BackendRepr::Memory { .. }
            | BackendRepr::ScalableVector { .. } => None,
        }
    }

    /// The psABI size for a `Scalar` or `ScalarPair`.
    ///
    /// `None` for other variants.
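    ///
    /// As an illustrative sketch of the `ScalarPair` case: for a pair of an
    /// 8-byte/8-aligned scalar and a 4-byte/4-aligned scalar (on a typical
    /// 64-bit target), `field2_offset` is 8, the unpadded end is 12, and the
    /// total size is 16 after rounding up to the pair's alignment of 8.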
    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
        match *self {
            // No padding in scalars.
            BackendRepr::Scalar(s) => Some(s.size(cx)),
            // May have some padding between the pair.
            BackendRepr::ScalarPair(s1, s2) => {
                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
                let size = (field2_offset + s2.size(cx)).align_to(
                    self.scalar_align(cx)
                        // We absolutely must have an answer here or everything is FUBAR.
                        .unwrap(),
                );
                Some(size)
            }
            // The size of a Vector can vary in surprising ways
            BackendRepr::SimdVector { .. }
            | BackendRepr::Memory { .. }
            | BackendRepr::ScalableVector { .. } => None,
        }
    }

    /// Discard validity range information and allow undef.
    pub fn to_union(&self) -> Self {
        match *self {
            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
            BackendRepr::ScalarPair(s1, s2) => {
                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
            }
            BackendRepr::SimdVector { element, count } => {
                BackendRepr::SimdVector { element: element.to_union(), count }
            }
            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
            BackendRepr::ScalableVector { element, count } => {
                BackendRepr::ScalableVector { element: element.to_union(), count }
            }
        }
    }

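    /// Checks equality of two reprs while ignoring the validity ranges of any
    /// `Scalar`s involved.
    ///
    /// For example (illustrative): a `bool`-like scalar (`Int(I8, false)` with
    /// `valid_range` `0..=1`) and a `u8`-like scalar (same primitive, full
    /// range) compare equal here, but a `u8`-like and an `i8`-like scalar do
    /// not, since the sign differs.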
    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
        match (self, other) {
            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
            (
                BackendRepr::SimdVector { element: element_l, count: count_l },
                BackendRepr::SimdVector { element: element_r, count: count_r },
            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
            }
            // Everything else must be strictly identical.
            _ => self == other,
        }
    }
}

// NOTE: This enum is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
    /// A type with no valid variants. Must be uninhabited.
    Empty,

    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single {
        /// Always `0` for types that cannot have multiple variants.
        index: VariantIdx,
    },

    /// Enum-likes with more than one variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<VariantIdx>,
        tag_field: FieldIdx,
        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
    },
}

// NOTE: This enum is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding<VariantIdx: Idx> {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant.
    /// Note that for this encoding, the discriminant and variant index of each variant coincide!
    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
    ///
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field [`Variants::Multiple::tag_field`] of the enum).
    /// For a variant with variant index `i`, such that `i != untagged_variant`,
    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`
    /// (this is wrapping arithmetic using the type of the niche field, cf. the
    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)
    /// query implementation).
    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,
    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside
    /// `niche_variants`, the tag must have encoded the `untagged_variant`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that the tag for
    /// `None` is the null pointer in the second tuple field, and
    /// `Some` is the identity function (with a non-null reference)
    /// and has no additional tag, i.e. the reference being non-null uniquely identifies this variant.
    ///
    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
    /// range cannot be represented; they must be uninhabited.
    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.
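    ///
    /// As a concrete sketch (illustrative of the scheme, not a layout
    /// guarantee): for `Option<bool>`, the niche is the `bool` byte with
    /// `valid_range` `0..=1`, `untagged_variant` is `Some`, `niche_variants`
    /// covers only `None`, and `niche_start` is `2`. `None` is then encoded
    /// as the byte `2`, while the bytes `0` and `1` decode to the untagged
    /// `Some(false)` / `Some(true)`.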
    Niche {
        untagged_variant: VariantIdx,
        /// This range *may* contain `untagged_variant` or uninhabited variants;
        /// these are then just "dead values" and not used to encode anything.
        niche_variants: RangeInclusive<VariantIdx>,
        /// This is in bounds of the type of the niche field
        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
        niche_start: u128,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

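    /// Returns the number of invalid values this niche has available, i.e.
    /// the count of values outside `valid_range`.
    ///
    /// An illustrative sketch: for a 1-byte niche with `valid_range` `0..=1`
    /// (a `bool`), the invalid values are `2..=255`, so this returns
    /// `0u128.wrapping_sub(2) & 0xFF`, which is `254`.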
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

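    /// Tries to reserve `count` niche values, returning the first reserved
    /// value and the widened `Scalar` on success.
    ///
    /// An illustrative sketch: reserving one value in a `NonZeroU8`-like
    /// niche (`valid_range` `1..=255`) claims niche zero and widens the range
    /// to the (wrapping, full) `1..=0`; this is how `Option<NonZeroU8>` gets
    /// `None` encoded as the byte `0`.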
    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or the
        // `v.end` bound. Given an eventual `Option<T>`, we try to maximize the chance for `None`
        // to occupy the niche of zero. This is accomplished by preferring enums with 2 variants
        // (`count == 1`) and always taking the shortest path to niche zero. Having `None` in
        // niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}

// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape<FieldIdx>,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. coroutines can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<FieldIdx, VariantIdx>,

    /// The `backend_repr` defines how this data will be represented to the codegen backend,
    /// and encodes value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `BackendRepr::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and
    /// `variants` have to be taken into account to find all fields of this layout.
    pub backend_repr: BackendRepr,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,
    /// Is this type known to be uninhabited?
    ///
    /// This is separate from `BackendRepr` because uninhabited return types can affect ABI,
    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
    pub uninhabited: bool,

    pub align: AbiAlign,
    pub size: Size,

    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
    /// requested, even if the requested alignment is equal to the natural alignment.
    pub max_repr_align: Option<Align>,

    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
    /// in some cases.
    pub unadjusted_abi_align: Align,

    /// The randomization seed based on this type's own repr and its fields.
    ///
    /// Since randomization is toggled on a per-crate basis, even crates that do not have
    /// randomization enabled should still calculate a seed so that downstream uses can use it to
    /// distinguish different types.
    ///
    /// For every `T` and `U` for which we do not guarantee that a `repr(Rust)` `Foo<T>` can be
    /// coerced or transmuted to `Foo<U>`, we aim to create probabilistically distinct seeds so
    /// that `Foo` can choose to reorder its fields based on that information. The current
    /// implementation is a conservative approximation of this goal.
    pub randomization_seed: Hash64,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if this is an aggregate type (including a `ScalarPair`!).
    pub fn is_aggregate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_)
            | BackendRepr::SimdVector { .. }
            | BackendRepr::ScalableVector { .. } => false,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
        }
    }

    /// Returns `true` if this is an uninhabited type.
    pub fn is_uninhabited(&self) -> bool {
        self.uninhabited
    }
}

impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
where
    FieldsShape<FieldIdx>: fmt::Debug,
    Variants<FieldIdx, VariantIdx>: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutData>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutData {
            size,
            align,
            backend_repr,
            fields,
            largest_niche,
            uninhabited,
            variants,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed,
        } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("backend_repr", backend_repr)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("uninhabited", uninhabited)
            .field("variants", variants)
            .field("max_repr_align", max_repr_align)
            .field("unadjusted_abi_align", unadjusted_abi_align)
            .field("randomization_seed", randomization_seed)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}

/// Encodes extra information we have about a pointer.
/// Note that this information is advisory only, and backends are free to ignore it:
/// if the information is wrong, that can cause UB, but if the information is absent,
/// that must always be okay.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    /// If this is `None`, then this is a raw pointer, so size and alignment are not guaranteed to
    /// be reliable.
    pub safe: Option<PointerKind>,
    /// If `safe` is `Some`, then the pointer is either null or dereferenceable for this many bytes.
    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
    /// of this function call", i.e. it is UB for the memory that this pointer points to be freed
    /// while this function is still running.
    /// The size can be zero if the pointer is not dereferenceable.
    pub size: Size,
    /// If `safe` is `Some`, then the pointer is aligned as indicated.
    pub align: Align,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        self.backend_repr.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.backend_repr.is_sized()
    }

    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
    pub fn is_1zst(&self) -> bool {
        self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
    }

    /// Returns `true` if the size of the type is only known at runtime.
    pub fn is_runtime_sized(&self) -> bool {
        matches!(self.backend_repr, BackendRepr::ScalableVector { .. })
    }

    /// Returns the element count of a scalable vector.
    pub fn scalable_vector_element_count(&self) -> Option<u64> {
        match self.backend_repr {
            BackendRepr::ScalableVector { count, .. } => Some(count),
            _ => None,
        }
    }

    /// Returns `true` if the type is a ZST and not unsized.
    ///
    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
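    ///
    /// For example, `[u64; 0]` is a ZST with alignment 8: it satisfies
    /// `is_zst` but not `is_1zst`.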
    pub fn is_zst(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::ScalableVector { .. }
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Checks if these two layouts are equal enough to be considered "the same for all function
    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in
    /// the `Layout`; the `PassMode` needs to be compared as well. Also note that we assume
    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
    /// checks would otherwise be required.
    pub fn eq_abi(&self, other: &Self) -> bool {
        // The one thing that we are not capturing here is that for unsized types, the metadata
        // must also have the same ABI, and moreover that the same metadata leads to the same
        // size. The second point is quite hard to check though.
        self.size == other.size
            && self.is_sized() == other.is_sized()
            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
            && self.align.abi == other.align.abi
            && self.max_repr_align == other.max_repr_align
            && self.unadjusted_abi_align == other.unadjusted_abi_align
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// Not a known ABI.
    Unknown,
    /// No "-unwind" variant can be used here.
    NoExplicitUnwind,
}