rustc_abi/
lib.rs

// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", doc(rust_logo))]
#![cfg_attr(feature = "nightly", feature(assert_matches))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
// tidy-alphabetical-end

/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format for e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are usually discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
Although `rustc_abi` is mostly about calling conventions, it is good to remember that these other
usages exist. You will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention

## Crate Goal

ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
It should contain traits and types that other crates then use in their implementation.
For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`
but `rustc_abi` contains the types for calculating layout and describing register-passing.
This makes it easier to describe things in the same way across targets, codegen backends, and
even other Rust compilers, such as rust-analyzer!

*/

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};

mod callconv;
mod canon_abi;
mod extern_abi;
mod layout;
#[cfg(test)]
mod tests;

pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
#[cfg(feature = "nightly")]
pub use extern_abi::CVariadicStatus;
pub use extern_abi::{ExternAbi, all_names};
#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
pub use layout::{LayoutCalculator, LayoutCalculatorError};

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
#[cfg(feature = "nightly")]
pub trait HashStableContext {}

#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprFlags(u8);

bitflags! {
    impl ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        // Internal only for now. If true, don't reorder fields.
        // On its own it does not prevent ABI optimizations.
        const IS_LINEAR          = 1 << 3;
        // If true, the type's crate has opted into layout randomization.
        // Other flags can still inhibit reordering and thus randomization.
        // The seed is stored in `ReprOptions.field_shuffle_seed`.
        const RANDOMIZE_LAYOUT   = 1 << 4;
        // Any of these flags being set prevents the field reordering optimization.
        const FIELD_ORDER_UNOPTIMIZABLE   = ReprFlags::IS_C.bits()
                                 | ReprFlags::IS_SIMD.bits()
                                 | ReprFlags::IS_LINEAR.bits();
        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
    }
}

// This is the same as `rustc_data_structures::external_bitflags_debug` but without the
// `rustc_data_structures` dependency, so that it builds on stable.
impl std::fmt::Debug for ReprFlags {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
    /// `Pointer(true)` means `isize`.
    Pointer(bool),
    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// The seed to be used for randomizing a type's layout.
    ///
    /// Note: This could technically be a `u128` which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff; a 64-bit seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: Hash64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
    }

    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
    /// e.g. `repr(C)` or `repr(<int>)`.
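    ///
    /// A minimal sketch of the expected behavior (illustrative; relies only on this type's
    /// `Default` derive, under which no flags are set):
    ///
    /// ```ignore (illustrative)
    /// let repr_c = ReprOptions { flags: ReprFlags::IS_C, ..ReprOptions::default() };
    /// assert!(repr_c.inhibit_struct_field_reordering());
    /// assert!(!ReprOptions::default().inhibit_struct_field_reordering());
    /// ```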
    pub fn inhibit_struct_field_reordering(&self) -> bool {
        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimizations.
    pub fn inhibits_union_abi_opt(&self) -> bool {
        self.c()
    }
}

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

/// How pointers are represented in a given address space.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct PointerSpec {
    /// The size of the bitwise representation of the pointer.
    pointer_size: Size,
    /// The alignment of pointers for this address space.
    pointer_align: Align,
    /// The size of the value a pointer can be offset by in this address space.
    pointer_offset: Size,
    /// Pointers into this address space contain extra metadata.
    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
    _is_fat: bool,
}

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: Align,
    pub i8_align: Align,
    pub i16_align: Align,
    pub i32_align: Align,
    pub i64_align: Align,
    pub i128_align: Align,
    pub f16_align: Align,
    pub f32_align: Align,
    pub f64_align: Align,
    pub f128_align: Align,
    pub aggregate_align: Align,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, Align)>,

    pub default_address_space: AddressSpace,
    pub default_address_space_pointer_spec: PointerSpec,

    /// Address space information of all known address spaces.
    ///
    /// # Note
    ///
    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
    /// which instead lives in [`Self::default_address_space_pointer_spec`].
    address_space_info: Vec<(AddressSpace, PointerSpec)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default: `c_int::BITS`, usually 32).
    /// Note: This isn't part of LLVM's data layout string; it corresponds to `short_enum`,
    /// so the only values valid for LLVM are `c_int::BITS` or 8.
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: align(8),
            i8_align: align(8),
            i16_align: align(16),
            i32_align: align(32),
            i64_align: align(32),
            i128_align: align(32),
            f16_align: align(16),
            f32_align: align(32),
            f64_align: align(64),
            f128_align: align(128),
            aggregate_align: align(8),
            vector_align: vec![
                (Size::from_bits(64), align(64)),
                (Size::from_bits(128), align(128)),
            ],
            default_address_space: AddressSpace::ZERO,
            default_address_space_pointer_spec: PointerSpec {
                pointer_size: Size::from_bits(64),
                pointer_align: align(64),
                pointer_offset: Size::from_bits(64),
                _is_fat: false,
            },
            address_space_info: vec![],
            instruction_address_space: AddressSpace::ZERO,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },
    InvalidBitsSize { err: String },
    UnknownPointerSpecification { err: String },
}

impl TargetDataLayout {
    /// Parses the data layout from an
    /// [LLVM data layout string](https://llvm.org/docs/LangRef.html#data-layout).
    ///
    /// This function doesn't fill `c_enum_min_size`; it will always be `I32` since it cannot be
    /// determined from the LLVM string.
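    ///
    /// A sketch of expected usage (illustrative; the data layout string below is an
    /// x86_64-style example):
    ///
    /// ```ignore (illustrative)
    /// let dl = TargetDataLayout::parse_from_llvm_datalayout_string(
    ///     "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
    ///     AddressSpace::ZERO,
    /// )
    /// .unwrap_or_else(|_| panic!("invalid data layout"));
    /// assert_eq!(dl.endian, Endian::Little);
    /// // No `p` spec in the string, so the default 64-bit pointer size applies.
    /// assert_eq!(dl.pointer_size().bits(), 64);
    /// ```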
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
        default_address_space: AddressSpace,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let parse_size =
            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let parse_align_str = |s: &'a str, cause: &'a str| {
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s, "alignment", cause)?;
            Ok(align_from_bits(abi)?)
        };

        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
        // ignoring the secondary alignment specifications.
        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            parse_align_str(s[0], cause)
        };

        let mut dl = TargetDataLayout::default();
        dl.default_address_space = default_address_space;

        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
                [p, s, a @ ..] if p.starts_with("p") => {
                    let mut p = p.strip_prefix('p').unwrap();
                    let mut _is_fat = false;

                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
                    if p.starts_with('f') {
                        p = p.strip_prefix('f').unwrap();
                        _is_fat = true;
                    }

                    // However, we currently don't take into account further specifications:
                    // an error is emitted instead.
                    if p.starts_with(char::is_alphabetic) {
                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
                            err: p.to_string(),
                        });
                    }

                    let addr_space = if !p.is_empty() {
                        parse_address_space(p, "p-")?
                    } else {
                        AddressSpace::ZERO
                    };

                    let pointer_size = parse_size(s, "p-")?;
                    let pointer_align = parse_align_seq(a, "p-")?;
                    let info = PointerSpec {
                        pointer_offset: pointer_size,
                        pointer_size,
                        pointer_align,
                        _is_fat,
                    };
                    if addr_space == default_address_space {
                        dl.default_address_space_pointer_spec = info;
                    } else {
                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
                            Some(e) => e.1 = info,
                            None => {
                                dl.address_space_info.push((addr_space, info));
                            }
                        }
                    }
                }
                [p, s, a, _pr, i] if p.starts_with("p") => {
                    let mut p = p.strip_prefix('p').unwrap();
                    let mut _is_fat = false;

                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
                    if p.starts_with('f') {
                        p = p.strip_prefix('f').unwrap();
                        _is_fat = true;
                    }

                    // However, we currently don't take into account further specifications:
                    // an error is emitted instead.
                    if p.starts_with(char::is_alphabetic) {
                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
                            err: p.to_string(),
                        });
                    }

                    let addr_space = if !p.is_empty() {
                        parse_address_space(p, "p")?
                    } else {
                        AddressSpace::ZERO
                    };

                    let info = PointerSpec {
                        pointer_size: parse_size(s, "p-")?,
                        pointer_align: parse_align_str(a, "p-")?,
                        pointer_offset: parse_size(i, "p-")?,
                        _is_fat,
                    };

                    if addr_space == default_address_space {
                        dl.default_address_space_pointer_spec = info;
                    } else {
                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
                            Some(e) => e.1 = info,
                            None => {
                                dl.address_space_info.push((addr_space, info));
                            }
                        }
                    }
                }

                [s, a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        parse_size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = parse_align_seq(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, a @ ..] if s.starts_with('v') => {
                    let v_size = parse_size(&s[1..], "v")?;
                    let a = parse_align_seq(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Inherit, if not given, address space information for specific LLVM elements from the
        // default data address space.
        if (dl.instruction_address_space != dl.default_address_space)
            && dl
                .address_space_info
                .iter()
                .find(|(a, _)| *a == dl.instruction_address_space)
                .is_none()
        {
            dl.address_space_info.push((
                dl.instruction_address_space,
                dl.default_address_space_pointer_spec.clone(),
            ));
        }

        Ok(dl)
    }

    /// Returns the **exclusive** upper bound on object size in bytes, in the default data
    /// address space.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only about
    /// bytes, so on 64-bit targets we adopt a more constrained bound to stay within that limit.
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size().bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    /// Returns the **exclusive** upper bound on object size in bytes, in the given address space.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only about
    /// bytes, so on 64-bit targets we adopt a more constrained bound to stay within that limit.
    #[inline]
    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
        match self.pointer_size_in(address_space).bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        use Integer::*;
        match self.pointer_offset().bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
        use Integer::*;
        match self.pointer_offset_in(address_space).bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    /// psABI-mandated alignment for a vector type, if any.
    #[inline]
    fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> {
        self.vector_align
            .iter()
            .find(|(size, _align)| *size == vec_size)
            .map(|(_size, align)| *align)
    }


    /// An alignment resembling the one LLVM would pick for a vector.
    #[inline]
    pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {
        self.cabi_vector_align(vec_size)
            .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }

    /// Get the pointer size in the default data address space.
    #[inline]
    pub fn pointer_size(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_size
    }

    /// Get the pointer size in a specific address space.
    #[inline]
    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_size;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_size
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }

    /// Get the pointer index in the default data address space.
    #[inline]
    pub fn pointer_offset(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_offset
    }

    /// Get the pointer index in a specific address space.
    #[inline]
    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_offset;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_offset
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }

    /// Get the pointer alignment in the default data address space.
    #[inline]
    pub fn pointer_align(&self) -> AbiAlign {
        AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)
    }

    /// Get the pointer alignment in a specific address space.
    #[inline]
    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
        AbiAlign::new(if c == self.default_address_space {
            self.default_address_space_pointer_spec.pointer_align
        } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_align
        } else {
            panic!("Use of unknown address space {c:?}");
        })
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// used by rust-analyzer
impl HasDataLayout for &TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        (**self).data_layout()
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{s}""#)),
        }
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Size {
    raw: u64,
}

#[cfg(feature = "nightly")]
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
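    ///
    /// For example (illustrative):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(12).bytes(), 2); // rounds 12 bits up to 2 bytes
    /// assert_eq!(Size::from_bits(16).bytes(), 2);
    /// ```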
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        Size { raw: bits.div_ceil(8) }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

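    /// Rounds `self` up to the nearest multiple of `align`.
    ///
    /// A sketch of the behavior (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let four = Align::from_bytes(4).unwrap();
    /// assert_eq!(Size::from_bytes(5).align_to(four).bytes(), 8);
    /// assert!(Size::from_bytes(8).is_aligned(four));
    /// ```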
    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
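    ///
    /// For example (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let s = Size::from_bits(8);
    /// assert_eq!(s.sign_extend(0xFF), -1); // 0xFF reinterpreted as i8 is -1
    /// assert_eq!(s.truncate(0x1FF), 0xFF); // bits beyond the low 8 are dropped
    /// ```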
    #[inline]
    pub fn sign_extend(self, value: u128) -> i128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        ((value << shift) as i128) >> shift
    }

    /// Truncates `value` to `self` bits.
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

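    /// Smallest value representable in `self` bits as a signed integer; the two methods after
    /// it give the signed and unsigned maxima. For example (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let s = Size::from_bits(8);
    /// assert_eq!(s.signed_int_min(), -128);
    /// assert_eq!(s.signed_int_max(), 127);
    /// assert_eq!(s.unsigned_int_max(), 255);
    /// ```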
    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1))
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}

impl AlignFromBytesError {
    pub fn diag_ident(self) -> &'static str {
        match self {
            Self::NotPowerOfTwo(_) => "not_power_of_two",
            Self::TooLarge(_) => "too_large",
        }
    }

    pub fn align(self) -> u64 {
        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
        align
    }
}

impl fmt::Debug for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl fmt::Display for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "`{align}` is not a power of 2"),
            AlignFromBytesError::TooLarge(align) => write!(f, "`{align}` is too large"),
        }
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const EIGHT: Align = Align { pow2: 3 };
    // LLVM has a maximal supported alignment of 2^29, we inherit that.
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

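    /// Computes the alignment corresponding to `align` bytes. Illustrative behavior (a sketch,
    /// using only this type's own API):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Align::from_bytes(8).unwrap().bytes(), 8);
    /// assert!(matches!(Align::from_bytes(3), Err(AlignFromBytesError::NotPowerOfTwo(3))));
    /// assert_eq!(Align::from_bytes(0).unwrap(), Align::ONE); // zero is treated as one
    /// ```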
    #[inline]
    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        const fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub const fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub const fn bits(self) -> u64 {
        self.bytes() * 8
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Obtain the greatest factor of `size` that is an alignment
    /// (the largest power of two the Size is a multiple of).
    ///
    /// Note that all numbers are factors of 0.
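    ///
    /// For example (illustrative):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Align::max_aligned_factor(Size::from_bytes(12)).bytes(), 4);
    /// assert_eq!(Align::max_aligned_factor(Size::from_bytes(8)).bytes(), 8);
    /// ```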
    #[inline]
    pub fn max_aligned_factor(size: Size) -> Align {
        Align { pow2: size.bytes().trailing_zeros() as u8 }
    }

    /// Reduces Align to an aligned factor of `size`.
    #[inline]
    pub fn restrict_for_offset(self, size: Size) -> Align {
        self.min(Align::max_aligned_factor(size))
    }
}

/// The ABI-mandated alignment for a type.
///
/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
/// it is not exposed semantically to programmers nor can they meaningfully affect it.
/// The only concern for us is that preferred alignment must not be less than the mandated
/// alignment, and thus in practice the two values are almost always identical.
///
/// An example of a rare thing actually affected by preferred alignment is aligning of statics.
/// It is of effectively no consequence for layout in structs and on the stack.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAlign {
    pub abi: Align,
}

impl AbiAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAlign {
        AbiAlign { abi: align }
    }

    #[inline]
    pub fn min(self, other: AbiAlign) -> AbiAlign {
        AbiAlign { abi: self.abi.min(other.abi) }
    }

    #[inline]
    pub fn max(self, other: AbiAlign) -> AbiAlign {
        AbiAlign { abi: self.abi.max(other.abi) }
    }
}

impl Deref for AbiAlign {
    type Target = Align;

    fn deref(&self) -> &Self::Target {
        &self.abi
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn int_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "i8",
            I16 => "i16",
            I32 => "i32",
            I64 => "i64",
            I128 => "i128",
        }
    }

    pub fn uint_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "u8",
            I16 => "u16",
            I32 => "u32",
            I64 => "u64",
            I128 => "u128",
        }
    }

    #[inline]
    pub fn size(self) -> Size {
        use Integer::*;
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the Integer type from an IntegerType.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Integer::*;
        let dl = cx.data_layout();

        AbiAlign::new(match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        })
    }

    /// Returns the largest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_max(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MAX as i128,
            I16 => i16::MAX as i128,
            I32 => i32::MAX as i128,
            I64 => i64::MAX as i128,
            I128 => i128::MAX,
        }
    }

    /// Returns the smallest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_min(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MIN as i128,
            I16 => i16::MIN as i128,
            I32 => i32::MIN as i128,
            I64 => i64::MIN as i128,
            I128 => i128::MIN,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
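    ///
    /// For example (illustrative):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Integer::fit_signed(127), Integer::I8);
    /// assert_eq!(Integer::fit_signed(128), Integer::I16);
    /// assert_eq!(Integer::fit_signed(-129), Integer::I16);
    /// ```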
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        use Integer::*;
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
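    ///
    /// For example (illustrative):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Integer::fit_unsigned(255), Integer::I8);
    /// assert_eq!(Integer::fit_unsigned(256), Integer::I16);
    /// ```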
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        use Integer::*;
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        use Integer::*;
        let dl = cx.data_layout();

        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
        })
    }

    /// Find the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        use Integer::*;
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}

impl Float {
    pub fn size(self) -> Size {
        use Float::*;

        match self {
            F16 => Size::from_bits(16),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            F128 => Size::from_bits(128),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Float::*;
        let dl = cx.data_layout();

        AbiAlign::new(match self {
            F16 => dl.f16_align,
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            F128 => dl.f128_align,
        })
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    Float(Float),
    Pointer(AddressSpace),
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            Float(f) => f.size(),
            Pointer(a) => dl.pointer_size_in(a),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            Float(f) => f.align(dl),
            Pointer(a) => dl.pointer_align_in(a),
        }
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
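///
/// For example (illustrative):
///
/// ```ignore (illustrative)
/// let r = WrappingRange { start: 254, end: 2 };
/// assert!(r.contains(255));
/// assert!(r.contains(1));
/// assert!(!r.contains(10));
/// ```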
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `true` if all the values in `other` are contained in this range,
    /// when the values are considered as having width `size`.
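    ///
    /// For example (illustrative): a full range contains any wrapping range of the same
    /// width, but not vice versa.
    ///
    /// ```ignore (illustrative)
    /// let size = Size::from_bits(8);
    /// let full = WrappingRange { start: 0, end: 255 };
    /// let wrap = WrappingRange { start: 254, end: 2 };
    /// assert!(full.contains_range(wrap, size));
    /// assert!(!wrap.contains_range(full, size));
    /// ```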
    #[inline(always)]
    pub fn contains_range(&self, other: Self, size: Size) -> bool {
        if self.is_full_for(size) {
            true
        } else {
            let trunc = |x| size.truncate(x);

            let delta = self.start;
            let max = trunc(self.end.wrapping_sub(delta));

            let other_start = trunc(other.start.wrapping_sub(delta));
            let other_end = trunc(other.end.wrapping_sub(delta));

            // Having shifted both input ranges by `delta`, now we only need to check
            // whether `0..=max` contains `other_start..=other_end`, which can only
            // happen if the other doesn't wrap since `self` isn't everything.
            (other_start <= other_end) && (other_end <= max)
        }
    }

    /// Returns `self` with replaced `start`.
    #[inline(always)]
    fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`.
    #[inline(always)]
    fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    ///
    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.
    /// Niche calculations can produce full ranges which are not the canonical one;
    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.
    #[inline]
    fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }

    /// Checks whether this range is considered non-wrapping when the values are
    /// interpreted as *unsigned* numbers of width `size`.
    ///
    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
    /// and `Err(..)` if the range is full so it depends how you think about it.
    #[inline]
    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
        if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
    }

    /// Checks whether this range is considered non-wrapping when the values are
    /// interpreted as *signed* numbers of width `size`.
    ///
    /// This is heavily dependent on the `size`, as `100..=200` does wrap when
    /// interpreted as `i8`, but doesn't when interpreted as `i16`.
    ///
    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
    /// and `Err(..)` if the range is full so it depends how you think about it.
    #[inline]
    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
        if self.is_full_for(size) {
            Err(..)
        } else {
            let start: i128 = size.sign_extend(self.start);
            let end: i128 = size.sign_extend(self.end);
            Ok(start <= end)
        }
    }
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

1480/// Information about one scalar component of a Rust type.
1481#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1482#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1483pub enum Scalar {
1484    Initialized {
1485        value: Primitive,
1486
1487        // FIXME(eddyb) always use the shortest range, e.g., by finding
1488        // the largest space between two consecutive valid values and
1489        // taking everything else as the (shortest) valid range.
1490        valid_range: WrappingRange,
1491    },
1492    Union {
1493        /// Even for unions, we need to use the correct registers for the kind of
1494        /// values inside the union, so we keep the `Primitive` type around. We
1495        /// also use it to compute the size of the scalar.
1496        /// However, unions never have niches and even allow undef,
1497        /// so there is no `valid_range`.
1498        value: Primitive,
1499    },
1500}
1501
1502impl Scalar {
1503    #[inline]
1504    pub fn is_bool(&self) -> bool {
1505        use Integer::*;
1506        matches!(
1507            self,
1508            Scalar::Initialized {
1509                value: Primitive::Int(I8, false),
1510                valid_range: WrappingRange { start: 0, end: 1 }
1511            }
1512        )
1513    }
1514
1515    /// Get the primitive representation of this type, ignoring the valid range and whether the
1516    /// value is allowed to be undefined (due to being a union).
1517    pub fn primitive(&self) -> Primitive {
1518        match *self {
1519            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
1520        }
1521    }
1522
1523    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
1524        self.primitive().align(cx)
1525    }
1526
1527    pub fn size(self, cx: &impl HasDataLayout) -> Size {
1528        self.primitive().size(cx)
1529    }
1530
1531    #[inline]
1532    pub fn to_union(&self) -> Self {
1533        Self::Union { value: self.primitive() }
1534    }
1535
1536    #[inline]
1537    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
1538        match *self {
1539            Scalar::Initialized { valid_range, .. } => valid_range,
1540            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
1541        }
1542    }
1543
1544    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
1545    /// union.
1546    #[inline]
1547    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
1548        match self {
1549            Scalar::Initialized { valid_range, .. } => valid_range,
1550            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
1551        }
1552    }
1553
1554    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole
1555    /// layout.
1556    #[inline]
1557    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
1558        match *self {
1559            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
1560            Scalar::Union { .. } => true,
1561        }
1562    }
1563
1564    /// Returns `true` if this type can be left uninit.
1565    #[inline]
1566    pub fn is_uninit_valid(&self) -> bool {
1567        match *self {
1568            Scalar::Initialized { .. } => false,
1569            Scalar::Union { .. } => true,
1570        }
1571    }
1572
1573    /// Returns `true` if this is a signed integer scalar.
1574    #[inline]
1575    pub fn is_signed(&self) -> bool {
1576        match self.primitive() {
1577            Primitive::Int(_, signed) => signed,
1578            _ => false,
1579        }
1580    }
1581}
1582
1583// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
1584/// Describes how the fields of a type are located in memory.
1585#[derive(PartialEq, Eq, Hash, Clone, Debug)]
1586#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1587pub enum FieldsShape<FieldIdx: Idx> {
1588    /// Scalar primitives and `!`, which never have fields.
1589    Primitive,
1590
1591    /// All fields start at offset zero. The `usize` is the field count.
1592    Union(NonZeroUsize),
1593
1594    /// Array/vector-like placement, with all fields of identical types.
1595    Array { stride: Size, count: u64 },
1596
1597    /// Struct-like placement, with precomputed offsets.
1598    ///
1599    /// Fields are guaranteed to not overlap, but note that gaps
1600    /// before, between, and after all the fields are NOT always
1601    /// padding, and as such their contents may not be discarded.
1602    /// For example, enum variants leave a gap at the start,
1603    /// where the discriminant field in the enum layout goes.
1604    Arbitrary {
1605        /// Offsets for the first byte of each field,
1606        /// ordered to match the source definition order.
1607        /// This vector does not necessarily go in increasing order.
1608        // FIXME(eddyb) use small vector optimization for the common case.
1609        offsets: IndexVec<FieldIdx, Size>,
1610
1611        /// Maps source order field indices to memory order indices,
1612        /// depending on how the fields were reordered (if at all).
1613        /// This is a permutation, with both the source order and the
1614        /// memory order using the same (0..n) index ranges.
1615        ///
1616        /// Note that during computation of `memory_index`, sometimes
1617        /// it is easier to operate on the inverse mapping (that is,
1618        /// from memory order to source order), and that is usually
1619        /// named `inverse_memory_index`.
1620        ///
1621        // FIXME(eddyb) build a better abstraction for permutations, if possible.
1622        // FIXME(camlorn) also consider small vector optimization here.
1623        memory_index: IndexVec<FieldIdx, u32>,
1624    },
1625}
1626
1627impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
1628    #[inline]
1629    pub fn count(&self) -> usize {
1630        match *self {
1631            FieldsShape::Primitive => 0,
1632            FieldsShape::Union(count) => count.get(),
1633            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
1634            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
1635        }
1636    }
1637
1638    #[inline]
1639    pub fn offset(&self, i: usize) -> Size {
1640        match *self {
1641            FieldsShape::Primitive => {
1642                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
1643            }
1644            FieldsShape::Union(count) => {
1645                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
1646                Size::ZERO
1647            }
1648            FieldsShape::Array { stride, count } => {
1649                let i = u64::try_from(i).unwrap();
1650                assert!(i < count, "tried to access field {i} of array with {count} fields");
1651                stride * i
1652            }
1653            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
1654        }
1655    }
1656
1657    #[inline]
1658    pub fn memory_index(&self, i: usize) -> usize {
1659        match *self {
1660            FieldsShape::Primitive => {
1661                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
1662            }
1663            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1664            FieldsShape::Arbitrary { ref memory_index, .. } => {
1665                memory_index[FieldIdx::new(i)].try_into().unwrap()
1666            }
1667        }
1668    }
1669
1670    /// Gets the source indices of the fields, ordered by increasing offset.
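    ///
    /// For example (illustrative): with `offsets = [4, 0]` and `memory_index = [1, 0]`
    /// (source field 0 is stored second in memory), this yields `1, 0`:
    /// field 1 (offset 0) first, then field 0 (offset 4).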
1671    #[inline]
1672    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
1673        let mut inverse_small = [0u8; 64];
1674        let mut inverse_big = IndexVec::new();
1675        let use_small = self.count() <= inverse_small.len();
1676
1677        // We have to write this logic twice in order to keep the array small.
1678        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
1679            if use_small {
1680                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
1681                    inverse_small[mem_idx as usize] = field_idx.index() as u8;
1682                }
1683            } else {
1684                inverse_big = memory_index.invert_bijective_mapping();
1685            }
1686        }
1687
1688        // Primitives don't really have fields in the way that structs do,
1689        // but having this return an empty iterator for them is unhelpful
1690        // since that makes them look kinda like ZSTs, which they're not.
1691        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };
1692
1693        (0..pseudofield_count).map(move |i| match *self {
1694            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1695            FieldsShape::Arbitrary { .. } => {
1696                if use_small {
1697                    inverse_small[i] as usize
1698                } else {
1699                    inverse_big[i as u32].index()
1700                }
1701            }
1702        })
1703    }
1704}
1705
1706/// An identifier that specifies the address space that some operation
1707/// should operate on. Special address spaces have an effect on code generation,
1708/// depending on the target and the address spaces it implements.
1709#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
1710#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1711pub struct AddressSpace(pub u32);
1712
1713impl AddressSpace {
1714    /// LLVM's `0` address space.
1715    pub const ZERO: Self = AddressSpace(0);
1716}
1717
1718/// The way we represent values to the backend
1719///
1720/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
1721/// In reality, this implies little about that, but is mostly used to describe the syntactic form
1722/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
1723/// The psABI may need consideration in doing so, but this enum does not, in itself, constitute
1724/// a promise about how the value will be lowered to the calling convention.
1725///
1726/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
1727/// and larger values are usually represented as memory.
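///
/// For example (illustrative): an `i64` is typically `Scalar`, a small pair like
/// `(i32, u32)` may be `ScalarPair`, and a large struct is usually
/// `Memory { sized: true }`.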
1728#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1729#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1730pub enum BackendRepr {
1731    Scalar(Scalar),
1732    ScalarPair(Scalar, Scalar),
1733    SimdVector {
1734        element: Scalar,
1735        count: u64,
1736    },
1737    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
1738    Memory {
1739        /// If true, the size is exact, otherwise it's only a lower bound.
1740        sized: bool,
1741    },
1742}
1743
1744impl BackendRepr {
1745    /// Returns `true` if the layout corresponds to an unsized type.
1746    #[inline]
1747    pub fn is_unsized(&self) -> bool {
1748        match *self {
1749            BackendRepr::Scalar(_)
1750            | BackendRepr::ScalarPair(..)
1751            | BackendRepr::SimdVector { .. } => false,
1752            BackendRepr::Memory { sized } => !sized,
1753        }
1754    }
1755
1756    #[inline]
1757    pub fn is_sized(&self) -> bool {
1758        !self.is_unsized()
1759    }
1760
1761    /// Returns `true` if this is a single signed integer scalar.
1762    /// Sanity check: panics if this is not a scalar type (see PR #70189).
1763    #[inline]
1764    pub fn is_signed(&self) -> bool {
1765        match self {
1766            BackendRepr::Scalar(scal) => scal.is_signed(),
1767            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
1768        }
1769    }
1770
1771    /// Returns `true` if this is a scalar type.
1772    #[inline]
1773    pub fn is_scalar(&self) -> bool {
1774        matches!(*self, BackendRepr::Scalar(_))
1775    }
1776
1777    /// Returns `true` if this is a bool.
1778    #[inline]
1779    pub fn is_bool(&self) -> bool {
1780        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
1781    }
1782
1783    /// The psABI alignment for a `Scalar` or `ScalarPair`
1784    ///
1785    /// `None` for other variants.
1786    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
1787        match *self {
1788            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
1789            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
1790            // The align of a Vector can vary in surprising ways
1791            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
1792        }
1793    }
1794
1795    /// The psABI size for a `Scalar` or `ScalarPair`
1796    ///
1797    /// `None` for other variants.
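    ///
    /// For example (a sketch): for a pair of `i32` (size 4, align 4) and `i64`
    /// (size 8, align 8), the second field lands at offset 8 and the total size is 16.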
1798    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
1799        match *self {
1800            // No padding in scalars.
1801            BackendRepr::Scalar(s) => Some(s.size(cx)),
1802            // May have some padding between the pair.
1803            BackendRepr::ScalarPair(s1, s2) => {
1804                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
1805                let size = (field2_offset + s2.size(cx)).align_to(
1806                    self.scalar_align(cx)
1807                        // We absolutely must have an answer here or everything is FUBAR.
1808                        .unwrap(),
1809                );
1810                Some(size)
1811            }
1812            // The size of a Vector can vary in surprising ways
1813            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
1814        }
1815    }
1816
1817    /// Discard validity range information and allow undef.
1818    pub fn to_union(&self) -> Self {
1819        match *self {
1820            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
1821            BackendRepr::ScalarPair(s1, s2) => {
1822                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
1823            }
1824            BackendRepr::SimdVector { element, count } => {
1825                BackendRepr::SimdVector { element: element.to_union(), count }
1826            }
1827            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
1828        }
1829    }
1830
1831    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
1832        match (self, other) {
1833            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
1834            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
1835            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
1836            (
1837                BackendRepr::SimdVector { element: element_l, count: count_l },
1838                BackendRepr::SimdVector { element: element_r, count: count_r },
1839            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
1840            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
1841                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
1842            }
1843            // Everything else must be strictly identical.
1844            _ => self == other,
1845        }
1846    }
1847}
1848
1849// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
1850#[derive(PartialEq, Eq, Hash, Clone, Debug)]
1851#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1852pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
1853    /// A type with no valid variants. Must be uninhabited.
1854    Empty,
1855
1856    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
1857    Single {
1858        /// Always `0` for types that cannot have multiple variants.
1859        index: VariantIdx,
1860    },
1861
1862    /// Enum-likes with more than one variant: each variant comes with
1863    /// a *discriminant* (usually the same as the variant index but the user can
1864    /// assign explicit discriminant values). That discriminant is encoded
1865    /// as a *tag* on the machine. The layout of each variant is
1866    /// a struct, and they all have space reserved for the tag.
1867    /// For enums, the tag is the sole field of the layout.
1868    Multiple {
1869        tag: Scalar,
1870        tag_encoding: TagEncoding<VariantIdx>,
1871        tag_field: FieldIdx,
1872        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
1873    },
1874}
1875
1876// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.
1877#[derive(PartialEq, Eq, Hash, Clone, Debug)]
1878#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1879pub enum TagEncoding<VariantIdx: Idx> {
1880    /// The tag directly stores the discriminant, but possibly with a smaller layout
1881    /// (so converting the tag to the discriminant can require sign extension).
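    /// For example (illustrative), an enum with discriminants `-1` and `0` can
    /// store its tag in a single byte; recovering the full-width discriminant
    /// from the tag then requires sign extension.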
1882    Direct,
1883
1884    /// Niche (values invalid for a type) encoding the discriminant.
1885    /// Note that for this encoding, the discriminant and variant index of each variant coincide!
1886    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
1887    ///
1888    /// The variant `untagged_variant` contains a niche at an arbitrary
1889    /// offset (field [`Variants::Multiple::tag_field`] of the enum).
1890    /// For a variant with variant index `i`, such that `i != untagged_variant`,
1891    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`
1892    /// (this is wrapping arithmetic using the type of the niche field, cf. the
1893    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)
1894    /// query implementation).
1895    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,
1896    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside
1897    /// `niche_variants`, the tag must have encoded the `untagged_variant`.
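    ///
    /// As a numeric sketch (illustrative values): with `niche_variants = 1..=2`,
    /// `niche_start = 251`, and `untagged_variant = 0`, variant 1 is encoded as
    /// tag 251, variant 2 as tag 252, and any other tag value means variant 0.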
1898    ///
1899    /// For example, `Option<(usize, &T)>` is represented such that the tag for
1900    /// `None` is the null pointer in the second tuple field, while
1901    /// `Some` is the identity function (with a non-null reference) and needs no
1902    /// additional tag, i.e., the reference being non-null uniquely identifies this variant.
1903    ///
1904    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
1905    /// range cannot be represented; they must be uninhabited.
1906    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.
1907    Niche {
1908        untagged_variant: VariantIdx,
1909        /// This range *may* contain `untagged_variant` or uninhabited variants;
1910        /// these are then just "dead values" and not used to encode anything.
1911        niche_variants: RangeInclusive<VariantIdx>,
1912        /// This is inbounds of the type of the niche field
1913        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
1914        niche_start: u128,
1915    },
1916}
1917
1918#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1919#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1920pub struct Niche {
1921    pub offset: Size,
1922    pub value: Primitive,
1923    pub valid_range: WrappingRange,
1924}
1925
1926impl Niche {
1927    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
1928        let Scalar::Initialized { value, valid_range } = scalar else { return None };
1929        let niche = Niche { offset, value, valid_range };
1930        if niche.available(cx) > 0 { Some(niche) } else { None }
1931    }
1932
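    /// Returns the number of invalid values for this niche's primitive, i.e. how
    /// many distinct tag values the niche could hand out. For example
    /// (illustrative), a `bool` byte with valid range `0..=1` has 254 such values.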
1933    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
1934        let Self { value, valid_range: v, .. } = *self;
1935        let size = value.size(cx);
1936        assert!(size.bits() <= 128);
1937        let max_value = size.unsigned_int_max();
1938
1939        // Find out how many values are outside the valid range.
1940        let niche = v.end.wrapping_add(1)..v.start;
1941        niche.end.wrapping_sub(niche.start) & max_value
1942    }
1943
1944    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
1945        assert!(count > 0);
1946
1947        let Self { value, valid_range: v, .. } = *self;
1948        let size = value.size(cx);
1949        assert!(size.bits() <= 128);
1950        let max_value = size.unsigned_int_max();
1951
1952        let niche = v.end.wrapping_add(1)..v.start;
1953        let available = niche.end.wrapping_sub(niche.start) & max_value;
1954        if count > available {
1955            return None;
1956        }
1957
1958        // Extend the range of valid values being reserved by moving either `v.start` or `v.end`
1959        // bound. Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy
1960        // the niche of zero. This is accomplished by preferring enums with two variants (`count == 1`)
1961        // and always taking the shortest path to niche zero. Having `None` in niche zero can
1962        // enable some special optimizations.
1963        //
1964        // Bound selection criteria:
1965        // 1. Select closest to zero given wrapping semantics.
1966        // 2. Avoid moving past zero if possible.
1967        //
1968        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
1969        // since they have to fit perfectly. If niche zero is already reserved, the selection of
1970        // bounds is of little interest.
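        //
        // For example (a sketch): `bool` has valid range 0..=1 in a byte, so
        // `reserve(cx, 1)` for an `Option<bool>`-like enum takes `move_end`,
        // extending the range to 0..=2 and handing out 2 as the niche start.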
1971        let move_start = |v: WrappingRange| {
1972            let start = v.start.wrapping_sub(count) & max_value;
1973            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
1974        };
1975        let move_end = |v: WrappingRange| {
1976            let start = v.end.wrapping_add(1) & max_value;
1977            let end = v.end.wrapping_add(count) & max_value;
1978            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
1979        };
1980        let distance_end_zero = max_value - v.end;
1981        if v.start > v.end {
1982            // zero is unavailable because wrapping occurs
1983            move_end(v)
1984        } else if v.start <= distance_end_zero {
1985            if count <= v.start {
1986                move_start(v)
1987            } else {
1988                // moved past zero, use other bound
1989                move_end(v)
1990            }
1991        } else {
1992            let end = v.end.wrapping_add(count) & max_value;
1993            let overshot_zero = (1..=v.end).contains(&end);
1994            if overshot_zero {
1995                // moved past zero, use other bound
1996                move_start(v)
1997            } else {
1998                move_end(v)
1999            }
2000        }
2001    }
2002}
2003
2004// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
2005#[derive(PartialEq, Eq, Hash, Clone)]
2006#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
2007pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
2008    /// Says where the fields are located within the layout.
2009    pub fields: FieldsShape<FieldIdx>,
2010
2011    /// Encodes information about multi-variant layouts.
2012    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
2013    /// shared between all variants. One of them will be the discriminant,
2014    /// but e.g. coroutines can have more.
2015    ///
2016    /// To access all fields of this layout, both `fields` and the fields of the active variant
2017    /// must be taken into account.
2018    pub variants: Variants<FieldIdx, VariantIdx>,
2019
2020    /// The `backend_repr` defines how this data will be represented to the codegen backend,
2021    /// and encodes value restrictions via `valid_range`.
2022    ///
2023    /// Note that this is entirely orthogonal to the recursive structure defined by
2024    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
2025    /// `BackendRepr::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
2026    /// have to be taken into account to find all fields of this layout.
2027    pub backend_repr: BackendRepr,
2028
2029    /// The leaf scalar with the largest number of invalid values
2030    /// (i.e. outside of its `valid_range`), if it exists.
2031    pub largest_niche: Option<Niche>,
2032    /// Is this type known to be uninhabited?
2033    ///
2034    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
2035    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
2036    pub uninhabited: bool,
2037
2038    pub align: AbiAlign,
2039    pub size: Size,
2040
2041    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
2042    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
2043    /// requested, even if the requested alignment is equal to the natural alignment.
2044    pub max_repr_align: Option<Align>,
2045
2046    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
2047    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
2048    /// in some cases.
2049    pub unadjusted_abi_align: Align,
2050
2051    /// The randomization seed based on this type's own repr and its fields.
2052    ///
2053    /// Since randomization is toggled on a per-crate basis, even crates that do not have
2054    /// randomization enabled should still calculate a seed, so that downstream crates can use it
2055    /// to distinguish different types.
2056    ///
2057    /// For every `T` and `U` for which we do not guarantee that a `repr(Rust)` `Foo<T>` can be
2058    /// coerced or transmuted to `Foo<U>`, we aim to create probabilistically distinct seeds so
2059    /// that `Foo` can choose to reorder its fields based on that information. The current
2060    /// implementation is a conservative approximation of this goal.
2061    pub randomization_seed: Hash64,
2062}
2063
2064impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2065    /// Returns `true` if this is an aggregate type (including a ScalarPair!)
2066    pub fn is_aggregate(&self) -> bool {
2067        match self.backend_repr {
2068            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false,
2069            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
2070        }
2071    }
2072
2073    /// Returns `true` if this is an uninhabited type
2074    pub fn is_uninhabited(&self) -> bool {
2075        self.uninhabited
2076    }
2077}
2078
2079impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
2080where
2081    FieldsShape<FieldIdx>: fmt::Debug,
2082    Variants<FieldIdx, VariantIdx>: fmt::Debug,
2083{
2084    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2085        // This is how `Layout` used to print before it became
2086        // `Interned<LayoutData>`. We print it like this to avoid having to update
2087        // expected output in a lot of tests.
2088        let LayoutData {
2089            size,
2090            align,
2091            backend_repr,
2092            fields,
2093            largest_niche,
2094            uninhabited,
2095            variants,
2096            max_repr_align,
2097            unadjusted_abi_align,
2098            randomization_seed,
2099        } = self;
2100        f.debug_struct("Layout")
2101            .field("size", size)
2102            .field("align", align)
2103            .field("backend_repr", backend_repr)
2104            .field("fields", fields)
2105            .field("largest_niche", largest_niche)
2106            .field("uninhabited", uninhabited)
2107            .field("variants", variants)
2108            .field("max_repr_align", max_repr_align)
2109            .field("unadjusted_abi_align", unadjusted_abi_align)
2110            .field("randomization_seed", randomization_seed)
2111            .finish()
2112    }
2113}
2114
2115#[derive(Copy, Clone, PartialEq, Eq, Debug)]
2116pub enum PointerKind {
2117    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
2118    SharedRef { frozen: bool },
2119    /// Mutable reference. `unpin` indicates the absence of any pinned data.
2120    MutableRef { unpin: bool },
2121    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
2122    /// uses the global allocator or a custom one.
2123    Box { unpin: bool, global: bool },
2124}
2125
2126/// Encodes extra information we have about a pointer.
2127/// Note that this information is advisory only, and backends are free to ignore it:
2128/// if the information is wrong, that can cause UB, but if the information is absent,
2129/// that must always be okay.
2130#[derive(Copy, Clone, Debug)]
2131pub struct PointeeInfo {
2132    /// If this is `None`, then this is a raw pointer, so size and alignment are not guaranteed to
2133    /// be reliable.
2134    pub safe: Option<PointerKind>,
2135    /// If `safe` is `Some`, then the pointer is either null or dereferenceable for this many bytes.
2136    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
2137    /// of this function call", i.e. it is UB for the memory that this pointer points to be freed
2138    /// while this function is still running.
2139    /// The size can be zero if the pointer is not dereferenceable.
2140    pub size: Size,
2141    /// If `safe` is `Some`, then the pointer is aligned as indicated.
2142    pub align: Align,
2143}
2144
2145impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
2146    /// Returns `true` if the layout corresponds to an unsized type.
2147    #[inline]
2148    pub fn is_unsized(&self) -> bool {
2149        self.backend_repr.is_unsized()
2150    }
2151
2152    #[inline]
2153    pub fn is_sized(&self) -> bool {
2154        self.backend_repr.is_sized()
2155    }
2156
2157    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
2158    pub fn is_1zst(&self) -> bool {
2159        self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
2160    }
2161
2162    /// Returns `true` if the type is a ZST and not unsized.
2163    ///
2164    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
2165    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
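    /// For example, `[u64; 0]` has size 0 but alignment 8, so it is a ZST but not a 1-ZST.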
2166    pub fn is_zst(&self) -> bool {
2167        match self.backend_repr {
2168            BackendRepr::Scalar(_)
2169            | BackendRepr::ScalarPair(..)
2170            | BackendRepr::SimdVector { .. } => false,
2171            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
2172        }
2173    }
2174
2175    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
2176    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in the
2177    /// `Layout`; the `PassMode` needs to be compared as well. Also note that we assume
2178    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
2179    /// checks would otherwise be required.
2180    pub fn eq_abi(&self, other: &Self) -> bool {
2181        // The one thing that we are not capturing here is that for unsized types, the metadata must
2182        // also have the same ABI, and moreover that the same metadata leads to the same size. The
2183        // second point is quite hard to check, though.
2184        self.size == other.size
2185            && self.is_sized() == other.is_sized()
2186            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
2187            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
2188            && self.align.abi == other.align.abi
2189            && self.max_repr_align == other.max_repr_align
2190            && self.unadjusted_abi_align == other.unadjusted_abi_align
2191    }
2192}
2193
2194#[derive(Copy, Clone, Debug)]
2195pub enum StructKind {
2196    /// A tuple, closure, or univariant which cannot be coerced to unsized.
2197    AlwaysSized,
2198    /// A univariant, the last field of which may be coerced to unsized.
2199    MaybeUnsized,
2200    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
2201    Prefixed(Size, Align),
2202}
2203
2204#[derive(Clone, Debug)]
2205pub enum AbiFromStrErr {
2206    /// not a known ABI
2207    Unknown,
2208    /// no "-unwind" variant can be used here
2209    NoExplicitUnwind,
2210}