rustc_codegen_ssa/mir/operand.rs

use std::fmt;

use itertools::Either;
use rustc_abi as abi;
use rustc_abi::{
    Align, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, TagEncoding, VariantIdx, Variants,
};
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
use rustc_middle::mir::{self, ConstValue};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::place::{PlaceRef, PlaceValue};
use super::rvalue::transmute_scalar;
use super::{FunctionCx, LocalRef};
use crate::MemFlags;
use crate::common::IntPredicate;
use crate::traits::*;

/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
/// safety check.
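///
/// As a rough illustration (the exact mapping is determined by the layout
/// code, not fixed here): an `i32` is typically `Immediate`, a `&[u8]` is
/// `Pair(data_ptr, len)`, a large `[u8; 1024]` is `Ref`, and `()` is
/// `ZeroSized`.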
#[derive(Copy, Clone, Debug)]
pub enum OperandValue<V> {
    /// A reference to the actual operand. The data is guaranteed
    /// to be valid for the operand's lifetime.
    /// The second value, if any, is the extra data (vtable or length)
    /// which indicates that it refers to an unsized rvalue.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_ref`] returns `true`.
    /// (That basically amounts to "isn't one of the other variants".)
    ///
    /// This holds a [`PlaceValue`] (like a [`PlaceRef`] does) with a pointer
    /// to the location holding the value. The type behind that pointer is the
    /// one returned by [`LayoutTypeCodegenMethods::backend_type`].
    Ref(PlaceValue<V>),
    /// A single LLVM immediate value.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_immediate`] returns `true`.
    /// The backend value in this variant must be the *immediate* backend type,
    /// as returned by [`LayoutTypeCodegenMethods::immediate_backend_type`].
    Immediate(V),
    /// A pair of immediate LLVM values. Used by wide pointers too.
    ///
    /// # Invariants
    /// - For `Pair(a, b)`, `a` is always the component at offset 0, though it
    ///   may have a `FieldIdx` greater than zero if 1-ZST fields precede it.
    /// - `b` is never at offset 0, since neither component is a 1-ZST.
    /// - `a` and `b` always have distinct `FieldIdx`s, but `b`'s index may be
    ///   lower than `a`'s, and the two need not be adjacent: arbitrary numbers
    ///   of 1-ZST fields can sit between them without affecting the shape of
    ///   the data, which is what determines whether `Pair` is used.
    /// - An `OperandValue` *must* be this variant for any type for which
    ///   [`LayoutTypeCodegenMethods::is_backend_scalar_pair`] returns `true`.
    /// - The backend values in this variant must be the *immediate* backend
    ///   types, as returned by
    ///   [`LayoutTypeCodegenMethods::scalar_pair_element_backend_type`]
    ///   with `immediate: true`.
    Pair(V, V),
    /// A value taking no bytes, and which therefore needs no LLVM value at all.
    ///
    /// If you ever need a `V` to pass to something, get a fresh poison value
    /// from [`ConstCodegenMethods::const_poison`].
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// `is_zst` on its `Layout` returns `true`. Note however that
    /// these values can still require alignment.
    ZeroSized,
}

impl<V: CodegenObject> OperandValue<V> {
    /// Return the data pointer and optional metadata as backend values
    /// if this value can be treated as a pointer.
    pub(crate) fn try_pointer_parts(self) -> Option<(V, Option<V>)> {
        match self {
            OperandValue::Immediate(llptr) => Some((llptr, None)),
            OperandValue::Pair(llptr, llextra) => Some((llptr, Some(llextra))),
            OperandValue::Ref(_) | OperandValue::ZeroSized => None,
        }
    }

    /// Treat this value as a pointer and return the data pointer and
    /// optional metadata as backend values.
    ///
    /// If you're making a place, use [`Self::deref`] instead.
    pub(crate) fn pointer_parts(self) -> (V, Option<V>) {
        self.try_pointer_parts()
            .unwrap_or_else(|| bug!("OperandValue cannot be a pointer: {self:?}"))
    }

    /// Treat this value as a pointer and return the place to which it points.
    ///
    /// The pointer immediate doesn't inherently know its alignment,
    /// so you need to pass it in. If you want to get it from a type's ABI
    /// alignment, then maybe you want [`OperandRef::deref`] instead.
    ///
    /// This is the inverse of [`PlaceValue::address`].
    pub(crate) fn deref(self, align: Align) -> PlaceValue<V> {
        let (llval, llextra) = self.pointer_parts();
        PlaceValue { llval, llextra, align }
    }

    pub(crate) fn is_expected_variant_for_type<'tcx, Cx: LayoutTypeCodegenMethods<'tcx>>(
        &self,
        cx: &Cx,
        ty: TyAndLayout<'tcx>,
    ) -> bool {
        match self {
            OperandValue::ZeroSized => ty.is_zst(),
            OperandValue::Immediate(_) => cx.is_backend_immediate(ty),
            OperandValue::Pair(_, _) => cx.is_backend_scalar_pair(ty),
            OperandValue::Ref(_) => cx.is_backend_ref(ty),
        }
    }
}

/// An `OperandRef` is an "SSA" reference to a Rust value, along with
/// its type.
///
/// NOTE: unless you know a value's type exactly, you should not
/// generate LLVM opcodes acting on it and instead act via methods,
/// to avoid nasty edge cases. In particular, using `Builder::store`
/// directly is sure to cause problems -- use `OperandRef::store`
/// instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx, V> {
    /// The value.
    pub val: OperandValue<V>,

    /// The layout of the value, based on its Rust type.
    pub layout: TyAndLayout<'tcx>,
}

impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
    }
}

impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
    pub fn zero_sized(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, V> {
        assert!(layout.is_zst());
        OperandRef { val: OperandValue::ZeroSized, layout }
    }

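    /// Codegen a constant directly as an SSA operand where possible.
    ///
    /// As a rough sketch of the cases below: `5i32` arrives as
    /// `ConstValue::Scalar` and becomes an `Immediate`; a `&str` literal
    /// arrives as `ConstValue::Slice` and becomes a `Pair(ptr, len)`; larger
    /// aggregates arrive as `ConstValue::Indirect` and are read back out of a
    /// constant allocation via `from_const_alloc`.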
    pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        val: mir::ConstValue,
        ty: Ty<'tcx>,
    ) -> Self {
        let layout = bx.layout_of(ty);

        let val = match val {
            ConstValue::Scalar(x) => {
                let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                    bug!("from_const: invalid ByVal layout: {:#?}", layout);
                };
                let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
                OperandValue::Immediate(llval)
            }
            ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
            ConstValue::Slice { alloc_id, meta } => {
                let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
                    bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                };
                let a = Scalar::from_pointer(Pointer::new(alloc_id.into(), Size::ZERO), &bx.tcx());
                let a_llval = bx.scalar_to_backend(
                    a,
                    a_scalar,
                    bx.scalar_pair_element_backend_type(layout, 0, true),
                );
                let b_llval = bx.const_usize(meta);
                OperandValue::Pair(a_llval, b_llval)
            }
            ConstValue::Indirect { alloc_id, offset } => {
                let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
                return Self::from_const_alloc(bx, layout, alloc, offset);
            }
        };

        OperandRef { val, layout }
    }

    fn from_const_alloc<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
        alloc: rustc_middle::mir::interpret::ConstAllocation<'tcx>,
        offset: Size,
    ) -> Self {
        let alloc_align = alloc.inner().align;
        assert!(alloc_align >= layout.align.abi, "{alloc_align:?} < {:?}", layout.align.abi);

        let read_scalar = |start, size, s: abi::Scalar, ty| {
            match alloc.0.read_scalar(
                bx,
                alloc_range(start, size),
                /*read_provenance*/ matches!(s.primitive(), abi::Primitive::Pointer(_)),
            ) {
                Ok(val) => bx.scalar_to_backend(val, s, ty),
                Err(_) => bx.const_poison(ty),
            }
        };

        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
        // case where some of the bytes are initialized and others are not. So, we need an extra
        // check that walks over the type of `mplace` to make sure it is truly correct to treat this
        // like a `Scalar` (or `ScalarPair`).
        match layout.backend_repr {
            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                let size = s.size(bx);
                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
                OperandRef { val: OperandValue::Immediate(val), layout }
            }
            BackendRepr::ScalarPair(
                a @ abi::Scalar::Initialized { .. },
                b @ abi::Scalar::Initialized { .. },
            ) => {
                let (a_size, b_size) = (a.size(bx), b.size(bx));
                let b_offset = (offset + a_size).align_to(b.align(bx).abi);
                assert!(b_offset.bytes() > 0);
                let a_val = read_scalar(
                    offset,
                    a_size,
                    a,
                    bx.scalar_pair_element_backend_type(layout, 0, true),
                );
                let b_val = read_scalar(
                    b_offset,
                    b_size,
                    b,
                    bx.scalar_pair_element_backend_type(layout, 1, true),
                );
                OperandRef { val: OperandValue::Pair(a_val, b_val), layout }
            }
            _ if layout.is_zst() => OperandRef::zero_sized(layout),
            _ => {
                // Neither a scalar nor a scalar pair. Load from a place.
                // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
                // same `ConstAllocation`?
                let init = bx.const_data_from_alloc(alloc);
                let base_addr = bx.static_addr_of(init, alloc_align, None);

                let llval = bx.const_ptr_byte_offset(base_addr, offset);
                bx.load_operand(PlaceRef::new_sized(llval, layout))
            }
        }
    }

    /// Asserts that this operand refers to a scalar and returns
    /// its value.
    pub fn immediate(self) -> V {
        match self.val {
            OperandValue::Immediate(s) => s,
            _ => bug!("not immediate: {:?}", self),
        }
    }

    /// Asserts that this operand is a pointer (or reference) and returns
    /// the place to which it points.  (This requires no code to be emitted
    /// as we represent places using the pointer to the place.)
    ///
    /// This uses [`Ty::builtin_deref`] to include the type of the place and
    /// assumes the place is aligned to the pointee's usual ABI alignment.
    ///
    /// If you don't need the type, see [`OperandValue::pointer_parts`]
    /// or [`OperandValue::deref`].
    pub fn deref<Cx: CodegenMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
        if self.layout.ty.is_box() {
            // Derefer should have removed all Box derefs
            bug!("dereferencing {:?} in codegen", self.layout.ty);
        }

        let projected_ty = self
            .layout
            .ty
            .builtin_deref(true)
            .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self));

        let layout = cx.layout_of(projected_ty);
        self.val.deref(layout.align.abi).with_type(layout)
    }

    /// If this operand is a `Pair`, we return an aggregate with the two values.
    /// For other cases, see `immediate`.
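    ///
    /// For example (an illustrative sketch): a `&[u8]` operand held as
    /// `Pair(ptr, len)` is packed into one backend aggregate `{ ptr, len }`,
    /// matching the layout's immediate backend type.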
    pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
    ) -> V {
        if let OperandValue::Pair(a, b) = self.val {
            let llty = bx.cx().immediate_backend_type(self.layout);
            debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
            // Reconstruct the immediate aggregate.
            let mut llpair = bx.cx().const_poison(llty);
            llpair = bx.insert_value(llpair, a, 0);
            llpair = bx.insert_value(llpair, b, 1);
            llpair
        } else {
            self.immediate()
        }
    }

    /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
    pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        llval: V,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
            debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);

            // Deconstruct the immediate aggregate.
            let a_llval = bx.extract_value(llval, 0);
            let b_llval = bx.extract_value(llval, 1);
            OperandValue::Pair(a_llval, b_llval)
        } else {
            OperandValue::Immediate(llval)
        };
        OperandRef { val, layout }
    }

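    /// Projects to field `i` of this operand without going through memory.
    ///
    /// For example (illustrative): extracting the length field of a `&[u8]`
    /// operand held as `Pair(ptr, len)` simply returns `Immediate(len)`.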
    pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        i: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), i);
        let offset = self.layout.fields.offset(i);

        if !bx.is_backend_ref(self.layout) && bx.is_backend_ref(field) {
            // Part of https://github.com/rust-lang/compiler-team/issues/838
            span_bug!(
                fx.mir.span,
                "Non-ref type {self:?} cannot project to ref field type {field:?}",
            );
        }

        let val = if field.is_zst() {
            OperandValue::ZeroSized
        } else if field.size == self.layout.size {
            assert_eq!(offset.bytes(), 0);
            fx.codegen_transmute_operand(bx, *self, field)
        } else {
            let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
                // Extract a scalar component from a pair.
                (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
                    if offset.bytes() == 0 {
                        assert_eq!(field.size, a.size(bx.cx()));
                        (Some(a), a_llval)
                    } else {
                        assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
                        assert_eq!(field.size, b.size(bx.cx()));
                        (Some(b), b_llval)
                    }
                }

                _ => {
                    span_bug!(fx.mir.span, "OperandRef::extract_field({:?}): not applicable", self)
                }
            };
            OperandValue::Immediate(match field.backend_repr {
                BackendRepr::SimdVector { .. } => imm,
                BackendRepr::Scalar(out_scalar) => {
                    let Some(in_scalar) = in_scalar else {
                        span_bug!(
                            fx.mir.span,
                            "OperandRef::extract_field({:?}): missing input scalar for output scalar",
                            self
                        )
                    };
                    if in_scalar != out_scalar {
                        // If the backend and backend_immediate types might differ,
                        // flip back to the backend type and then to the new immediate.
                        // This avoids no-op truncations while still handling cases
                        // like `bool`s in union fields that need to be truncated.
                        let backend = bx.from_immediate(imm);
                        bx.to_immediate_scalar(backend, out_scalar)
                    } else {
                        imm
                    }
                }
                BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(),
            })
        };

        OperandRef { val, layout: field }
    }

    /// Obtain the actual discriminant of a value.
    #[instrument(level = "trace", skip(fx, bx))]
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let dl = &bx.tcx().data_layout;
        let cast_to_layout = bx.cx().layout_of(cast_to);
        let cast_to = bx.cx().immediate_backend_type(cast_to_layout);

        // We check uninhabitedness separately because a type like
        // `enum Foo { Bar(i32, !) }` is still reported as `Variants::Single`,
        // *not* as `Variants::Empty`.
        if self.layout.is_uninhabited() {
            return bx.cx().const_poison(cast_to);
        }

        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Empty => unreachable!("we already handled uninhabited types"),
            Variants::Single { index } => {
                let discr_val =
                    if let Some(discr) = self.layout.ty.discriminant_for_variant(bx.tcx(), index) {
                        discr.val
                    } else {
                        // This arm is for types which are neither enums nor coroutines,
                        // and thus for which the only possible "variant" should be the first one.
                        assert_eq!(index, FIRST_VARIANT);
                        // There's thus no actual discriminant to return, so we return
                        // what it would have been if this was a single-variant enum.
                        0
                    };
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag_op = match self.val {
            OperandValue::ZeroSized => bug!(),
            OperandValue::Immediate(_) | OperandValue::Pair(_, _) => {
                self.extract_field(fx, bx, tag_field.as_usize())
            }
            OperandValue::Ref(place) => {
                let tag = place.with_type(self.layout).project_field(bx, tag_field.as_usize());
                bx.load_operand(tag)
            }
        };
        let tag_imm = tag_op.immediate();

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Primitive::Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag_imm, cast_to, signed)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                // Cast to an integer so we don't have to treat a pointer as a
                // special case.
                let (tag, tag_llty) = match tag_scalar.primitive() {
                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                    Primitive::Pointer(_) => {
                        let t = bx.type_from_integer(dl.ptr_sized_integer());
                        let tag = bx.ptrtoint(tag_imm, t);
                        (tag, t)
                    }
                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                };

                // `layout_sanity_check` ensures that we only get here for cases where the discriminant
                // value and the variant index match, since that's all `Niche` can encode.

                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let niche_start_const = bx.cx().const_uint_big(tag_llty, niche_start);

                // We have a subrange `niche_start..=niche_end` inside `range`.
                // If the value of the tag is inside this subrange, it's a
                // "niche value", an increment of the discriminant. Otherwise it
                // indicates the untagged variant.
                // A general algorithm to extract the discriminant from the tag
                // is:
                // relative_tag = tag - niche_start
                // is_niche = relative_tag <= (ule) relative_max
                // discr = if is_niche {
                //     cast(relative_tag) + niche_variants.start()
                // } else {
                //     untagged_variant
                // }
                // However, we will likely be able to emit simpler code.
                let (is_niche, tagged_discr, delta) = if relative_max == 0 {
                    // Best case scenario: only one tagged variant. This will
                    // likely become just a comparison and a jump.
                    // The algorithm is:
                    // is_niche = tag == niche_start
                    // discr = if is_niche {
                    //     niche_start
                    // } else {
                    //     untagged_variant
                    // }
                    let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start_const);
                    let tagged_discr =
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
                    (is_niche, tagged_discr, 0)
                } else {
                    // Thanks to parameter attributes and load metadata, LLVM already knows
                    // the general valid range of the tag. It's possible, though, for there
                    // to be an impossible value *in the middle*, which those ranges don't
                    // communicate, so it's worth an `assume` to let the optimizer know.
                    // Most importantly, this means when optimizing a variant test like
                    // `SELECT(is_niche, complex, CONST) == CONST` it's ok to simplify that
                    // to `!is_niche` because the `complex` part can't possibly match.
                    //
                    // This was previously asserted on `tagged_discr` below, where the
                    // impossible value is more obvious, but that caused an intermediate
                    // value to become multi-use and thus not optimize, so instead this
                    // assumes on the original input which is always multi-use. See
                    // <https://github.com/llvm/llvm-project/issues/134024#issuecomment-3131782555>
                    //
                    // FIXME: If we ever get range assume operand bundles in LLVM (so we
                    // don't need the `icmp`s in the instruction stream any more), it
                    // might be worth moving this back to being on the switch argument
                    // where it's more obviously applicable.
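                    //
                    // As a worked example with made-up numbers: if `niche_variants`
                    // is `1..=3`, `niche_start` is 10, and `untagged_variant` is 2,
                    // then tag 11 would decode to variant `(11 - 10) + 1 = 2` via
                    // the niche path; but variant 2 is the untagged one, which is
                    // never niche-encoded, so tag 11 can never occur and we can
                    // `assume` it away.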
                    if niche_variants.contains(&untagged_variant)
                        && bx.cx().sess().opts.optimize != OptLevel::No
                    {
                        let impossible = niche_start
                            .wrapping_add(u128::from(untagged_variant.as_u32()))
                            .wrapping_sub(u128::from(niche_variants.start().as_u32()));
                        let impossible = bx.cx().const_uint_big(tag_llty, impossible);
                        let ne = bx.icmp(IntPredicate::IntNE, tag, impossible);
                        bx.assume(ne);
                    }

                    // With multiple niched variants we'll have to actually compute
                    // the variant index from the stored tag.
                    //
                    // However, there's still one small optimization we can often do for
                    // determining *whether* a tag value is a natural value or a niched
                    // variant. The general algorithm involves a subtraction that often
                    // wraps in practice, making it tricky to analyse. However, in cases
                    // where there are few enough possible values of the tag that it doesn't
                    // need to wrap around, we can instead just look for the contiguous
                    // tag values on the end of the range with a single comparison.
                    //
                    // For example, take the type `enum Demo { A, B, Untagged(bool) }`.
                    // The `bool` is {0, 1}, and the two other variants are given the
                    // tags {2, 3} respectively. That means the `tag_range` is
                    // `[0, 3]`, which doesn't wrap as unsigned (nor as signed), so
                    // we can test for the niched variants with just `>= 2`.
                    //
                    // That means we're looking either for the niche values *above*
                    // the natural values of the untagged variant:
                    //
                    //             niche_start                  niche_end
                    //                  |                           |
                    //                  v                           v
                    // MIN -------------+---------------------------+---------- MAX
                    //         ^        |         is niche          |
                    //         |        +---------------------------+
                    //         |                                    |
                    //   tag_range.start                      tag_range.end
                    //
                    // Or *below* the natural values:
                    //
                    //    niche_start              niche_end
                    //         |                       |
                    //         v                       v
                    // MIN ----+-----------------------+---------------------- MAX
                    //         |       is niche        |           ^
                    //         +-----------------------+           |
                    //         |                                   |
                    //   tag_range.start                      tag_range.end
                    //
                    // With those two options and having the flexibility to choose
                    // between a signed or unsigned comparison on the tag, that
                    // covers most realistic scenarios. The tests have a (contrived)
                    // example of a 1-byte enum with over 128 niched variants which
                    // wraps both as signed and as unsigned, though, and for something
                    // like that we're stuck with the general algorithm.

                    let tag_range = tag_scalar.valid_range(&dl);
                    let tag_size = tag_scalar.size(&dl);
                    let niche_end = u128::from(relative_max).wrapping_add(niche_start);
                    let niche_end = tag_size.truncate(niche_end);

                    let relative_discr = bx.sub(tag, niche_start_const);
                    let cast_tag = bx.intcast(relative_discr, cast_to, false);
                    let is_niche = if tag_range.no_unsigned_wraparound(tag_size) == Ok(true) {
                        if niche_start == tag_range.start {
                            let niche_end_const = bx.cx().const_uint_big(tag_llty, niche_end);
                            bx.icmp(IntPredicate::IntULE, tag, niche_end_const)
                        } else {
                            assert_eq!(niche_end, tag_range.end);
                            bx.icmp(IntPredicate::IntUGE, tag, niche_start_const)
                        }
                    } else if tag_range.no_signed_wraparound(tag_size) == Ok(true) {
                        if niche_start == tag_range.start {
                            let niche_end_const = bx.cx().const_uint_big(tag_llty, niche_end);
                            bx.icmp(IntPredicate::IntSLE, tag, niche_end_const)
                        } else {
                            assert_eq!(niche_end, tag_range.end);
                            bx.icmp(IntPredicate::IntSGE, tag, niche_start_const)
                        }
                    } else {
                        bx.icmp(
                            IntPredicate::IntULE,
                            relative_discr,
                            bx.cx().const_uint(tag_llty, relative_max as u64),
                        )
                    };

                    (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
                };

                let tagged_discr = if delta == 0 {
                    tagged_discr
                } else {
                    bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
                };

                let untagged_variant_const =
                    bx.cx().const_uint(cast_to, u64::from(untagged_variant.as_u32()));

                let discr = bx.select(is_niche, tagged_discr, untagged_variant_const);

                // In principle we could insert assumes on the possible range of `discr`, but
                // currently in LLVM this isn't worth it because the original `tag` will
                // have either a `range` parameter attribute or `!range` metadata,
                // or come from a `transmute` that already `assume`d it.

                discr
            }
        }
    }
}

/// Each of these variants starts out as `Either::Right` when it's uninitialized,
/// then setting the field changes that to `Either::Left` with the backend value.
#[derive(Debug, Copy, Clone)]
enum OperandValueBuilder<V> {
    ZeroSized,
    Immediate(Either<V, abi::Scalar>),
    Pair(Either<V, abi::Scalar>, Either<V, abi::Scalar>),
    /// `repr(simd)` types need special handling because they each have a non-empty
    /// array field (which uses [`OperandValue::Ref`]) despite the SIMD type itself
    /// using [`OperandValue::Immediate`] which for any other kind of type would
    /// mean that its one non-ZST field would also be [`OperandValue::Immediate`].
    Vector(Either<V, ()>),
}

/// Allows building up an `OperandRef` by setting fields one at a time.
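///
/// For example (an illustrative sketch): to build a `(u32, bool)` whose layout
/// is `ScalarPair`, one would call `insert_field` for fields 0 and 1 of
/// `FIRST_VARIANT`, then `build` to obtain the finished `OperandValue::Pair`.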
#[derive(Debug, Copy, Clone)]
pub(super) struct OperandRefBuilder<'tcx, V> {
    val: OperandValueBuilder<V>,
    layout: TyAndLayout<'tcx>,
}

impl<'a, 'tcx, V: CodegenObject> OperandRefBuilder<'tcx, V> {
    /// Creates an uninitialized builder for an instance of the `layout`.
    ///
    /// ICEs for [`BackendRepr::Memory`] types (other than ZSTs), which should
    /// be built up inside a [`PlaceRef`] instead as they need an allocated place
    /// into which to write the values of the fields.
    pub(super) fn new(layout: TyAndLayout<'tcx>) -> Self {
        let val = match layout.backend_repr {
            BackendRepr::Memory { .. } if layout.is_zst() => OperandValueBuilder::ZeroSized,
            BackendRepr::Scalar(s) => OperandValueBuilder::Immediate(Either::Right(s)),
            BackendRepr::ScalarPair(a, b) => {
                OperandValueBuilder::Pair(Either::Right(a), Either::Right(b))
            }
            BackendRepr::SimdVector { .. } => OperandValueBuilder::Vector(Either::Right(())),
            BackendRepr::Memory { .. } => {
                bug!("Cannot use non-ZST Memory-ABI type in operand builder: {layout:?}");
            }
        };
        OperandRefBuilder { val, layout }
    }

    pub(super) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &mut self,
        bx: &mut Bx,
        variant: VariantIdx,
        field: FieldIdx,
        field_operand: OperandRef<'tcx, V>,
    ) {
        if let OperandValue::ZeroSized = field_operand.val {
            // A ZST never adds any state, so just ignore it.
            // This special-casing is worth it because of things like
            // `Result<!, !>` where `Ok(never)` is legal to write,
            // but the type shows as `FieldsShape::Primitive`, so we can't
            // actually look at the layout for the field being set.
            return;
        }

        let is_zero_offset = if let abi::FieldsShape::Primitive = self.layout.fields {
            // The other branch looking at field layouts ICEs for primitives,
            // so we need to handle them separately.
            // Because we handled ZSTs above (like the metadata in a thin pointer),
            // the only possibility is that we're setting the one-and-only field.
            assert!(!self.layout.is_zst());
            assert_eq!(variant, FIRST_VARIANT);
            assert_eq!(field, FieldIdx::ZERO);
            true
        } else {
            let variant_layout = self.layout.for_variant(bx.cx(), variant);
            let field_offset = variant_layout.fields.offset(field.as_usize());
            field_offset == Size::ZERO
        };

        let mut update = |tgt: &mut Either<V, abi::Scalar>, src, from_scalar| {
            let to_scalar = tgt.unwrap_right();
            // We transmute here (rather than just `from_immediate`) because in
            // `Result<usize, *const ()>` the field of the `Ok` is an integer,
            // but the corresponding scalar in the enum is a pointer.
            let imm = transmute_scalar(bx, src, from_scalar, to_scalar);
            *tgt = Either::Left(imm);
        };

        match (field_operand.val, field_operand.layout.backend_repr) {
            (OperandValue::ZeroSized, _) => unreachable!("Handled above"),
            (OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
                OperandValueBuilder::Immediate(val @ Either::Right(_)) if is_zero_offset => {
                    update(val, v, from_scalar);
                }
                OperandValueBuilder::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
                    update(fst, v, from_scalar);
                }
                OperandValueBuilder::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
                    update(snd, v, from_scalar);
                }
                _ => {
                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
                }
            },
            (OperandValue::Immediate(v), BackendRepr::SimdVector { .. }) => match &mut self.val {
                OperandValueBuilder::Vector(val @ Either::Right(())) if is_zero_offset => {
                    *val = Either::Left(v);
                }
                _ => {
                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
                }
            },
            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
                match &mut self.val {
                    OperandValueBuilder::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
                        update(fst, a, from_sa);
                        update(snd, b, from_sb);
                    }
                    _ => bug!(
                        "Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}"
                    ),
                }
            }
            (OperandValue::Ref(place), BackendRepr::Memory { .. }) => match &mut self.val {
                OperandValueBuilder::Vector(val @ Either::Right(())) => {
                    let ibty = bx.cx().immediate_backend_type(self.layout);
                    let simd = bx.load_from_place(ibty, place);
                    *val = Either::Left(simd);
                }
                _ => {
                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
                }
            },
            _ => bug!("Operand cannot be used with `insert_field`: {field_operand:?}"),
        }
    }

    /// Insert the immediate value `imm` for field `f` in the *type itself*,
    /// rather than into one of the variants.
    ///
    /// Most things want [`Self::insert_field`] instead, but this one is
    /// necessary for writing things like enum tags that aren't in any variant.
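    ///
    /// For example (illustrative): when building an enum with a directly-encoded
    /// tag, the tag immediate is written with this method, since the discriminant
    /// belongs to the enum's layout rather than to any one variant.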
    pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
        let field_offset = self.layout.fields.offset(f.as_usize());
        let is_zero_offset = field_offset == Size::ZERO;
        match &mut self.val {
            OperandValueBuilder::Immediate(val @ Either::Right(_)) if is_zero_offset => {
                *val = Either::Left(imm);
            }
            OperandValueBuilder::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
                *fst = Either::Left(imm);
            }
            OperandValueBuilder::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
                *snd = Either::Left(imm);
            }
            _ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
        }
    }

    /// After having set all necessary fields, this converts the builder back
    /// to the normal `OperandRef`.
    ///
    /// ICEs if any required fields were not set.
    pub(super) fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
        let OperandRefBuilder { val, layout } = *self;

        // For something like `Option::<u32>::None`, it's expected that the
        // payload scalar will not actually have been set, so this converts
        // unset scalars to corresponding `undef` values so long as the scalar
        // from the layout allows uninit.
        let unwrap = |r: Either<V, abi::Scalar>| match r {
            Either::Left(v) => v,
            Either::Right(s) if s.is_uninit_valid() => {
                let bty = cx.type_from_scalar(s);
                cx.const_undef(bty)
            }
            Either::Right(_) => bug!("OperandRef::build called while fields are missing {self:?}"),
        };

        let val = match val {
            OperandValueBuilder::ZeroSized => OperandValue::ZeroSized,
            OperandValueBuilder::Immediate(v) => OperandValue::Immediate(unwrap(v)),
            OperandValueBuilder::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
            OperandValueBuilder::Vector(v) => match v {
                Either::Left(v) => OperandValue::Immediate(v),
                Either::Right(())
                    if let BackendRepr::SimdVector { element, .. } = layout.backend_repr
                        && element.is_uninit_valid() =>
                {
                    let bty = cx.immediate_backend_type(layout);
                    OperandValue::Immediate(cx.const_undef(bty))
                }
                Either::Right(()) => {
                    bug!("OperandRef::build called while fields are missing {self:?}")
                }
            },
        };
        OperandRef { val, layout }
    }
}

impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
    /// Returns an `OperandValue` that's generally UB to use in any way.
    ///
    /// Depending on the `layout`, returns `ZeroSized` for ZSTs, an `Immediate` or
    /// `Pair` containing poison value(s), or a `Ref` containing a poison pointer.
    ///
    /// Supports sized types only.
    pub fn poison<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> OperandValue<V> {
        assert!(layout.is_sized());
        if layout.is_zst() {
            OperandValue::ZeroSized
        } else if bx.cx().is_backend_immediate(layout) {
            let ibty = bx.cx().immediate_backend_type(layout);
            OperandValue::Immediate(bx.const_poison(ibty))
        } else if bx.cx().is_backend_scalar_pair(layout) {
            let ibty0 = bx.cx().scalar_pair_element_backend_type(layout, 0, true);
            let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
            OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
        } else {
            let ptr = bx.cx().type_ptr();
            OperandValue::Ref(PlaceValue::new_sized(bx.const_poison(ptr), layout.align.abi))
        }
    }

    pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::empty());
    }

    pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::VOLATILE);
    }

    pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
    }

    pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
    }

    pub(crate) fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
        flags: MemFlags,
    ) {
        debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
        match self {
            OperandValue::ZeroSized => {
                // Avoid generating stores of zero-sized values, because the only way to have a
                // zero-sized value is through `undef`/`poison`, and the store itself is useless.
            }
            OperandValue::Ref(val) => {
                assert!(dest.layout.is_sized(), "cannot directly store unsized values");
                if val.llextra.is_some() {
                    bug!("cannot directly store unsized values");
                }
                bx.typed_place_copy_with_flags(dest.val, val, dest.layout, flags);
            }
            OperandValue::Immediate(s) => {
                let val = bx.from_immediate(s);
                bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
            }
            OperandValue::Pair(a, b) => {
                let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
                    bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                };
                let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
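                // E.g. for `&[u8]` on a typical 64-bit target, this is
                // `8.align_to(8) == 8`: the length is stored right after the pointer.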

                let val = bx.from_immediate(a);
                let align = dest.val.align;
                bx.store_with_flags(val, dest.val.llval, align, flags);

                let llptr = bx.inbounds_ptradd(dest.val.llval, bx.const_usize(b_offset.bytes()));
                let val = bx.from_immediate(b);
                let align = dest.val.align.restrict_for_offset(b_offset);
                bx.store_with_flags(val, llptr, align, flags);
            }
        }
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    fn maybe_codegen_consume_direct(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> Option<OperandRef<'tcx, Bx::Value>> {
        debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);

        match self.locals[place_ref.local] {
            LocalRef::Operand(mut o) => {
                // We only need to handle the projections that
                // `LocalAnalyzer::process_place` allows to reach here.
                for elem in place_ref.projection {
                    match *elem {
                        mir::ProjectionElem::Field(f, _) => {
                            assert!(
                                !o.layout.ty.is_any_ptr(),
                                "Bad PlaceRef: destructuring pointers should use cast/PtrMetadata, \
                                 but tried to access field {f:?} of pointer {o:?}",
                            );
                            o = o.extract_field(self, bx, f.index());
                        }
                        mir::PlaceElem::Downcast(_, vidx) => {
                            debug_assert_eq!(
                                o.layout.variants,
                                abi::Variants::Single { index: vidx },
                            );
                            let layout = o.layout.for_variant(bx.cx(), vidx);
                            o = OperandRef { val: o.val, layout }
                        }
                        _ => return None,
                    }
                }

                Some(o)
            }
            LocalRef::PendingOperand => {
                bug!("use of {:?} before def", place_ref);
            }
            LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
                // watch out for locals that do not have an
                // alloca; they are handled somewhat differently
                None
            }
        }
    }

    pub fn codegen_consume(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        debug!("codegen_consume(place_ref={:?})", place_ref);

        let ty = self.monomorphized_place_ty(place_ref);
        let layout = bx.cx().layout_of(ty);

        // ZSTs don't require any actual memory access.
        if layout.is_zst() {
            return OperandRef::zero_sized(layout);
        }

        if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
            return o;
        }

        // for most places, to consume them we just load them
        // out from their home
        let place = self.codegen_place(bx, place_ref);
        bx.load_operand(place)
    }

    pub fn codegen_operand(
        &mut self,
        bx: &mut Bx,
        operand: &mir::Operand<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        debug!("codegen_operand(operand={:?})", operand);

        match *operand {
            mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
                self.codegen_consume(bx, place.as_ref())
            }

            mir::Operand::Constant(ref constant) => {
                let constant_ty = self.monomorphize(constant.ty());
                // Most SIMD vector constants should be passed as immediates.
                // (In particular, some intrinsics really rely on this.)
                if constant_ty.is_simd() {
                    // However, some SIMD types do not actually use the vector ABI
                    // (in particular, packed SIMD types do not). Ensure we exclude those.
                    let layout = bx.layout_of(constant_ty);
                    if let BackendRepr::SimdVector { .. } = layout.backend_repr {
                        let (llval, ty) = self.immediate_const_vector(bx, constant);
                        return OperandRef {
                            val: OperandValue::Immediate(llval),
                            layout: bx.layout_of(ty),
                        };
                    }
                }
                self.eval_mir_constant_to_operand(bx, constant)
            }
        }
    }
}