rustc_codegen_ssa/mir/operand.rs

use std::fmt;

use arrayvec::ArrayVec;
use either::Either;
use rustc_abi as abi;
use rustc_abi::{Align, BackendRepr, FIRST_VARIANT, Primitive, Size, TagEncoding, Variants};
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
use rustc_middle::mir::{self, ConstValue};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::place::{PlaceRef, PlaceValue};
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
use crate::{MemFlags, size_of_val};

/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
/// safety check.
#[derive(Copy, Clone, Debug)]
pub enum OperandValue<V> {
    /// A reference to the actual operand. The data is guaranteed
    /// to be valid for the operand's lifetime.
    /// The second value, if any, is the extra data (vtable or length)
    /// which indicates that it refers to an unsized rvalue.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_ref`] returns `true`.
    /// (That basically amounts to "isn't one of the other variants".)
    ///
    /// This holds a [`PlaceValue`] (like a [`PlaceRef`] does) with a pointer
    /// to the location holding the value. The type behind that pointer is the
    /// one returned by [`LayoutTypeCodegenMethods::backend_type`].
    Ref(PlaceValue<V>),
    /// A single LLVM immediate value.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_immediate`] returns `true`.
    /// The backend value in this variant must be the *immediate* backend type,
    /// as returned by [`LayoutTypeCodegenMethods::immediate_backend_type`].
    Immediate(V),
    /// A pair of immediate LLVM values. Used by wide pointers too.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// [`LayoutTypeCodegenMethods::is_backend_scalar_pair`] returns `true`.
    /// The backend values in this variant must be the *immediate* backend types,
    /// as returned by [`LayoutTypeCodegenMethods::scalar_pair_element_backend_type`]
    /// with `immediate: true`.
    Pair(V, V),
    /// A value taking no bytes, and which therefore needs no LLVM value at all.
    ///
    /// If you ever need a `V` to pass to something, get a fresh poison value
    /// from [`ConstCodegenMethods::const_poison`].
    ///
    /// An `OperandValue` *must* be this variant for any type for which
    /// `is_zst` on its `Layout` returns `true`. Note however that
    /// these values can still require alignment.
    ZeroSized,
}
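
// A rough mental model of which variant a given Rust type ends up as, under
// common layouts (a sketch for orientation only; the real mapping is whatever
// the `is_backend_*` queries above say for the target):
//
//     u32          => OperandValue::Immediate(..)  // one scalar
//     &[u8]        => OperandValue::Pair(.., ..)   // wide pointer: data + len
//     (i32, i64)   => OperandValue::Pair(.., ..)   // scalar-pair layout
//     [u8; 4096]   => OperandValue::Ref(..)        // lives in memory
//     ()           => OperandValue::ZeroSized      // no bytes at all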

impl<V: CodegenObject> OperandValue<V> {
    /// If this is ZeroSized/Immediate/Pair, return an array of the 0/1/2 values.
    /// If this is Ref, return the place.
    #[inline]
    pub(crate) fn immediates_or_place(self) -> Either<ArrayVec<V, 2>, PlaceValue<V>> {
        match self {
            OperandValue::ZeroSized => Either::Left(ArrayVec::new()),
            OperandValue::Immediate(a) => Either::Left(ArrayVec::from_iter([a])),
            OperandValue::Pair(a, b) => Either::Left([a, b].into()),
            OperandValue::Ref(p) => Either::Right(p),
        }
    }

    /// Given an array of 0/1/2 immediate values, return ZeroSized/Immediate/Pair.
    #[inline]
    pub(crate) fn from_immediates(immediates: ArrayVec<V, 2>) -> Self {
        let mut it = immediates.into_iter();
        let Some(a) = it.next() else {
            return OperandValue::ZeroSized;
        };
        let Some(b) = it.next() else {
            return OperandValue::Immediate(a);
        };
        OperandValue::Pair(a, b)
    }
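
    // Sketch: `from_immediates` inverts `immediates_or_place` for the three
    // immediate-shaped variants, so a generic "transform each backend value"
    // loop can rebuild an operand like this (hypothetical closure `f` over
    // the backend values; `Ref` is passed through untouched):
    //
    //     let rebuilt = match val.immediates_or_place() {
    //         Either::Left(imms) => {
    //             OperandValue::from_immediates(imms.into_iter().map(f).collect())
    //         }
    //         Either::Right(place) => OperandValue::Ref(place),
    //     };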

    /// Treat this value as a pointer and return the data pointer and
    /// optional metadata as backend values.
    ///
    /// If you're making a place, use [`Self::deref`] instead.
    pub(crate) fn pointer_parts(self) -> (V, Option<V>) {
        match self {
            OperandValue::Immediate(llptr) => (llptr, None),
            OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
            _ => bug!("OperandValue cannot be a pointer: {self:?}"),
        }
    }

    /// Treat this value as a pointer and return the place to which it points.
    ///
    /// The pointer immediate doesn't inherently know its alignment,
    /// so you need to pass it in. If you want to get it from a type's ABI
    /// alignment, then maybe you want [`OperandRef::deref`] instead.
    ///
    /// This is the inverse of [`PlaceValue::address`].
    pub(crate) fn deref(self, align: Align) -> PlaceValue<V> {
        let (llval, llextra) = self.pointer_parts();
        PlaceValue { llval, llextra, align }
    }
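
    // Sketch of the intended pairing (alignment must come from the caller,
    // since a raw pointer does not carry it): for a thin pointer like `&u32`,
    // `pointer_parts` yields `(data_ptr, None)`; for a wide pointer like
    // `&[u8]` or `&dyn Trait`, it yields `(data_ptr, Some(meta))` where
    // `meta` is the length or vtable pointer. Since `deref` is the inverse
    // of [`PlaceValue::address`], the round trip
    //
    //     place.address().deref(place.align) == place
    //
    // should hold for any `PlaceValue`.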

    pub(crate) fn is_expected_variant_for_type<'tcx, Cx: LayoutTypeCodegenMethods<'tcx>>(
        &self,
        cx: &Cx,
        ty: TyAndLayout<'tcx>,
    ) -> bool {
        match self {
            OperandValue::ZeroSized => ty.is_zst(),
            OperandValue::Immediate(_) => cx.is_backend_immediate(ty),
            OperandValue::Pair(_, _) => cx.is_backend_scalar_pair(ty),
            OperandValue::Ref(_) => cx.is_backend_ref(ty),
        }
    }
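
    // This is a checking predicate rather than a constructor; a plausible use
    // (hypothetical call site, not from this file) is a debug assertion when
    // building an operand:
    //
    //     debug_assert!(op.val.is_expected_variant_for_type(cx, op.layout));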
}

/// An `OperandRef` is an "SSA" reference to a Rust value, along with
/// its type.
///
/// NOTE: unless you know a value's type exactly, you should not
/// generate LLVM opcodes acting on it and instead act via methods,
/// to avoid nasty edge cases. In particular, using `Builder::store`
/// directly is sure to cause problems -- use `OperandRef::store`
/// instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx, V> {
    /// The value.
    pub val: OperandValue<V>,

    /// The layout of the value, based on its Rust type.
    pub layout: TyAndLayout<'tcx>,
}

impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
    }
}

impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
    pub fn zero_sized(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, V> {
        assert!(layout.is_zst());
        OperandRef { val: OperandValue::ZeroSized, layout }
    }

    pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        val: mir::ConstValue<'tcx>,
        ty: Ty<'tcx>,
    ) -> Self {
        let layout = bx.layout_of(ty);

        let val = match val {
            ConstValue::Scalar(x) => {
                let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                    bug!("from_const: invalid ByVal layout: {:#?}", layout);
                };
                let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
                OperandValue::Immediate(llval)
            }
            ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
            ConstValue::Slice { data, meta } => {
                let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
                    bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                };
                let a = Scalar::from_pointer(
                    Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data).into(), Size::ZERO),
                    &bx.tcx(),
                );
                let a_llval = bx.scalar_to_backend(
                    a,
                    a_scalar,
                    bx.scalar_pair_element_backend_type(layout, 0, true),
                );
                let b_llval = bx.const_usize(meta);
                OperandValue::Pair(a_llval, b_llval)
            }
            ConstValue::Indirect { alloc_id, offset } => {
                let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
                return Self::from_const_alloc(bx, layout, alloc, offset);
            }
        };

        OperandRef { val, layout }
    }
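
    // For orientation, a sketch (not asserted by this file) of how the three
    // interesting `ConstValue` cases map to operand shapes:
    //
    //     ConstValue::Scalar(42_i32)          => Immediate(i32 42)
    //     ConstValue::Slice { b"hi", 2 }      => Pair(ptr to bytes, usize 2)
    //     ConstValue::Indirect { alloc, off } => read back out of the
    //                                            allocation via `from_const_alloc`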

    fn from_const_alloc<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
        alloc: rustc_middle::mir::interpret::ConstAllocation<'tcx>,
        offset: Size,
    ) -> Self {
        let alloc_align = alloc.inner().align;
        assert!(alloc_align >= layout.align.abi);

        let read_scalar = |start, size, s: abi::Scalar, ty| {
            match alloc.0.read_scalar(
                bx,
                alloc_range(start, size),
                /*read_provenance*/ matches!(s.primitive(), abi::Primitive::Pointer(_)),
            ) {
                Ok(val) => bx.scalar_to_backend(val, s, ty),
                Err(_) => bx.const_poison(ty),
            }
        };

        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
        // case where some of the bytes are initialized and others are not. So, we need the extra
        // `Initialized` checks below to make sure it is truly correct to treat this value like a
        // `Scalar` (or `ScalarPair`).
        match layout.backend_repr {
            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                let size = s.size(bx);
                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
                OperandRef { val: OperandValue::Immediate(val), layout }
            }
            BackendRepr::ScalarPair(
                a @ abi::Scalar::Initialized { .. },
                b @ abi::Scalar::Initialized { .. },
            ) => {
                let (a_size, b_size) = (a.size(bx), b.size(bx));
                let b_offset = (offset + a_size).align_to(b.align(bx).abi);
                assert!(b_offset.bytes() > 0);
                let a_val = read_scalar(
                    offset,
                    a_size,
                    a,
                    bx.scalar_pair_element_backend_type(layout, 0, true),
                );
                let b_val = read_scalar(
                    b_offset,
                    b_size,
                    b,
                    bx.scalar_pair_element_backend_type(layout, 1, true),
                );
                OperandRef { val: OperandValue::Pair(a_val, b_val), layout }
            }
            _ if layout.is_zst() => OperandRef::zero_sized(layout),
            _ => {
                // Neither a scalar nor scalar pair. Load from a place.
                // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
                // same `ConstAllocation`?
                let init = bx.const_data_from_alloc(alloc);
                let base_addr = bx.static_addr_of(init, alloc_align, None);

                let llval = bx.const_ptr_byte_offset(base_addr, offset);
                bx.load_operand(PlaceRef::new_sized(llval, layout))
            }
        }
    }
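
    // To make the `MaybeUninit` caveat above concrete (illustrative only):
    // `MaybeUninit<u64>` has a `Scalar` backend repr, but its scalar is a
    // union scalar rather than `Initialized`, so a constant of that type
    // falls through to the final arm and is materialized in memory, where
    // partially-initialized bytes are representable.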

    /// Asserts that this operand refers to a scalar and returns
    /// a reference to its value.
    pub fn immediate(self) -> V {
        match self.val {
            OperandValue::Immediate(s) => s,
            _ => bug!("not immediate: {:?}", self),
        }
    }

    /// Asserts that this operand is a pointer (or reference) and returns
    /// the place to which it points. (This requires no code to be emitted
    /// as we represent places using the pointer to the place.)
    ///
    /// This uses [`Ty::builtin_deref`] to include the type of the place and
    /// assumes the place is aligned to the pointee's usual ABI alignment.
    ///
    /// If you don't need the type, see [`OperandValue::pointer_parts`]
    /// or [`OperandValue::deref`].
    pub fn deref<Cx: CodegenMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
        if self.layout.ty.is_box() {
            // Derefer should have removed all Box derefs.
            bug!("dereferencing {:?} in codegen", self.layout.ty);
        }

        let projected_ty = self
            .layout
            .ty
            .builtin_deref(true)
            .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self));

        let layout = cx.layout_of(projected_ty);
        self.val.deref(layout.align.abi).with_type(layout)
    }

    /// If this operand is a `Pair`, we return an aggregate with the two values.
    /// For other cases, see `immediate`.
    pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
    ) -> V {
        if let OperandValue::Pair(a, b) = self.val {
            let llty = bx.cx().immediate_backend_type(self.layout);
            debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
            // Reconstruct the immediate aggregate.
            let mut llpair = bx.cx().const_poison(llty);
            llpair = bx.insert_value(llpair, a, 0);
            llpair = bx.insert_value(llpair, b, 1);
            llpair
        } else {
            self.immediate()
        }
    }

    /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
    pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        llval: V,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
            debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);

            // Deconstruct the immediate aggregate.
            let a_llval = bx.extract_value(llval, 0);
            let b_llval = bx.extract_value(llval, 1);
            OperandValue::Pair(a_llval, b_llval)
        } else {
            OperandValue::Immediate(llval)
        };
        OperandRef { val, layout }
    }
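
    // These two methods are inverses: packing builds the aggregate with two
    // `insertvalue`s into poison, and unpacking pulls it apart with two
    // `extractvalue`s. In LLVM-ish pseudo-IR, assuming `{ i64, i64 }` as the
    // immediate backend type (a sketch, not output captured from this file):
    //
    //     %pair.0 = insertvalue { i64, i64 } poison, i64 %a, 0
    //     %pair   = insertvalue { i64, i64 } %pair.0, i64 %b, 1
    //     ; ...and back:
    //     %a2 = extractvalue { i64, i64 } %pair, 0
    //     %b2 = extractvalue { i64, i64 } %pair, 1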

    pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        i: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), i);
        let offset = self.layout.fields.offset(i);

        if !bx.is_backend_ref(self.layout) && bx.is_backend_ref(field) {
            if let BackendRepr::SimdVector { count, .. } = self.layout.backend_repr
                && let BackendRepr::Memory { sized: true } = field.backend_repr
                && count.is_power_of_two()
            {
                assert_eq!(field.size, self.layout.size);
                // This is being deprecated, but for now stdarch still needs it for
                // newtype vectors of arrays, e.g. `#[repr(simd)] struct S([i32; 4]);`.
                let place = PlaceRef::alloca(bx, field);
                self.val.store(bx, place.val.with_type(self.layout));
                return bx.load_operand(place);
            } else {
                // Part of https://github.com/rust-lang/compiler-team/issues/838
                bug!("Non-ref type {self:?} cannot project to ref field type {field:?}");
            }
        }

        let val = if field.is_zst() {
            OperandValue::ZeroSized
        } else if field.size == self.layout.size {
            assert_eq!(offset.bytes(), 0);
            fx.codegen_transmute_operand(bx, *self, field).unwrap_or_else(|| {
                bug!(
                    "Expected `codegen_transmute_operand` to handle equal-size \
                      field {i:?} projection from {self:?} to {field:?}"
                )
            })
        } else {
            let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
                // Extract a scalar component from a pair.
                (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
                    if offset.bytes() == 0 {
                        assert_eq!(field.size, a.size(bx.cx()));
                        (Some(a), a_llval)
                    } else {
                        assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
                        assert_eq!(field.size, b.size(bx.cx()));
                        (Some(b), b_llval)
                    }
                }

                _ => {
                    span_bug!(fx.mir.span, "OperandRef::extract_field({:?}): not applicable", self)
                }
            };
            OperandValue::Immediate(match field.backend_repr {
                BackendRepr::SimdVector { .. } => imm,
                BackendRepr::Scalar(out_scalar) => {
                    let Some(in_scalar) = in_scalar else {
                        span_bug!(
                            fx.mir.span,
                            "OperandRef::extract_field({:?}): missing input scalar for output scalar",
                            self
                        )
                    };
                    if in_scalar != out_scalar {
                        // If the backend and backend_immediate types might differ,
                        // flip back to the backend type then to the new immediate.
                        // This avoids nop truncations, but still handles things like
                        // `bool`s in union fields that need to be truncated.
                        let backend = bx.from_immediate(imm);
                        bx.to_immediate_scalar(backend, out_scalar)
                    } else {
                        imm
                    }
                }
                BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(),
            })
        };

        OperandRef { val, layout: field }
    }
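
    // Worked example of the pair-projection arm above (a sketch; exact
    // offsets depend on the target): for `#[repr(C)] struct S(i32, i64)` with
    // scalar-pair layout, field 0 sits at offset 0 and matches scalar `a`, so
    // its `a_llval` is returned directly; field 1 sits at offset
    // `align_to(4, 8) == 8`, the same offset the `assert_eq!` recomputes, and
    // gets `b_llval`. Either way, no memory traffic is emitted.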

    /// Obtain the actual discriminant of a value.
    #[instrument(level = "trace", skip(fx, bx))]
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let dl = &bx.tcx().data_layout;
        let cast_to_layout = bx.cx().layout_of(cast_to);
        let cast_to = bx.cx().immediate_backend_type(cast_to_layout);

        // We check uninhabitedness separately because a type like
        // `enum Foo { Bar(i32, !) }` is still reported as `Variants::Single`,
        // *not* as `Variants::Empty`.
        if self.layout.is_uninhabited() {
            return bx.cx().const_poison(cast_to);
        }

        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Empty => unreachable!("we already handled uninhabited types"),
            Variants::Single { index } => {
                let discr_val =
                    if let Some(discr) = self.layout.ty.discriminant_for_variant(bx.tcx(), index) {
                        discr.val
                    } else {
                        // This arm is for types which are neither enums nor coroutines,
                        // and thus for which the only possible "variant" should be the first one.
                        assert_eq!(index, FIRST_VARIANT);
                        // There's thus no actual discriminant to return, so we return
                        // what it would have been if this was a single-variant enum.
                        0
                    };
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag_op = match self.val {
            OperandValue::ZeroSized => bug!(),
            OperandValue::Immediate(_) | OperandValue::Pair(_, _) => {
                self.extract_field(fx, bx, tag_field)
            }
            OperandValue::Ref(place) => {
                let tag = place.with_type(self.layout).project_field(bx, tag_field);
                bx.load_operand(tag)
            }
        };
        let tag_imm = tag_op.immediate();

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Primitive::Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag_imm, cast_to, signed)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                // Cast to an integer so we don't have to treat a pointer as a
                // special case.
                let (tag, tag_llty) = match tag_scalar.primitive() {
                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                    Primitive::Pointer(_) => {
                        let t = bx.type_from_integer(dl.ptr_sized_integer());
                        let tag = bx.ptrtoint(tag_imm, t);
                        (tag, t)
                    }
                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                };

                // Layout ensures that we only get here for cases where the discriminant
                // value and the variant index match, since that's all `Niche` can encode.
                // But for emphasis and debugging, let's double-check one anyway.
                debug_assert_eq!(
                    self.layout
                        .ty
                        .discriminant_for_variant(bx.tcx(), untagged_variant)
                        .unwrap()
                        .val,
                    u128::from(untagged_variant.as_u32()),
                );

                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();

                // We have a subrange `niche_start..=niche_end` inside the tag's valid range.
                // If the value of the tag is inside this subrange, it's a
                // "niche value", an increment of the discriminant. Otherwise it
                // indicates the untagged variant.
                // A general algorithm to extract the discriminant from the tag
                // is:
                // relative_tag = tag - niche_start
                // is_niche = relative_tag <= (ule) relative_max
                // discr = if is_niche {
                //     cast(relative_tag) + niche_variants.start()
                // } else {
                //     untagged_variant
                // }
                // However, we will likely be able to emit simpler code.
                let (is_niche, tagged_discr, delta) = if relative_max == 0 {
                    // Best case scenario: only one tagged variant. This will
                    // likely become just a comparison and a jump.
                    // The algorithm is:
                    // is_niche = tag == niche_start
                    // discr = if is_niche {
                    //     niche_start
                    // } else {
                    //     untagged_variant
                    // }
                    let niche_start = bx.cx().const_uint_big(tag_llty, niche_start);
                    let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start);
                    let tagged_discr =
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
                    (is_niche, tagged_discr, 0)
                } else {
                    // The special cases don't apply, so we'll have to go with
                    // the general algorithm.
                    let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start));
                    let cast_tag = bx.intcast(relative_discr, cast_to, false);
                    let is_niche = bx.icmp(
                        IntPredicate::IntULE,
                        relative_discr,
                        bx.cx().const_uint(tag_llty, relative_max as u64),
                    );

                    // Thanks to parameter attributes and load metadata, LLVM already knows
                    // the general valid range of the tag. It's possible, though, for there
                    // to be an impossible value *in the middle*, which those ranges don't
                    // communicate, so it's worth an `assume` to let the optimizer know.
                    if niche_variants.contains(&untagged_variant)
                        && bx.cx().sess().opts.optimize != OptLevel::No
                    {
                        let impossible =
                            u64::from(untagged_variant.as_u32() - niche_variants.start().as_u32());
                        let impossible = bx.cx().const_uint(tag_llty, impossible);
                        let ne = bx.icmp(IntPredicate::IntNE, relative_discr, impossible);
                        bx.assume(ne);
                    }

                    (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
                };

                let tagged_discr = if delta == 0 {
                    tagged_discr
                } else {
                    bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
                };

                let discr = bx.select(
                    is_niche,
                    tagged_discr,
                    bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
                );

                // In principle we could insert assumes on the possible range of `discr`, but
                // currently in LLVM this isn't worth it because the original `tag` will
                // have either a `range` parameter attribute or `!range` metadata,
                // or come from a `transmute` that already `assume`d it.

                discr
            }
        }
    }
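
    // Concrete niche example (a sketch of the canonical case): `Option<&u8>`
    // uses the null pointer as its niche. There, `untagged_variant` is `Some`,
    // `niche_variants` is `None..=None`, `niche_start == 0`, and
    // `relative_max == 0`, so the fast path above emits roughly
    //
    //     is_niche = (tag == 0)
    //     discr    = select(is_niche, 0 /* None */, 1 /* Some */)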
}

impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
    /// Returns an `OperandValue` that's generally UB to use in any way.
    ///
    /// Depending on the `layout`, returns `ZeroSized` for ZSTs, an `Immediate` or
    /// `Pair` containing poison value(s), or a `Ref` containing a poison pointer.
    ///
    /// Supports sized types only.
    pub fn poison<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> OperandValue<V> {
        assert!(layout.is_sized());
        if layout.is_zst() {
            OperandValue::ZeroSized
        } else if bx.cx().is_backend_immediate(layout) {
            let ibty = bx.cx().immediate_backend_type(layout);
            OperandValue::Immediate(bx.const_poison(ibty))
        } else if bx.cx().is_backend_scalar_pair(layout) {
            let ibty0 = bx.cx().scalar_pair_element_backend_type(layout, 0, true);
            let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
            OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
        } else {
            let ptr = bx.cx().type_ptr();
            OperandValue::Ref(PlaceValue::new_sized(bx.const_poison(ptr), layout.align.abi))
        }
    }

    pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::empty());
    }

    pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::VOLATILE);
    }

    pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
    }

    pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
    ) {
        self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
    }

    pub(crate) fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, V>,
        flags: MemFlags,
    ) {
        debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
        match self {
            OperandValue::ZeroSized => {
                // Avoid generating stores of zero-sized values, because the only way to have a
                // zero-sized value is through `undef`/`poison`, and the store itself is useless.
            }
            OperandValue::Ref(val) => {
                assert!(dest.layout.is_sized(), "cannot directly store unsized values");
                if val.llextra.is_some() {
                    bug!("cannot directly store unsized values");
                }
                bx.typed_place_copy_with_flags(dest.val, val, dest.layout, flags);
            }
            OperandValue::Immediate(s) => {
                let val = bx.from_immediate(s);
                bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
            }
            OperandValue::Pair(a, b) => {
                let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
                    bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                };
                let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);

                let val = bx.from_immediate(a);
                let align = dest.val.align;
                bx.store_with_flags(val, dest.val.llval, align, flags);

                let llptr = bx.inbounds_ptradd(dest.val.llval, bx.const_usize(b_offset.bytes()));
                let val = bx.from_immediate(b);
                let align = dest.val.align.restrict_for_offset(b_offset);
                bx.store_with_flags(val, llptr, align, flags);
            }
        }
    }
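
    // Pair-store arithmetic, made concrete (a sketch; offsets are
    // target-dependent): storing `#[repr(C)] struct S(i32, i64)` as a pair
    // writes `a` at offset 0, then computes
    // `b_offset = align_to(size_of(i32), align_of(i64)) = align_to(4, 8) = 8`
    // and writes `b` through a pointer bumped by 8 bytes, with the alignment
    // restricted accordingly (an 8-aligned base stays 8-aligned at +8).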

    pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, V>,
    ) {
        debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
        // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
        let unsized_ty = indirect_dest
            .layout
            .ty
            .builtin_deref(true)
            .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest));

        let OperandValue::Ref(PlaceValue { llval: llptr, llextra: Some(llextra), .. }) = self
        else {
            bug!("store_unsized called with a sized value (or with an extern type)")
        };

        // Allocate an appropriate region on the stack, and copy the value into it. Since alloca
        // doesn't support dynamic alignment, we allocate an extra `align - 1` bytes, and align the
        // pointer manually.
        let (size, align) = size_of_val::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
        let one = bx.const_usize(1);
        let align_minus_1 = bx.sub(align, one);
        let size_extra = bx.add(size, align_minus_1);
        let min_align = Align::ONE;
        let alloca = bx.dynamic_alloca(size_extra, min_align);
        let address = bx.ptrtoint(alloca, bx.type_isize());
        let neg_address = bx.neg(address);
        let offset = bx.and(neg_address, align_minus_1);
        let dst = bx.inbounds_ptradd(alloca, offset);
        bx.memcpy(dst, min_align, llptr, min_align, size, MemFlags::empty());

        // Store the allocated region and the extra to the indirect place.
        let indirect_operand = OperandValue::Pair(dst, llextra);
        indirect_operand.store(bx, indirect_dest);
    }
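
    // The manual alignment above is the standard round-up-to-alignment trick:
    // `offset = (-address) & (align - 1)` pushes `address` up to the next
    // multiple of `align` (which must be a power of two). Worked example,
    // purely illustrative: with `address = 0x1003` and `align = 8`,
    // `(-0x1003) & 7 == 5`, so `dst = 0x1008`, the next 8-aligned address.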
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    fn maybe_codegen_consume_direct(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> Option<OperandRef<'tcx, Bx::Value>> {
        debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);

        match self.locals[place_ref.local] {
            LocalRef::Operand(mut o) => {
                // Moves out of scalar and scalar pair fields are trivial.
                for elem in place_ref.projection.iter() {
                    match elem {
                        mir::ProjectionElem::Field(f, _) => {
                            assert!(
                                !o.layout.ty.is_any_ptr(),
749                                "Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
                                 but tried to access field {f:?} of pointer {o:?}",
                            );
                            o = o.extract_field(self, bx, f.index());
                        }
                        mir::ProjectionElem::Index(_)
                        | mir::ProjectionElem::ConstantIndex { .. } => {
                            // ZSTs don't require any actual memory access.
                            // FIXME(eddyb) deduplicate this with the identical
                            // checks in `codegen_consume` and `extract_field`.
                            let elem = o.layout.field(bx.cx(), 0);
                            if elem.is_zst() {
                                o = OperandRef::zero_sized(elem);
                            } else {
                                return None;
                            }
                        }
                        _ => return None,
                    }
                }

                Some(o)
            }
            LocalRef::PendingOperand => {
                bug!("use of {:?} before def", place_ref);
            }
            LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
                // Watch out for locals that do not have an alloca;
                // they are handled somewhat differently.
                None
            }
        }
    }

    pub fn codegen_consume(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        debug!("codegen_consume(place_ref={:?})", place_ref);

        let ty = self.monomorphized_place_ty(place_ref);
        let layout = bx.cx().layout_of(ty);

        // ZSTs don't require any actual memory access.
        if layout.is_zst() {
            return OperandRef::zero_sized(layout);
        }

        if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
            return o;
        }

        // For most places, to consume them we just load them
        // out from their home.
        let place = self.codegen_place(bx, place_ref);
        bx.load_operand(place)
    }

    pub fn codegen_operand(
        &mut self,
        bx: &mut Bx,
        operand: &mir::Operand<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        debug!("codegen_operand(operand={:?})", operand);

        match *operand {
            mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
                self.codegen_consume(bx, place.as_ref())
            }

            mir::Operand::Constant(ref constant) => {
                let constant_ty = self.monomorphize(constant.ty());
                // Most SIMD vector constants should be passed as immediates.
                // (In particular, some intrinsics really rely on this.)
                if constant_ty.is_simd() {
                    // However, some SIMD types do not actually use the vector ABI
                    // (in particular, packed SIMD types do not). Ensure we exclude those.
                    let layout = bx.layout_of(constant_ty);
                    if let BackendRepr::SimdVector { .. } = layout.backend_repr {
                        let (llval, ty) = self.immediate_const_vector(bx, constant);
                        return OperandRef {
                            val: OperandValue::Immediate(llval),
                            layout: bx.layout_of(ty),
                        };
                    }
                }
                self.eval_mir_constant_to_operand(bx, constant)
            }
        }
    }
}