rustc_codegen_ssa/mir/place.rs

use rustc_abi::Primitive::{Int, Pointer};
use rustc_abi::{Align, BackendRepr, FieldsShape, Size, TagEncoding, VariantIdx, Variants};
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, mir};
use tracing::{debug, instrument};

use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::size_of_val;
use crate::traits::*;

/// The location and extra runtime properties of the place.
///
/// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`].
///
/// As a location in memory, this has no specific type. If you want to
/// load or store it using a typed operation, use [`Self::with_type`].
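///
/// A minimal sketch of typical usage (illustrative only; `ptr` and
/// `u32_layout` are placeholders for a backend value and a `TyAndLayout`
/// obtained from `layout_of`):
///
/// ```ignore (illustrative)
/// let val = PlaceValue::new_sized(ptr, Align::from_bytes(4).unwrap());
/// let place = val.with_type(u32_layout);
/// // `place` can now be used for typed loads and stores through a builder.
/// ```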
#[derive(Copy, Clone, Debug)]
pub struct PlaceValue<V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's pointer metadata if it is unsized (e.g. a slice length
    /// or a vtable pointer), or `None` if there is no metadata.
    pub llextra: Option<V>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<V: CodegenObject> PlaceValue<V> {
    /// Constructor for the ordinary case of `Sized` types.
    ///
    /// Sets `llextra` to `None`.
    pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
        PlaceValue { llval, llextra: None, align }
    }

    /// Allocates a stack slot in the function for a value
    /// of the specified size and alignment.
    ///
    /// The allocation itself is untyped.
    pub fn alloca<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        size: Size,
        align: Align,
    ) -> PlaceValue<V> {
        let llval = bx.alloca(size, align);
        PlaceValue::new_sized(llval, align)
    }

    /// Creates a `PlaceRef` to this location with the given type.
    pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(
            layout.is_unsized() || layout.is_uninhabited() || self.llextra.is_none(),
            "Had pointer metadata {:?} for sized type {layout:?}",
            self.llextra,
        );
        PlaceRef { val: self, layout }
    }

    /// Gets the pointer to this place as an [`OperandValue::Immediate`]
    /// or, for those needing metadata, an [`OperandValue::Pair`].
    ///
    /// This is the inverse of [`OperandValue::deref`].
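    ///
    /// For example (illustrative): a `PlaceValue` for a `[u8]` carries
    /// `llextra = Some(len)`, so `address()` yields
    /// `OperandValue::Pair(data_ptr, len)`, the same shape as a `&[u8]`
    /// operand, while a sized place yields `OperandValue::Immediate(ptr)`.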
    pub fn address(self) -> OperandValue<V> {
        if let Some(llextra) = self.llextra {
            OperandValue::Pair(self.llval, llextra)
        } else {
            OperandValue::Immediate(self.llval)
        }
    }
}

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// The location and extra runtime properties of the place.
    pub val: PlaceValue<V>,

    /// The monomorphized type of this place, including variant information.
    ///
    /// You probably shouldn't use the alignment from this layout;
    /// rather you should use the `.val.align` of the actual place,
    /// which might be different from the type's normal alignment.
    pub layout: TyAndLayout<'tcx>,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        PlaceRef::new_sized_aligned(llval, layout, layout.align.abi)
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceValue::new_sized(llval, align).with_type(layout)
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        Self::alloca_size(bx, layout.size, layout)
    }

    pub fn alloca_size<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        size: Size,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_sized(), "tried to statically allocate unsized place");
        PlaceValue::alloca(bx, size, layout.align.abi).with_type(layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = Ty::new_mut_ptr(bx.cx().tcx(), layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstCodegenMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.val.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.val.align.restrict_for_offset(offset);

        // `simple` is called when we don't need to adjust the offset to
        // the dynamic alignment of the field.
        let mut simple = || {
            let llval = if offset.bytes() == 0 {
                self.val.llval
            } else {
                bx.inbounds_ptradd(self.val.llval, bx.const_usize(offset.bytes()))
            };
            let val = PlaceValue {
                llval,
                llextra: if bx.cx().type_has_metadata(field.ty) { self.val.llextra } else { None },
                align: effective_field_align,
            };
            val.with_type(field)
        };

        // Simple cases, which don't need DST adjustment:
        //   * known alignment - sized types, `[T]`, `str`
        //   * offset 0 -- rounding up to alignment cannot change the offset
        // Note that looking at `field.align` is incorrect since that is not necessarily equal
        // to the dynamic alignment of the type.
        match field.ty.kind() {
            _ if field.is_sized() => return simple(),
            ty::Slice(..) | ty::Str => return simple(),
            _ if offset.bytes() == 0 => return simple(),
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.
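        //
        // A worked illustration (concrete type assumed for the example): if
        // the type behind `Trait` is `u32`, the tail `Foo<u32>` has alignment
        // 4, so at runtime `y` actually starts at offset 4, not at the static
        // offset 2 implied by the LLVM type above. The rounding below computes
        // exactly that dynamically aligned offset.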

        let meta = self.val.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, mut unsized_align) = size_of_val::size_and_align_of_dst(bx, field.ty, meta);

        // For packed types, we need to cap alignment.
        if let ty::Adt(def, _) = self.layout.ty.kind()
            && let Some(packed) = def.repr().pack
        {
            let packed = bx.const_usize(packed.bytes());
            let cmp = bx.icmp(IntPredicate::IntULT, unsized_align, packed);
            unsized_align = bx.select(cmp, unsized_align, packed)
        }

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Adjust pointer.
        let ptr = bx.inbounds_ptradd(self.val.llval, offset);
        let val =
            PlaceValue { llval: ptr, llextra: self.val.llextra, align: effective_field_align };
        val.with_type(field)
    }

    /// Obtain the actual discriminant of a value.
    #[instrument(level = "trace", skip(bx))]
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let dl = &bx.tcx().data_layout;
        let cast_to_layout = bx.cx().layout_of(cast_to);
        let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
        if self.layout.is_uninhabited() {
            return bx.cx().const_poison(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Empty => unreachable!("we already handled uninhabited types"),
            Variants::Single { index } => {
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag_op = bx.load_operand(tag);
        let tag_imm = tag_op.immediate();

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag_imm, cast_to, signed)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                // Cast to an integer so we don't have to treat a pointer as a
                // special case.
                let (tag, tag_llty) = match tag_scalar.primitive() {
                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                    Pointer(_) => {
                        let t = bx.type_from_integer(dl.ptr_sized_integer());
                        let tag = bx.ptrtoint(tag_imm, t);
                        (tag, t)
                    }
                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                };

                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();

                // We have a subrange `niche_start..=niche_end` inside `range`.
                // If the value of the tag is inside this subrange, it's a
                // "niche value", an increment of the discriminant. Otherwise it
                // indicates the untagged variant.
                // A general algorithm to extract the discriminant from the tag
                // is:
                // relative_tag = tag - niche_start
                // is_niche = relative_tag <= (ule) relative_max
                // discr = if is_niche {
                //     cast(relative_tag) + niche_variants.start()
                // } else {
                //     untagged_variant
                // }
                // However, we will likely be able to emit simpler code.
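                //
                // A concrete illustration (the niche values are assumed for
                // the example; the real choice is made by layout): for
                // `enum E { A(bool), B, C, D }` the tag is the `bool` byte,
                // with, say, `niche_start = 2`, `niche_variants = B..=D`, and
                // `untagged_variant = A`. A stored tag of 3 then gives
                // `relative_tag = 1 <= relative_max = 2`, so
                // `discr = 1 + niche_variants.start()`, i.e. variant `C`.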
                let (is_niche, tagged_discr, delta) = if relative_max == 0 {
                    // Best case scenario: only one tagged variant. This will
                    // likely become just a comparison and a jump.
                    // The algorithm is:
                    // is_niche = tag == niche_start
                    // discr = if is_niche {
                    //     niche_start
                    // } else {
                    //     untagged_variant
                    // }
                    let niche_start = bx.cx().const_uint_big(tag_llty, niche_start);
                    let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start);
                    let tagged_discr =
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
                    (is_niche, tagged_discr, 0)
                } else {
                    // The special cases don't apply, so we'll have to go with
                    // the general algorithm.
                    let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start));
                    let cast_tag = bx.intcast(relative_discr, cast_to, false);
                    let is_niche = bx.icmp(
                        IntPredicate::IntULE,
                        relative_discr,
                        bx.cx().const_uint(tag_llty, relative_max as u64),
                    );
                    (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
                };

                let tagged_discr = if delta == 0 {
                    tagged_discr
                } else {
                    bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
                };

                let discr = bx.select(
                    is_niche,
                    tagged_discr,
                    bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
                );

                // In principle we could insert assumes on the possible range of `discr`, but
                // currently in LLVM this seems to be a pessimization.

                discr
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Empty => unreachable!("we already handled uninhabited types"),
            Variants::Single { index } => assert_eq!(index, variant_index),

            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store_to_place(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.val,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != untagged_variant {
                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let BackendRepr::Scalar(scalar) = niche.layout.backend_repr else {
                        bug!("expected a scalar placeref for the niche");
                    };
                    // We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
                    // around the `niche`'s type.
                    // The easiest way to do that is to do wrapping arithmetic on `u128` and then
                    // masking off any extra bits that occur because we did the arithmetic with too many bits.
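                    // For example (illustrative numbers, continuing the
                    // `enum E { A(bool), B, C, D }` sketch above with
                    // `niche_start = 2`): writing variant `C` gives
                    // `niche_value = 2 - 1 = 1`, then `1 + 2 = 3`, which fits
                    // in the one-byte niche, so the mask leaves it unchanged.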
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
                    let niche_value = niche_value & niche.layout.size.unsigned_int_max();

                    let niche_llval = bx.cx().scalar_to_backend(
                        Scalar::from_uint(niche_value, niche.layout.size),
                        scalar,
                        niche_llty,
                    );
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        let llval = bx.inbounds_gep(bx.cx().backend_type(layout), self.val.llval, &[llindex]);
        let align = self.val.align.restrict_for_offset(offset);
        PlaceValue::new_sized(llval, align).with_type(layout)
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
        downcast
    }

    pub fn project_type<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        ty: Ty<'tcx>,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = bx.cx().layout_of(ty);
        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.val.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.val.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                if place_ref.is_indirect_first_projection() {
                    base = 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
                    );
                    cg_base.deref(bx.cx())
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
            LocalRef::PendingOperand => {
                bug!("using still-pending operand local {:?} as place", place_ref);
            }
        };
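        // Walk the remaining projections in order. For example, a MIR place
        // such as `(*_1).0[_2]` reaches this loop as the projection list
        // `[Deref, Field(0, _), Index(_2)]`, each step refining `cg_base`.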
        for elem in place_ref.projection[base..].iter() {
            cg_base = match *elem {
                mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
                mir::ProjectionElem::Field(ref field, _) => {
                    assert!(
                        !cg_base.layout.ty.is_any_ptr(),
                        "Bad PlaceRef: destructuring pointers should use cast/PtrMetadata, \
                         but tried to access field {field:?} of pointer {cg_base:?}",
                    );
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::OpaqueCast(ty) => {
                    bug!("encountered OpaqueCast({ty}) in codegen")
                }
                mir::ProjectionElem::Subtype(ty) => cg_base.project_type(bx, self.monomorphize(ty)),
                mir::ProjectionElem::UnwrapUnsafeBinder(ty) => {
                    cg_base.project_type(bx, self.monomorphize(ty))
                }
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
                        subslice.val.llextra = Some(
                            bx.sub(cg_base.val.llextra.unwrap(), bx.cx().const_usize(from + to)),
                        );
                    }

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // that optimization only holds if `align` is a power of two. Since the optimizer doesn't
    // know that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it's optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
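    //
    // Worked example (numbers for illustration only): for value = 5 and
    // align = 4, -5 & (4 - 1) = 0b...1011 & 0b011 = 3, and 5 + 3 = 8. For an
    // already aligned value = 8, -8 & 3 = 0, so the result stays 8.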
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}