// rustc_codegen_ssa/mir/place.rs

use rustc_abi::{Align, BackendRepr, FieldsShape, Size, TagEncoding, VariantIdx, Variants};
use rustc_middle::mir::PlaceTy;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, mir};
use tracing::{debug, instrument};

use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::size_of_val;
use crate::traits::*;

/// The location and extra runtime properties of the place.
///
/// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`].
///
/// As a location in memory, this has no specific type. If you want to
/// load or store it using a typed operation, use [`Self::with_type`].
#[derive(Copy, Clone, Debug)]
pub struct PlaceValue<V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized:
    /// e.g. the length of a slice or the vtable pointer of a trait object.
    pub llextra: Option<V>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<V: CodegenObject> PlaceValue<V> {
    /// Constructor for the ordinary case of `Sized` types.
    ///
    /// Sets `llextra` to `None`.
    pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
        PlaceValue { llval, llextra: None, align }
    }

    /// Allocates a stack slot in the function for a value
    /// of the specified size and alignment.
    ///
    /// The allocation itself is untyped.
    pub fn alloca<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        size: Size,
        align: Align,
    ) -> PlaceValue<V> {
        let llval = bx.alloca(size, align);
        PlaceValue::new_sized(llval, align)
    }

    /// Creates a `PlaceRef` to this location with the given type.
    pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(
            layout.is_unsized() || layout.is_uninhabited() || self.llextra.is_none(),
            "Had pointer metadata {:?} for sized type {layout:?}",
            self.llextra,
        );
        PlaceRef { val: self, layout }
    }

    /// Gets the pointer to this place as an [`OperandValue::Immediate`]
    /// or, for those needing metadata, an [`OperandValue::Pair`].
    ///
    /// This is the inverse of [`OperandValue::deref`].
    pub fn address(self) -> OperandValue<V> {
        if let Some(llextra) = self.llextra {
            OperandValue::Pair(self.llval, llextra)
        } else {
            OperandValue::Immediate(self.llval)
        }
    }
}

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// The location and extra runtime properties of the place.
    pub val: PlaceValue<V>,

    /// The monomorphized type of this place, including variant information.
    ///
    /// You probably shouldn't use the alignment from this layout;
    /// rather you should use the `.val.align` of the actual place,
    /// which might be different from the type's normal alignment.
    pub layout: TyAndLayout<'tcx>,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        PlaceRef::new_sized_aligned(llval, layout, layout.align.abi)
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceValue::new_sized(llval, align).with_type(layout)
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        Self::alloca_size(bx, layout.size, layout)
    }

    pub fn alloca_size<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        size: Size,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_sized(), "tried to statically allocate unsized place");
        PlaceValue::alloca(bx, size, layout.align.abi).with_type(layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = Ty::new_mut_ptr(bx.cx().tcx(), layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

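    /// Returns the length of this array or slice place: the constant element
    /// count for an array, or the slice length carried in the pointer metadata.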
    pub fn len<Cx: ConstCodegenMethods<Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.val.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
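        // The field is guaranteed aligned only up to the largest power of two
        // that divides both the place's alignment and the field's byte offset.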
        let effective_field_align = self.val.align.restrict_for_offset(offset);

        // `simple` is called when we don't need to adjust the offset to
        // the dynamic alignment of the field.
        let mut simple = || {
            let llval = if offset.bytes() == 0 {
                self.val.llval
            } else {
                bx.inbounds_ptradd(self.val.llval, bx.const_usize(offset.bytes()))
            };
            let val = PlaceValue {
                llval,
                llextra: if bx.cx().tcx().type_has_metadata(field.ty, bx.cx().typing_env()) {
                    self.val.llextra
                } else {
                    None
                },
                align: effective_field_align,
            };
            val.with_type(field)
        };

        // Simple cases, which don't need DST adjustment:
        //   * known alignment - sized types, `[T]`, `str`
        //   * offset 0 - rounding up to alignment cannot change the offset
        // Note that looking at `field.align` is incorrect since that is not necessarily equal
        // to the dynamic alignment of the type.
        match field.ty.kind() {
            _ if field.is_sized() => return simple(),
            ty::Slice(..) | ty::Str => return simple(),
            _ if offset.bytes() == 0 => return simple(),
            _ => {}
        }

        // We need to compute the pointer manually now.
        // We do this by offsetting the pointer by the appropriate number of bytes
        // (a byte-wise `ptradd`), instead of, say, adjusting the result of a typed GEP,
        // because the field may have an arbitrary alignment in the LLVM representation.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<dyn Trait>>` is represented in LLVM as `{ u16, { u16, u8 } }`,
        // meaning that the `y` field has 16-bit alignment.
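        //
        // If the erased type behind the trait object is, say, `Foo<u32>`, the value actually
        // needs 4-byte alignment, so `y`'s runtime offset must be the static offset (2)
        // rounded up to the value's dynamic alignment, which is what the code below computes.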

        let meta = self.val.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, mut unsized_align) = size_of_val::size_and_align_of_dst(bx, field.ty, meta);

        // For packed types, we need to cap alignment.
        if let ty::Adt(def, _) = self.layout.ty.kind()
            && let Some(packed) = def.repr().pack
        {
            let packed = bx.const_usize(packed.bytes());
            let cmp = bx.icmp(IntPredicate::IntULT, unsized_align, packed);
            unsized_align = bx.select(cmp, unsized_align, packed)
        }

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Adjust pointer.
        let ptr = bx.inbounds_ptradd(self.val.llval, offset);
        let val =
            PlaceValue { llval: ptr, llextra: self.val.llextra, align: effective_field_align };
        val.with_type(field)
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Empty => unreachable!("we already handled uninhabited types"),
            Variants::Single { index } => assert_eq!(index, variant_index),

            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store_to_place(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.val,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != untagged_variant {
                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let BackendRepr::Scalar(scalar) = niche.layout.backend_repr else {
                        bug!("expected a scalar placeref for the niche");
                    };
                    // We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
                    // around the `niche`'s type.
                    // The easiest way to do that is to do wrapping arithmetic on `u128` and then
                    // masking off any extra bits that occur because we did the arithmetic with too many bits.
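                    // For example, with a `u8` niche, `niche_start == 254`, and the second niche
                    // variant (`niche_value == 1`): `(1 + 254) & 0xff == 255` is the stored tag.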
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
                    let niche_value = niche_value & niche.layout.size.unsigned_int_max();

                    let niche_llval = bx.cx().scalar_to_backend(
                        Scalar::from_uint(niche_value, niche.layout.size),
                        scalar,
                        niche_llty,
                    );
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        let llval = bx.inbounds_nuw_gep(bx.cx().backend_type(layout), self.val.llval, &[llindex]);
        let align = self.val.align.restrict_for_offset(offset);
        PlaceValue::new_sized(llval, align).with_type(layout)
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
        downcast
    }

    pub fn project_type<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        ty: Ty<'tcx>,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = bx.cx().layout_of(ty);
        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.val.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.val.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                if place_ref.is_indirect_first_projection() {
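                    // The local is an SSA operand holding a pointer, so consume it as an
                    // operand and dereference that to get the base place; the projection
                    // loop below then skips the leading `Deref` handled here.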
                    base = 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
                    );
                    cg_base.deref(bx.cx())
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
            LocalRef::PendingOperand => {
                bug!("using still-pending operand local {:?} as place", place_ref);
            }
        };
        for elem in place_ref.projection[base..].iter() {
            cg_base = match *elem {
                mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
                mir::ProjectionElem::Field(ref field, _) => {
                    assert!(
                        !cg_base.layout.ty.is_any_ptr(),
                        "Bad PlaceRef: destructuring pointers should use cast/PtrMetadata, \
                         but tried to access field {field:?} of pointer {cg_base:?}",
                    );
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::OpaqueCast(ty) => {
                    bug!("encountered OpaqueCast({ty}) in codegen")
                }
                mir::ProjectionElem::Subtype(ty) => cg_base.project_type(bx, self.monomorphize(ty)),
                mir::ProjectionElem::UnwrapUnsafeBinder(ty) => {
                    cg_base.project_type(bx, self.monomorphize(ty))
                }
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
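                        // New length = old length minus the `from` elements dropped from
                        // the front and the `to` elements dropped from the back.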
                        subslice.val.llextra = Some(
                            bx.sub(cg_base.val.llextra.unwrap(), bx.cx().const_usize(from + to)),
                        );
                    }

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of a constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // that optimization only holds if `align` is a power of two. Since the optimizer doesn't
    // know that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it's optimized to `0` even in debug mode.
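    //
    // For example, `value = 13`, `align = 8`: `-13 & 7 == 3`, and `13 + 3 == 16`,
    // which is `13` rounded up to the next multiple of `8`.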
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}