// rustc_codegen_llvm/type_of.rs

use std::fmt::Write;

use rustc_abi::Primitive::{Float, Int, Pointer};
use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TypeVisitableExt};
use tracing::debug;

use crate::common::*;
use crate::type_::Type;

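/// Builds the LLVM type for a layout that is not a plain scalar (scalars are
/// handled separately in `llvm_type`). Vector layouts map directly to LLVM
/// vector types; everything else is lowered to an (optionally named) struct,
/// array, or padding filler based on `layout.fields`. For named structs with
/// `FieldsShape::Arbitrary`, the body is not filled in here: it is recorded in
/// `defer` so the caller can cache the still-opaque named type before its
/// fields are lowered.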
fn uncached_llvm_type<'a, 'tcx>(
    cx: &CodegenCx<'a, 'tcx>,
    layout: TyAndLayout<'tcx>,
    defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
) -> &'a Type {
    match layout.backend_repr {
        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
        BackendRepr::Vector { element, count } => {
            let element = layout.scalar_llvm_type_at(cx, element);
            return cx.type_vector(element, count);
        }
        BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
    }

    let name = match layout.ty.kind() {
        // FIXME(eddyb) producing readable type names for trait objects can result
        // in problematically distinct types due to HRTB and subtyping (see #47638).
        // ty::Dynamic(..) |
        ty::Adt(..)
        | ty::Closure(..)
        | ty::CoroutineClosure(..)
        | ty::Foreign(..)
        | ty::Coroutine(..)
        | ty::Str
            // For performance reasons we use names only when emitting LLVM IR.
            if !cx.sess().fewer_names() =>
        {
            let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
            if let (&ty::Adt(def, _), &Variants::Single { index }) =
                (layout.ty.kind(), &layout.variants)
            {
                if def.is_enum() {
                    write!(&mut name, "::{}", def.variant(index).name).unwrap();
                }
            }
            if let (&ty::Coroutine(_, _), &Variants::Single { index }) =
                (layout.ty.kind(), &layout.variants)
            {
                write!(&mut name, "::{}", ty::CoroutineArgs::variant_name(index)).unwrap();
            }
            Some(name)
        }
        _ => None,
    };

    match layout.fields {
        FieldsShape::Primitive | FieldsShape::Union(_) => {
            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
            let packed = false;
            match name {
                None => cx.type_struct(&[fill], packed),
                Some(ref name) => {
                    let llty = cx.type_named_struct(name);
                    cx.set_struct_body(llty, &[fill], packed);
                    llty
                }
            }
        }
        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
        FieldsShape::Arbitrary { .. } => match name {
            None => {
                let (llfields, packed) = struct_llfields(cx, layout);
                cx.type_struct(&llfields, packed)
            }
            Some(ref name) => {
                let llty = cx.type_named_struct(name);
                *defer = Some((llty, layout));
                llty
            }
        },
    }
}

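/// Computes the LLVM field types for a layout with arbitrarily placed fields,
/// walking them in increasing-offset order and inserting explicit padding
/// fillers so that every field lands at its target offset (plus trailing
/// padding up to `layout.size` for sized layouts). Returns the field types
/// together with a flag indicating whether the resulting LLVM struct must be
/// marked packed, which is the case whenever a field's natural alignment
/// cannot be guaranteed at its offset.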
fn struct_llfields<'a, 'tcx>(
    cx: &CodegenCx<'a, 'tcx>,
    layout: TyAndLayout<'tcx>,
) -> (Vec<&'a Type>, bool) {
    debug!("struct_llfields: {:#?}", layout);
    let field_count = layout.fields.count();

    let mut packed = false;
    let mut offset = Size::ZERO;
    let mut prev_effective_align = layout.align.abi;
    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
    for i in layout.fields.index_by_increasing_offset() {
        let target_offset = layout.fields.offset(i as usize);
        let field = layout.field(cx, i);
        let effective_field_align =
            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
        packed |= effective_field_align < field.align.abi;

        debug!(
            "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
                effective_field_align: {}",
            i,
            field,
            offset,
            target_offset,
            effective_field_align.bytes()
        );
        assert!(target_offset >= offset);
        let padding = target_offset - offset;
        if padding != Size::ZERO {
            let padding_align = prev_effective_align.min(effective_field_align);
            assert_eq!(offset.align_to(padding_align) + padding, target_offset);
            result.push(cx.type_padding_filler(padding, padding_align));
            debug!("    padding before: {:?}", padding);
        }
        result.push(field.llvm_type(cx));
        offset = target_offset + field.size;
        prev_effective_align = effective_field_align;
    }
    if layout.is_sized() && field_count > 0 {
        if offset > layout.size {
            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
        }
        let padding = layout.size - offset;
        if padding != Size::ZERO {
            let padding_align = prev_effective_align;
            assert_eq!(offset.align_to(padding_align) + padding, layout.size);
            debug!(
                "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
                padding, offset, layout.size
            );
            result.push(cx.type_padding_filler(padding, padding_align));
        }
    } else {
        debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
    }
    (result, packed)
}

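// Size and alignment conveniences that read off the computed layout of a type.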
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
    pub(crate) fn align_of(&self, ty: Ty<'tcx>) -> Align {
        self.layout_of(ty).align.abi
    }

    pub(crate) fn size_of(&self, ty: Ty<'tcx>) -> Size {
        self.layout_of(ty).size
    }

    pub(crate) fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
        let layout = self.layout_of(ty);
        (layout.size, layout.align.abi)
    }
}

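/// Extension methods on `TyAndLayout` used by the LLVM backend to decide how a
/// value is represented: whether it is an LLVM immediate or a scalar pair, and
/// which LLVM types to use for its in-memory and immediate forms.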
pub(crate) trait LayoutLlvmExt<'tcx> {
    fn is_llvm_immediate(&self) -> bool;
    fn is_llvm_scalar_pair(&self) -> bool;
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type;
    fn scalar_pair_element_llvm_type<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        index: usize,
        immediate: bool,
    ) -> &'a Type;
}

impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
    fn is_llvm_immediate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
                false
            }
        }
    }

    fn is_llvm_scalar_pair(&self) -> bool {
        match self.backend_repr {
            BackendRepr::ScalarPair(..) => true,
            BackendRepr::Uninhabited
            | BackendRepr::Scalar(_)
            | BackendRepr::Vector { .. }
            | BackendRepr::Memory { .. } => false,
        }
    }

    /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
    /// The pointee type of the pointer in `PlaceRef` is always this type.
    /// For sized types, it is also the right LLVM type for an `alloca`
    /// containing a value of that type, and most immediates (except `bool`).
    /// Unsized types, however, are represented by a "minimal unit", e.g.
    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
    /// If the type is an unsized struct, the regular layout is generated,
    /// with the innermost trailing unsized field using the "minimal unit"
    /// of that field's type - this is useful for taking the address of
    /// that field and ensuring the struct has the right alignment.
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
        // In other words, this should generally not look at the type at all, but only at the
        // layout.
        if let BackendRepr::Scalar(scalar) = self.backend_repr {
            // Use a different cache for scalars because pointers to DSTs
            // can be either wide or thin (data pointers of wide pointers).
            if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                return llty;
            }
            let llty = self.scalar_llvm_type_at(cx, scalar);
            cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
            return llty;
        }

        // Check the cache.
        let variant_index = match self.variants {
            Variants::Single { index } => Some(index),
            _ => None,
        };
        if let Some(&llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
            return llty;
        }

        debug!("llvm_type({:#?})", self);

        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);

        // Make sure lifetimes are erased, to avoid generating distinct LLVM
        // types for Rust types that only differ in the choice of lifetimes.
        let normal_ty = cx.tcx.erase_regions(self.ty);

        let mut defer = None;
        let llty = if self.ty != normal_ty {
            let mut layout = cx.layout_of(normal_ty);
            if let Some(v) = variant_index {
                layout = layout.for_variant(cx, v);
            }
            layout.llvm_type(cx)
        } else {
            uncached_llvm_type(cx, *self, &mut defer)
        };
        debug!("--> mapped {:#?} to llty={:?}", self, llty);

        cx.type_lowering.borrow_mut().insert((self.ty, variant_index), llty);

        if let Some((llty, layout)) = defer {
            let (llfields, packed) = struct_llfields(cx, layout);
            cx.set_struct_body(llty, &llfields, packed);
        }
        llty
    }

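    /// Returns the LLVM type used when a value of this layout is kept as an
    /// immediate (an SSA value) rather than in memory: `bool` becomes `i1`,
    /// scalar pairs become a two-element struct with no padding filler, and
    /// everything else is the same as `llvm_type`.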
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        match self.backend_repr {
            BackendRepr::Scalar(scalar) => {
                if scalar.is_bool() {
                    return cx.type_i1();
                }
            }
            BackendRepr::ScalarPair(..) => {
                // An immediate pair always contains just the two elements, without any padding
                // filler, as it should never be stored to memory.
                return cx.type_struct(
                    &[
                        self.scalar_pair_element_llvm_type(cx, 0, true),
                        self.scalar_pair_element_llvm_type(cx, 1, true),
                    ],
                    false,
                );
            }
            _ => {}
        };
        self.llvm_type(cx)
    }

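    /// Maps a single scalar primitive (integer, float, or pointer with its
    /// address space) to the corresponding LLVM type.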
    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type {
        match scalar.primitive() {
            Int(i, _) => cx.type_from_integer(i),
            Float(f) => cx.type_from_float(f),
            Pointer(address_space) => cx.type_ptr_ext(address_space),
        }
    }

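    /// Returns the LLVM type of one element (`index` 0 or 1) of a scalar pair,
    /// using the immediate representation (`i1` for `bool`) when `immediate`
    /// is true; see the comment in the body about staying in sync with
    /// `immediate_llvm_type`.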
    fn scalar_pair_element_llvm_type<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        index: usize,
        immediate: bool,
    ) -> &'a Type {
        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
        // In other words, this should generally not look at the type at all, but only at the
        // layout.
        let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
            bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
        };
        let scalar = [a, b][index];

        // Make sure to return the same type `immediate_llvm_type` would when
        // dealing with an immediate pair. This means that `(bool, bool)` is
        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
        // immediate, just like `bool` is typically `i8` in memory and only `i1`
        // when immediate. We need to load/store `bool` as `i8` to avoid
        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
        if immediate && scalar.is_bool() {
            return cx.type_i1();
        }

        self.scalar_llvm_type_at(cx, scalar)
    }
}