// rustc_codegen_llvm/type_of.rs

use std::fmt::Write;

use rustc_abi::Primitive::{Float, Int, Pointer};
use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TypeVisitableExt};
use rustc_span::{DUMMY_SP, Span};
use tracing::debug;

use crate::common::*;
use crate::type_::Type;

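/// Computes the LLVM type for a layout that is not in any cache. For named
/// structs, the body is not built here; instead it is handed back through
/// `defer` so the caller (`llvm_type`) can cache the opaque named struct
/// first and fill in the fields afterwards.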
fn uncached_llvm_type<'a, 'tcx>(
    cx: &CodegenCx<'a, 'tcx>,
    layout: TyAndLayout<'tcx>,
    defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
) -> &'a Type {
    match layout.backend_repr {
        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
        BackendRepr::SimdVector { element, count } => {
            let element = layout.scalar_llvm_type_at(cx, element);
            return cx.type_vector(element, count);
        }
        BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
    }

    let name = match layout.ty.kind() {
        // FIXME(eddyb) producing readable type names for trait objects can result
        // in problematically distinct types due to HRTB and subtyping (see #47638).
        // ty::Dynamic(..) |
        ty::Adt(..)
        | ty::Closure(..)
        | ty::CoroutineClosure(..)
        | ty::Foreign(..)
        | ty::Coroutine(..)
        | ty::Str
            // For performance reasons we use names only when emitting LLVM IR.
            if !cx.sess().fewer_names() =>
        {
            let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
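            // When the layout has been restricted to a single enum (or
            // coroutine) variant, append the variant name, so that,
            // illustratively, a downcast layout of `Option<u32>` shows up
            // in the IR as `...Option<u32>::Some`.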
            if let (&ty::Adt(def, _), &Variants::Single { index }) =
                (layout.ty.kind(), &layout.variants)
            {
                if def.is_enum() {
                    write!(&mut name, "::{}", def.variant(index).name).unwrap();
                }
            }
            if let (&ty::Coroutine(_, _), &Variants::Single { index }) =
                (layout.ty.kind(), &layout.variants)
            {
                write!(&mut name, "::{}", ty::CoroutineArgs::variant_name(index)).unwrap();
            }
            Some(name)
        }
        _ => None,
    };

    match layout.fields {
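        // Unions (and `Primitive` shapes that reach this point) expose no
        // typed fields to LLVM; lower them as an opaque blob, i.e. a struct
        // containing a single padding filler spanning the whole layout.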
        FieldsShape::Primitive | FieldsShape::Union(_) => {
            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
            let packed = false;
            match name {
                None => cx.type_struct(&[fill], packed),
                Some(ref name) => {
                    let llty = cx.type_named_struct(name);
                    cx.set_struct_body(llty, &[fill], packed);
                    llty
                }
            }
        }
        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
        FieldsShape::Arbitrary { .. } => match name {
            None => {
                let (llfields, packed) = struct_llfields(cx, layout);
                cx.type_struct(&llfields, packed)
            }
            Some(ref name) => {
                let llty = cx.type_named_struct(name);
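                // Defer filling in the struct body: computing the field
                // types may recursively need this very type, which the
                // caller will have cached (as an opaque named struct) by
                // the time `defer` is processed.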
                *defer = Some((llty, layout));
                llty
            }
        },
    }
}

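/// Computes the LLVM field types for a layout with `FieldsShape::Arbitrary`,
/// walking the fields in increasing-offset order and inserting explicit
/// padding fillers so that every field lands at its layout offset. Returns
/// the field types together with whether the struct must be marked packed.
///
/// Illustrative sketch (offsets assumed for the example): a layout placing
/// `a: u8` at offset 0 and `b: u32` at offset 4 in an 8-byte struct would
/// yield roughly `[i8, <3-byte padding filler>, i32]` with `packed == false`.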
fn struct_llfields<'a, 'tcx>(
    cx: &CodegenCx<'a, 'tcx>,
    layout: TyAndLayout<'tcx>,
) -> (Vec<&'a Type>, bool) {
    debug!("struct_llfields: {:#?}", layout);
    let field_count = layout.fields.count();

    let mut packed = false;
    let mut offset = Size::ZERO;
    let mut prev_effective_align = layout.align.abi;
    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
    for i in layout.fields.index_by_increasing_offset() {
        let target_offset = layout.fields.offset(i as usize);
        let field = layout.field(cx, i);
        let effective_field_align =
            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
        packed |= effective_field_align < field.align.abi;

        debug!(
            "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
                effective_field_align: {}",
            i,
            field,
            offset,
            target_offset,
            effective_field_align.bytes()
        );
        assert!(target_offset >= offset);
        let padding = target_offset - offset;
        if padding != Size::ZERO {
            let padding_align = prev_effective_align.min(effective_field_align);
            assert_eq!(offset.align_to(padding_align) + padding, target_offset);
            result.push(cx.type_padding_filler(padding, padding_align));
            debug!("    padding before: {:?}", padding);
        }
        result.push(field.llvm_type(cx));
        offset = target_offset + field.size;
        prev_effective_align = effective_field_align;
    }
    if layout.is_sized() && field_count > 0 {
        if offset > layout.size {
            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
        }
        let padding = layout.size - offset;
        if padding != Size::ZERO {
            let padding_align = prev_effective_align;
            assert_eq!(offset.align_to(padding_align) + padding, layout.size);
            debug!(
                "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
                padding, offset, layout.size
            );
            result.push(cx.type_padding_filler(padding, padding_align));
        }
    } else {
        debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
    }
    (result, packed)
}

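// Convenience size/alignment queries, forwarding to `layout_of`.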
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
    pub(crate) fn align_of(&self, ty: Ty<'tcx>) -> Align {
        self.layout_of(ty).align.abi
    }

    pub(crate) fn size_of(&self, ty: Ty<'tcx>) -> Size {
        self.layout_of(ty).size
    }

    pub(crate) fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
        self.spanned_size_and_align_of(ty, DUMMY_SP)
    }

    pub(crate) fn spanned_size_and_align_of(&self, ty: Ty<'tcx>, span: Span) -> (Size, Align) {
        let layout = self.spanned_layout_of(ty, span);
        (layout.size, layout.align.abi)
    }
}

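/// LLVM-specific queries on a `TyAndLayout`: whether a value of this layout
/// is passed around as an immediate or a scalar pair, and which LLVM types
/// represent it. As a rough usage sketch (not a verbatim call site), codegen
/// asks `is_llvm_immediate()` to decide between an SSA value and memory, and
/// uses `llvm_type()` for allocas versus `immediate_llvm_type()` for SSA
/// values.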
pub(crate) trait LayoutLlvmExt<'tcx> {
    fn is_llvm_immediate(&self) -> bool;
    fn is_llvm_scalar_pair(&self) -> bool;
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type;
    fn scalar_pair_element_llvm_type<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        index: usize,
        immediate: bool,
    ) -> &'a Type;
}

impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
    fn is_llvm_immediate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
        }
    }

    fn is_llvm_scalar_pair(&self) -> bool {
        match self.backend_repr {
            BackendRepr::ScalarPair(..) => true,
            BackendRepr::Scalar(_)
            | BackendRepr::SimdVector { .. }
            | BackendRepr::Memory { .. } => false,
        }
    }

    /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
    /// The pointee type of the pointer in `PlaceRef` is always this type.
    /// For sized types, it is also the right LLVM type for an `alloca`
    /// containing a value of that type, and most immediates (except `bool`).
    /// Unsized types, however, are represented by a "minimal unit", e.g.
    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
    /// If the type is an unsized struct, the regular layout is generated,
    /// with the innermost trailing unsized field using the "minimal unit"
    /// of that field's type - this is useful for taking the address of
    /// that field and ensuring the struct has the right alignment.
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
        // In other words, this should generally not look at the type at all, but only at the
        // layout.
        if let BackendRepr::Scalar(scalar) = self.backend_repr {
            // Use a different cache for scalars because pointers to DSTs
            // can be either wide or thin (data pointers of wide pointers).
            if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                return llty;
            }
            let llty = self.scalar_llvm_type_at(cx, scalar);
            cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
            return llty;
        }

        // Check the cache.
        let variant_index = match self.variants {
            Variants::Single { index } => Some(index),
            _ => None,
        };
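        // The cache key includes the variant index, so a layout restricted
        // to a single variant does not collide with the full type.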
        if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
            return llty;
        }

        debug!("llvm_type({:#?})", self);

        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);

        // Make sure lifetimes are erased, to avoid generating distinct LLVM
        // types for Rust types that only differ in the choice of lifetimes.
        let normal_ty = cx.tcx.erase_and_anonymize_regions(self.ty);

        let mut defer = None;
        let llty = if self.ty != normal_ty {
            let mut layout = cx.layout_of(normal_ty);
            if let Some(v) = variant_index {
                layout = layout.for_variant(cx, v);
            }
            layout.llvm_type(cx)
        } else {
            uncached_llvm_type(cx, *self, &mut defer)
        };
        debug!("--> mapped {:#?} to llty={:?}", self, llty);

        cx.type_lowering.borrow_mut().insert((self.ty, variant_index), llty);

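        // Now that the (still opaque) named struct is cached, it is safe to
        // compute the field types: any recursive reference to this type will
        // hit the cache entry above instead of looping.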
        if let Some((llty, layout)) = defer {
            let (llfields, packed) = struct_llfields(cx, layout);
            cx.set_struct_body(llty, &llfields, packed);
        }
        llty
    }

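    /// The LLVM type for a value of this layout used as an immediate (an SSA
    /// value rather than memory): `bool` becomes `i1`, a scalar pair becomes
    /// a two-element struct of its immediate element types, and everything
    /// else falls back to `llvm_type`.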
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        match self.backend_repr {
            BackendRepr::Scalar(scalar) => {
                if scalar.is_bool() {
                    return cx.type_i1();
                }
            }
            BackendRepr::ScalarPair(..) => {
                // An immediate pair always contains just the two elements, without any padding
                // filler, as it should never be stored to memory.
                return cx.type_struct(
                    &[
                        self.scalar_pair_element_llvm_type(cx, 0, true),
                        self.scalar_pair_element_llvm_type(cx, 1, true),
                    ],
                    false,
                );
            }
            _ => {}
        };
        self.llvm_type(cx)
    }

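    /// Maps an ABI scalar to its LLVM type: integers become `iN`, floats
    /// become the matching LLVM float type, and pointers become an opaque
    /// pointer in the scalar's address space.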
    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type {
        match scalar.primitive() {
            Int(i, _) => cx.type_from_integer(i),
            Float(f) => cx.type_from_float(f),
            Pointer(address_space) => cx.type_ptr_ext(address_space),
        }
    }

    fn scalar_pair_element_llvm_type<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        index: usize,
        immediate: bool,
    ) -> &'a Type {
        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
        // In other words, this should generally not look at the type at all, but only at the
        // layout.
        let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
            bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
        };
        let scalar = [a, b][index];

        // Make sure to return the same type `immediate_llvm_type` would when
        // dealing with an immediate pair. This means that `(bool, bool)` is
        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
        // immediate, just like `bool` is typically `i8` in memory and only `i1`
        // when immediate. We need to load/store `bool` as `i8` to avoid
        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
        if immediate && scalar.is_bool() {
            return cx.type_i1();
        }

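        // Another illustrative case: a wide pointer such as `&[u8]` is a
        // ScalarPair of (data pointer, length), so element 0 lowers to a
        // pointer type and element 1 to the target's `usize` integer.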
        self.scalar_llvm_type_at(cx, scalar)
    }
}