rustc_codegen_ssa/traits/builder.rs

use std::assert_matches::assert_matches;
use std::ops::Deref;

use rustc_abi::{Align, Scalar, Size, WrappingRange};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::mir;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::{AtomicOrdering, Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_target::callconv::FnAbi;

use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::consts::ConstCodegenMethods;
use super::coverageinfo::CoverageInfoBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallBuilderMethods;
use super::misc::MiscCodegenMethods;
use super::type_::{ArgAbiBuilderMethods, BaseTypeCodegenMethods, LayoutTypeCodegenMethods};
use super::{CodegenMethods, StaticBuilderMethods};
use crate::MemFlags;
use crate::common::{AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
use crate::mir::operand::{OperandRef, OperandValue};
use crate::mir::place::{PlaceRef, PlaceValue};

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OverflowOp {
    Add,
    Sub,
    Mul,
}

pub trait BuilderMethods<'a, 'tcx>:
    Sized
    + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
    + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
    + Deref<Target = Self::CodegenCx>
    + CoverageInfoBuilderMethods<'tcx>
    + DebugInfoBuilderMethods<'tcx>
    + ArgAbiBuilderMethods<'tcx>
    + AbiBuilderMethods
    + IntrinsicCallBuilderMethods<'tcx>
    + AsmBuilderMethods<'tcx>
    + StaticBuilderMethods
{
    // `BackendTypes` is a supertrait of both `CodegenMethods` and
    // `BuilderMethods`. This bound ensures all impls agree on the associated
    // types within.
    type CodegenCx: CodegenMethods<
            'tcx,
            Value = Self::Value,
            Metadata = Self::Metadata,
            Function = Self::Function,
            BasicBlock = Self::BasicBlock,
            Type = Self::Type,
            Funclet = Self::Funclet,
            DIScope = Self::DIScope,
            DILocation = Self::DILocation,
            DIVariable = Self::DIVariable,
        >;

    fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;

    fn cx(&self) -> &Self::CodegenCx;
    fn llbb(&self) -> Self::BasicBlock;

    fn set_span(&mut self, span: Span);

    // FIXME(eddyb) replace uses of this with `append_sibling_block`.
    fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;

    fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;

    fn switch_to_block(&mut self, llbb: Self::BasicBlock);

    fn ret_void(&mut self);
    fn ret(&mut self, v: Self::Value);
    fn br(&mut self, dest: Self::BasicBlock);
    fn cond_br(
        &mut self,
        cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
    );

    // Conditional branch with expectation.
    //
    // This function is opt-in for back ends.
    //
    // The default implementation calls `self.expect()` before emitting the branch
    // by calling `self.cond_br()`.
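    // (With the LLVM backend, for example, the expectation is typically conveyed
    // via the `llvm.expect` intrinsic.)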
    fn cond_br_with_expect(
        &mut self,
        mut cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
        expect: Option<bool>,
    ) {
        if let Some(expect) = expect {
            cond = self.expect(cond, expect);
        }
        self.cond_br(cond, then_llbb, else_llbb)
    }

    fn switch(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
    );

    // This is like `switch()`, but every case has a bool flag indicating whether it's cold.
    //
    // The default implementation throws away the cold flags and calls `switch()`.
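    // (The LLVM backend, for example, overrides this to attach branch-weight
    // metadata marking the cold targets.)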
    fn switch_with_weights(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        _else_is_cold: bool,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock, bool)>,
    ) {
        self.switch(v, else_llbb, cases.map(|(val, bb, _)| (val, bb)))
    }

    fn invoke(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: Self::Value,
        args: &[Self::Value],
        then: Self::BasicBlock,
        catch: Self::BasicBlock,
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    ) -> Self::Value;
    fn unreachable(&mut self);

    /// Like [`Self::unreachable`], but for use in the middle of a basic block.
    fn unreachable_nonterminator(&mut self) {
        // This is the preferred LLVM incantation for this per
        // https://llvm.org/docs/Frontend/PerformanceTips.html#other-things-to-consider
        // Other backends may override if they have a better way.
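        // (For LLVM this amounts to `store i1 true, ptr poison`, which the
        // optimizer treats as unreachable.)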
        let const_true = self.cx().const_bool(true);
        let poison_ptr = self.const_poison(self.cx().type_ptr());
        self.store(const_true, poison_ptr, Align::ONE);
    }

    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate a left-shift. Both operands must have the same size. The right operand must be
    /// interpreted as unsigned and can be assumed to be less than the size of the left operand.
    fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate a logical right-shift. Both operands must have the same size. The right operand
    /// must be interpreted as unsigned and can be assumed to be less than the size of the left
    /// operand.
    fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate an arithmetic right-shift. Both operands must have the same size. The right operand
    /// must be interpreted as unsigned and can be assumed to be less than the size of the left
    /// operand.
    fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.add(lhs, rhs)
    }
    fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.add(lhs, rhs)
    }
    fn unchecked_suadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.unchecked_sadd(lhs, rhs)
    }
    fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.sub(lhs, rhs)
    }
    fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.sub(lhs, rhs)
    }
    fn unchecked_susub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.unchecked_ssub(lhs, rhs)
    }
    fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.mul(lhs, rhs)
    }
    fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.mul(lhs, rhs)
    }
    fn unchecked_sumul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        // Which to default to is a fairly arbitrary choice,
        // but this is what slice layout was using before.
        self.unchecked_smul(lhs, rhs)
    }
    fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Defaults to [`Self::or`], but guarantees `(lhs & rhs) == 0` so some backends
    /// can emit something more helpful for optimizations.
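    /// (The LLVM backend, for instance, can set the `disjoint` flag on the `or`.)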
    fn or_disjoint(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.or(lhs, rhs)
    }
    fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn neg(&mut self, v: Self::Value) -> Self::Value;
    fn fneg(&mut self, v: Self::Value) -> Self::Value;
    fn not(&mut self, v: Self::Value) -> Self::Value;

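    /// Performs the checked operation `oop` on `lhs` and `rhs` of type `ty`,
    /// returning the wrapped result together with a boolean overflow flag
    /// (comparable to LLVM's `*.with.overflow` intrinsics).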
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'tcx>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value);

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;

    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
    fn scalable_alloca(&mut self, elt: u64, align: Align, element_ty: Ty<'_>) -> Self::Value;

    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
    fn atomic_load(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    ) -> Self::Value;
    fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.load(ty, place.llval, place.align)
    }
    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
    -> OperandRef<'tcx, Self::Value>;

    /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset.
    fn write_operand_repeatedly(
        &mut self,
        elem: OperandRef<'tcx, Self::Value>,
        count: u64,
        dest: PlaceRef<'tcx, Self::Value>,
    );

    /// Emits an `assume` that the integer value `imm` of type `ty` is contained in `range`.
    ///
    /// This *always* emits the assumption, so you probably want to check the
    /// optimization level and `Scalar::is_always_valid` before calling it.
    fn assume_integer_range(&mut self, imm: Self::Value, ty: Self::Type, range: WrappingRange) {
        let WrappingRange { start, end } = range;

        // Perhaps one day we'll be able to use assume operand bundles for this,
        // but for now this encoding with a single icmp+assume is best per
        // <https://github.com/llvm/llvm-project/issues/123278#issuecomment-2597440158>
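        // For example, with an 8-bit `ty` and `range` 10..=250 this emits
        // `assume((imm - 10) u<= 240)`; a wrapping range such as 250..=10 is
        // handled the same way, since the subtraction wraps around.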
        let shifted = if start == 0 {
            imm
        } else {
            let low = self.const_uint_big(ty, start);
            self.sub(imm, low)
        };
        let width = self.const_uint_big(ty, u128::wrapping_sub(end, start));
        let cmp = self.icmp(IntPredicate::IntULE, shifted, width);
        self.assume(cmp);
    }

    /// Emits an `assume` that the pointer `val` is non-null.
    ///
    /// You may want to check the optimization level before bothering to call this.
    fn assume_nonnull(&mut self, val: Self::Value) {
        // Arguably in LLVM it'd be better to emit an assume operand bundle instead
        // <https://llvm.org/docs/LangRef.html#assume-operand-bundles>
        // but this works fine for all backends.

        let null = self.const_null(self.type_ptr());
        let is_not_null = self.icmp(IntPredicate::IntNE, val, null);
        self.assume(is_not_null);
    }

    fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
    fn nonnull_metadata(&mut self, load: Self::Value);

    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
    fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.store(val, place.llval, place.align)
    }
    fn store_with_flags(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        align: Align,
        flags: MemFlags,
    ) -> Self::Value;
    fn store_to_place_with_flags(
        &mut self,
        val: Self::Value,
        place: PlaceValue<Self::Value>,
        flags: MemFlags,
    ) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.store_with_flags(val, place.llval, place.align, flags)
    }
    fn atomic_store(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    );

    fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
    fn inbounds_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value;
    fn inbounds_nuw_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value {
        self.inbounds_gep(ty, ptr, indices)
    }
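    /// Offsets `ptr` by `offset` bytes, i.e. a byte-wise (`i8`) `gep` with no
    /// `inbounds` requirement.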
    fn ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        self.gep(self.cx().type_i8(), ptr, &[offset])
    }
    fn inbounds_ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        self.inbounds_gep(self.cx().type_i8(), ptr, &[offset])
    }

    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    /// Produces the same value as [`Self::trunc`] (and defaults to that),
    /// but is UB unless *zero*-extending the result can reproduce `val`.
    fn unchecked_utrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value {
        self.trunc(val, dest_ty)
    }
    /// Produces the same value as [`Self::trunc`] (and defaults to that),
    /// but is UB unless *sign*-extending the result can reproduce `val`.
    fn unchecked_strunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value {
        self.trunc(val, dest_ty)
    }

    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
    fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn cast_float_to_int(
        &mut self,
        signed: bool,
        x: Self::Value,
        dest_ty: Self::Type,
    ) -> Self::Value {
        let in_ty = self.cx().val_ty(x);
        let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
            && self.cx().type_kind(in_ty) == TypeKind::Vector
        {
            (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
        } else {
            (in_ty, dest_ty)
        };
        assert_matches!(
            self.cx().type_kind(float_ty),
            TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::FP128
        );
        assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);

        if let Some(false) = self.cx().sess().opts.unstable_opts.saturating_float_casts {
            return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
        }

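        // Otherwise use the saturating conversions, matching Rust's defined `as`
        // semantics (out-of-range values saturate, NaN becomes zero).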
        if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) }
    }

    fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;

    /// Returns `-1` if `lhs < rhs`, `0` if `lhs == rhs`, and `1` if `lhs > rhs`.
    fn three_way_compare(
        &mut self,
        ty: Ty<'tcx>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> Self::Value {
        // FIXME: This implementation was designed around LLVM's ability to optimize, but `cg_llvm`
        // overrides this to just use `@llvm.scmp`/`ucmp` since LLVM 20. This default impl should be
        // reevaluated with respect to the remaining backends like cg_gcc, whether they might use
        // specialized implementations as well, or continue to use a generic implementation here.
        use std::cmp::Ordering;
        let pred = |op| crate::base::bin_op_to_icmp_predicate(op, ty.is_signed());
        if self.cx().sess().opts.optimize == OptLevel::No {
            // This actually generates tighter assembly, and is a classic trick:
            // <https://graphics.stanford.edu/~seander/bithacks.html#CopyIntegerSign>.
            // However, as of 2023-11 it optimized worse in LLVM in things like derived
            // `PartialOrd`, so we were only using it in debug. Since LLVM now uses its own
            // intrinsics, it may be worth trying it in optimized builds for other backends.
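            // In effect this computes `(lhs > rhs) as i8 - (lhs < rhs) as i8`.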
            let is_gt = self.icmp(pred(mir::BinOp::Gt), lhs, rhs);
            let gtext = self.zext(is_gt, self.type_i8());
            let is_lt = self.icmp(pred(mir::BinOp::Lt), lhs, rhs);
            let ltext = self.zext(is_lt, self.type_i8());
            self.unchecked_ssub(gtext, ltext)
        } else {
            // These operations were better optimized by LLVM, before `@llvm.scmp`/`ucmp` in 20.
            // See <https://github.com/rust-lang/rust/pull/63767>.
            let is_lt = self.icmp(pred(mir::BinOp::Lt), lhs, rhs);
            let is_ne = self.icmp(pred(mir::BinOp::Ne), lhs, rhs);
            let ge = self.select(
                is_ne,
                self.cx().const_i8(Ordering::Greater as i8),
                self.cx().const_i8(Ordering::Equal as i8),
            );
            self.select(is_lt, self.cx().const_i8(Ordering::Less as i8), ge)
        }
    }

    fn memcpy(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
        tt: Option<rustc_ast::expand::typetree::FncTree>,
    );
    fn memmove(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memset(
        &mut self,
        ptr: Self::Value,
        fill_byte: Self::Value,
        size: Self::Value,
        align: Align,
        flags: MemFlags,
    );

    /// *Typed* copy for non-overlapping places.
    ///
    /// Has a default implementation in terms of `memcpy`, but specific backends
    /// can override to do something smarter if possible.
    ///
    /// (For example, typed load-stores with alias metadata.)
    fn typed_place_copy(
        &mut self,
        dst: PlaceValue<Self::Value>,
        src: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
    ) {
        self.typed_place_copy_with_flags(dst, src, layout, MemFlags::empty());
    }

    fn typed_place_copy_with_flags(
        &mut self,
        dst: PlaceValue<Self::Value>,
        src: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
        flags: MemFlags,
    ) {
        assert!(layout.is_sized(), "cannot typed-copy an unsized type");
        assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
        assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let ty = self.backend_type(layout);
            let val = self.load_from_place(ty, src);
            self.store_to_place_with_flags(val, dst, flags);
        } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(layout) {
            // If we're not optimizing, the aliasing information from `memcpy`
            // isn't useful, so just load-store the value for smaller code.
            let temp = self.load_operand(src.with_type(layout));
            temp.val.store_with_flags(self, dst.with_type(layout), flags);
        } else if !layout.is_zst() {
            let bytes = self.const_usize(layout.size.bytes());
            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags, None);
        }
    }

    /// *Typed* swap for non-overlapping places.
    ///
    /// Avoids `alloca`s for Immediates and ScalarPairs.
    ///
    /// FIXME: Maybe do something smarter for Ref types too?
    /// For now, the `typed_swap_nonoverlapping` intrinsic just doesn't call this for those
    /// cases (in non-debug), preferring the fallback body instead.
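    ///
    /// The default implementation loads `left` into a temporary (spilling it to
    /// a fresh alloca if it is a by-ref operand), copies `right` over `left`,
    /// and then stores the temporary into `right`.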
    fn typed_place_swap(
        &mut self,
        left: PlaceValue<Self::Value>,
        right: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
    ) {
        let mut temp = self.load_operand(left.with_type(layout));
        if let OperandValue::Ref(..) = temp.val {
            // The SSA value isn't stand-alone, so we need to copy it elsewhere
            let alloca = PlaceRef::alloca(self, layout);
            self.typed_place_copy(alloca.val, left, layout);
            temp = self.load_operand(alloca);
        }
        self.typed_place_copy(left, right, layout);
        temp.val.store(self, right.with_type(layout));
    }

    fn select(
        &mut self,
        cond: Self::Value,
        then_val: Self::Value,
        else_val: Self::Value,
    ) -> Self::Value;

    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;

    fn set_personality_fn(&mut self, personality: Self::Function);

    // These are used by everyone except msvc
    fn cleanup_landing_pad(&mut self, pers_fn: Self::Function) -> (Self::Value, Self::Value);
    fn filter_landing_pad(&mut self, pers_fn: Self::Function);
    fn resume(&mut self, exn0: Self::Value, exn1: Self::Value);

    // These are used only by msvc
    fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
    fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
    fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
    fn catch_switch(
        &mut self,
        parent: Option<Self::Value>,
        unwind: Option<Self::BasicBlock>,
        handlers: &[Self::BasicBlock],
    ) -> Self::Value;

    fn atomic_cmpxchg(
        &mut self,
        dst: Self::Value,
        cmp: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
        failure_order: AtomicOrdering,
        weak: bool,
    ) -> (Self::Value, Self::Value);
    /// `ret_ptr` indicates whether the return type (which is also the type `dst` points to)
    /// is a pointer or the same type as `src`.
    fn atomic_rmw(
        &mut self,
        op: AtomicRmwBinOp,
        dst: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
        ret_ptr: bool,
    ) -> Self::Value;
    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
    fn set_invariant_load(&mut self, load: Self::Value);

    /// Called for `StorageLive`
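    /// (for LLVM this typically lowers to an `llvm.lifetime.start` marker).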
    fn lifetime_start(&mut self, ptr: Self::Value, size: Size);

    /// Called for `StorageDead`
    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);

    /// "Finally codegen the call"
    ///
    /// ## Arguments
    ///
    /// The `fn_attrs`, `fn_abi`, and `instance` arguments are `Option`s because they are advisory.
    /// They relate to optional codegen enhancements like LLVM CFI, and do not affect ABI per se.
    /// Any ABI-related transformations should be handled by different, earlier stages of codegen,
    /// for instance in the caller of `BuilderMethods::call`.
    ///
    /// This means that a codegen backend which disregards `fn_attrs`, `fn_abi`, and `instance`
    /// should still do correct codegen, and code should not be miscompiled if they are omitted.
    /// It is not a miscompilation in this sense if the resulting code fails to run under CFI,
    /// other sanitizers, or in the context of other compiler-enhanced security features.
    ///
    /// The typical case in which they are `None` is during the codegen of intrinsics and
    /// lang-items, as those are "fake functions" with only a trivial ABI if any, et cetera.
    ///
    /// ## Return
    ///
    /// Must return the value the function will return so it can be written to the destination,
    /// assuming the function does not explicitly pass the destination as a pointer in `args`.
    fn call(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        fn_val: Self::Value,
        args: &[Self::Value],
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    ) -> Self::Value;

    fn tail_call(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        llfn: Self::Value,
        args: &[Self::Value],
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    );

    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn apply_attrs_to_cleanup_callsite(&mut self, llret: Self::Value);
}