rustc_codegen_ssa/traits/builder.rs

use std::assert_matches::assert_matches;
use std::ops::Deref;

use rustc_abi::{Align, Scalar, Size, WrappingRange};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::{Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_target::callconv::FnAbi;

use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::consts::ConstCodegenMethods;
use super::coverageinfo::CoverageInfoBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallBuilderMethods;
use super::misc::MiscCodegenMethods;
use super::type_::{ArgAbiBuilderMethods, BaseTypeCodegenMethods, LayoutTypeCodegenMethods};
use super::{CodegenMethods, StaticBuilderMethods};
use crate::MemFlags;
use crate::common::{
    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
};
use crate::mir::operand::{OperandRef, OperandValue};
use crate::mir::place::{PlaceRef, PlaceValue};

#[derive(Copy, Clone, Debug)]
pub enum OverflowOp {
    Add,
    Sub,
    Mul,
}

pub trait BuilderMethods<'a, 'tcx>:
    Sized
    + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
    + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
    + Deref<Target = Self::CodegenCx>
    + CoverageInfoBuilderMethods<'tcx>
    + DebugInfoBuilderMethods
    + ArgAbiBuilderMethods<'tcx>
    + AbiBuilderMethods
    + IntrinsicCallBuilderMethods<'tcx>
    + AsmBuilderMethods<'tcx>
    + StaticBuilderMethods
{
    // `BackendTypes` is a supertrait of both `CodegenMethods` and
    // `BuilderMethods`. This bound ensures all impls agree on the associated
    // types within.
    type CodegenCx: CodegenMethods<
            'tcx,
            Value = Self::Value,
            Metadata = Self::Metadata,
            Function = Self::Function,
            BasicBlock = Self::BasicBlock,
            Type = Self::Type,
            Funclet = Self::Funclet,
            DIScope = Self::DIScope,
            DILocation = Self::DILocation,
            DIVariable = Self::DIVariable,
        >;

    fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;

    fn cx(&self) -> &Self::CodegenCx;
    fn llbb(&self) -> Self::BasicBlock;

    fn set_span(&mut self, span: Span);

    // FIXME(eddyb) replace uses of this with `append_sibling_block`.
    fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;

    fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;

    fn switch_to_block(&mut self, llbb: Self::BasicBlock);

    fn ret_void(&mut self);
    fn ret(&mut self, v: Self::Value);
    fn br(&mut self, dest: Self::BasicBlock);
    fn cond_br(
        &mut self,
        cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
    );

    // Conditional with expectation.
    //
    // This function is opt-in for back ends.
    //
    // The default implementation calls `self.expect()` before emitting the branch
    // by calling `self.cond_br()`.
    fn cond_br_with_expect(
        &mut self,
        mut cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
        expect: Option<bool>,
    ) {
        if let Some(expect) = expect {
            cond = self.expect(cond, expect);
        }
        self.cond_br(cond, then_llbb, else_llbb)
    }
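
    // For example, when lowering a branch that is statically expected to be taken
    // (hypothetical `bx`, `cond`, `bb_then`, `bb_else` locals), a caller might write:
    //
    //     bx.cond_br_with_expect(cond, bb_then, bb_else, Some(true));
    //
    // Passing `None` degenerates to a plain `cond_br` with no expectation hint.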

    fn switch(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
    );

    // This is like `switch()`, but every case has a bool flag indicating whether it's cold.
    //
    // Default implementation throws away the cold flags and calls `switch()`.
    fn switch_with_weights(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        _else_is_cold: bool,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock, bool)>,
    ) {
        self.switch(v, else_llbb, cases.map(|(val, bb, _)| (val, bb)))
    }

    fn invoke(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: Self::Value,
        args: &[Self::Value],
        then: Self::BasicBlock,
        catch: Self::BasicBlock,
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    ) -> Self::Value;
    fn unreachable(&mut self);

    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate a left-shift. Both operands must have the same size. The right operand must be
    /// interpreted as unsigned and can be assumed to be less than the size of the left operand.
    fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate a logical right-shift. Both operands must have the same size. The right operand
    /// must be interpreted as unsigned and can be assumed to be less than the size of the left
    /// operand.
    fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate an arithmetic right-shift. Both operands must have the same size. The right operand
    /// must be interpreted as unsigned and can be assumed to be less than the size of the left
    /// operand.
    fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.add(lhs, rhs)
    }
    fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.add(lhs, rhs)
    }
    fn unchecked_suadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.unchecked_sadd(lhs, rhs)
    }
    fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.sub(lhs, rhs)
    }
    fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.sub(lhs, rhs)
    }
    fn unchecked_susub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.unchecked_ssub(lhs, rhs)
    }
    fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.mul(lhs, rhs)
    }
    fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.mul(lhs, rhs)
    }
    fn unchecked_sumul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        // Which to default to is a fairly arbitrary choice,
        // but this is what slice layout was using before.
        self.unchecked_smul(lhs, rhs)
    }
    fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Defaults to [`Self::or`], but guarantees `(lhs & rhs) == 0` so some backends
    /// can emit something more helpful for optimizations.
    fn or_disjoint(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.or(lhs, rhs)
    }
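
    // As an illustration, when combining two values whose set bits cannot overlap
    // (hypothetical `bx`, `hi`, `lo` values, e.g. `hi` already shifted into the
    // upper bits and `lo` confined to the lower bits):
    //
    //     let packed = bx.or_disjoint(hi, lo);
    //
    // behaves like `bx.or(hi, lo)`, but a backend may lower it to something the
    // optimizer can treat as an addition (e.g. LLVM's `or disjoint`).
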
    fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn neg(&mut self, v: Self::Value) -> Self::Value;
    fn fneg(&mut self, v: Self::Value) -> Self::Value;
    fn not(&mut self, v: Self::Value) -> Self::Value;

    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value);
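
    // `checked_binop` returns the wrapped result together with an overflow flag,
    // mirroring Rust's `overflowing_*` operations. For instance, with hypothetical
    // `bx`, `u8_ty`, `lhs`, `rhs` values where `lhs` and `rhs` are `u8`s holding
    // 200 and 100 at run time,
    //
    //     let (val, overflowed) = bx.checked_binop(OverflowOp::Add, u8_ty, lhs, rhs);
    //
    // emits code that produces the pair (44, true), since 200 + 100 wraps to 44.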

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;

    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
    fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;

    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
    fn atomic_load(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    ) -> Self::Value;
    fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.load(ty, place.llval, place.align)
    }
    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
    -> OperandRef<'tcx, Self::Value>;

    /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset.
    fn write_operand_repeatedly(
        &mut self,
        elem: OperandRef<'tcx, Self::Value>,
        count: u64,
        dest: PlaceRef<'tcx, Self::Value>,
    );

    /// Emits an `assume` that the integer value `imm` of type `ty` is contained in `range`.
    ///
    /// This *always* emits the assumption, so you probably want to check the
    /// optimization level and `Scalar::is_always_valid` before calling it.
    fn assume_integer_range(&mut self, imm: Self::Value, ty: Self::Type, range: WrappingRange) {
        let WrappingRange { start, end } = range;

        // Perhaps one day we'll be able to use assume operand bundles for this,
        // but for now this encoding with a single icmp+assume is best per
        // <https://github.com/llvm/llvm-project/issues/123278#issuecomment-2597440158>
        let shifted = if start == 0 {
            imm
        } else {
            let low = self.const_uint_big(ty, start);
            self.sub(imm, low)
        };
        let width = self.const_uint_big(ty, u128::wrapping_sub(end, start));
        let cmp = self.icmp(IntPredicate::IntULE, shifted, width);
        self.assume(cmp);
    }
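
    // Worked example of the encoding above: for a non-wrapping `range` of 1..=10
    // over a 32-bit `ty`, `shifted` is `imm - 1` and `width` is 9, so the single
    // unsigned comparison `imm - 1 <=u 9` holds exactly when `1 <= imm <= 10`.
    // The same subtract-and-compare also covers wrapping ranges, because the
    // subtraction wraps along with the range.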

    /// Emits an `assume` that the `val` of pointer type is non-null.
    ///
    /// You may want to check the optimization level before bothering to call this.
    fn assume_nonnull(&mut self, val: Self::Value) {
        // Arguably in LLVM it'd be better to emit an assume operand bundle instead
        // <https://llvm.org/docs/LangRef.html#assume-operand-bundles>
        // but this works fine for all backends.

        let null = self.const_null(self.type_ptr());
        let is_not_null = self.icmp(IntPredicate::IntNE, val, null);
        self.assume(is_not_null);
    }

    fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
    fn nonnull_metadata(&mut self, load: Self::Value);

    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
    fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.store(val, place.llval, place.align)
    }
    fn store_with_flags(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        align: Align,
        flags: MemFlags,
    ) -> Self::Value;
    fn store_to_place_with_flags(
        &mut self,
        val: Self::Value,
        place: PlaceValue<Self::Value>,
        flags: MemFlags,
    ) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.store_with_flags(val, place.llval, place.align, flags)
    }
    fn atomic_store(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    );

    fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
    fn inbounds_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value;
    fn inbounds_nuw_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value {
        self.inbounds_gep(ty, ptr, indices)
    }
    fn ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        self.gep(self.cx().type_i8(), ptr, &[offset])
    }
    fn inbounds_ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        self.inbounds_gep(self.cx().type_i8(), ptr, &[offset])
    }
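
    // Both helpers compute a byte offset from `ptr` via a `gep` over `i8`. For
    // example, stepping past an 8-byte header (hypothetical `bx` and `ptr`
    // values) might look like:
    //
    //     let offset = bx.const_usize(8);
    //     let field_ptr = bx.inbounds_ptradd(ptr, offset);
    //
    // while plain `ptradd` is the variant that omits the `inbounds` requirement.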

    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    /// Produces the same value as [`Self::trunc`] (and defaults to that),
    /// but is UB unless *zero*-extending the result can reproduce `val`.
    fn unchecked_utrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value {
        self.trunc(val, dest_ty)
    }
    /// Produces the same value as [`Self::trunc`] (and defaults to that),
    /// but is UB unless *sign*-extending the result can reproduce `val`.
    fn unchecked_strunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value {
        self.trunc(val, dest_ty)
    }
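
    // Concretely, truncating the 32-bit value 255 to 8 bits is fine for
    // `unchecked_utrunc` (the result 0xFF zero-extends back to 255), whereas
    // truncating 300 would be UB: the result 0x2C zero-extends to 44, not 300.
    // A backend may exploit the extra guarantee, e.g. via LLVM's `trunc nuw`/`trunc nsw`.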

    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
    fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn cast_float_to_int(
        &mut self,
        signed: bool,
        x: Self::Value,
        dest_ty: Self::Type,
    ) -> Self::Value {
        let in_ty = self.cx().val_ty(x);
        let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
            && self.cx().type_kind(in_ty) == TypeKind::Vector
        {
            (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
        } else {
            (in_ty, dest_ty)
        };
        assert_matches!(
            self.cx().type_kind(float_ty),
            TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::FP128
        );
        assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);

        if let Some(false) = self.cx().sess().opts.unstable_opts.saturating_float_casts {
            return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
        }

        if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) }
    }
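
    // The saturating path implements Rust's `as`-cast semantics for float-to-int
    // conversions: out-of-range values clamp to the integer type's bounds and NaN
    // becomes zero. For instance, casting to `u8`, 300.0 becomes 255, -1.0 becomes
    // 0, and NaN becomes 0. The non-saturating `fptosi`/`fptoui` escape hatch is
    // only taken when saturating float casts are explicitly disabled via the
    // unstable `saturating_float_casts` option.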

    fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;

    /// Returns `-1` if `lhs < rhs`, `0` if `lhs == rhs`, and `1` if `lhs > rhs`.
    // FIXME: Move the default implementation from `codegen_scalar_binop` into this method and
    // remove the `Option` return once LLVM 20 is the minimum version.
    fn three_way_compare(
        &mut self,
        _ty: Ty<'tcx>,
        _lhs: Self::Value,
        _rhs: Self::Value,
    ) -> Option<Self::Value> {
        None
    }

    fn memcpy(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memmove(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memset(
        &mut self,
        ptr: Self::Value,
        fill_byte: Self::Value,
        size: Self::Value,
        align: Align,
        flags: MemFlags,
    );

    /// *Typed* copy for non-overlapping places.
    ///
    /// Has a default implementation in terms of `memcpy`, but specific backends
    /// can override to do something smarter if possible.
    ///
    /// (For example, typed load-stores with alias metadata.)
    fn typed_place_copy(
        &mut self,
        dst: PlaceValue<Self::Value>,
        src: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
    ) {
        self.typed_place_copy_with_flags(dst, src, layout, MemFlags::empty());
    }

    fn typed_place_copy_with_flags(
        &mut self,
        dst: PlaceValue<Self::Value>,
        src: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
        flags: MemFlags,
    ) {
        assert!(layout.is_sized(), "cannot typed-copy an unsized type");
        assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
        assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let ty = self.backend_type(layout);
            let val = self.load_from_place(ty, src);
            self.store_to_place_with_flags(val, dst, flags);
        } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(layout) {
            // If we're not optimizing, the aliasing information from `memcpy`
            // isn't useful, so just load-store the value for smaller code.
            let temp = self.load_operand(src.with_type(layout));
            temp.val.store_with_flags(self, dst.with_type(layout), flags);
        } else if !layout.is_zst() {
            let bytes = self.const_usize(layout.size.bytes());
            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
        }
    }
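
    // In short: nontemporal copies go through a typed load/store pair (there is no
    // nontemporal memcpy), unoptimized builds of immediate-sized layouts use a
    // plain load/store for smaller code, and everything else, except ZSTs which
    // need no copy at all, falls back to `memcpy`.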

    /// *Typed* swap for non-overlapping places.
    ///
    /// Avoids `alloca`s for Immediates and ScalarPairs.
    ///
    /// FIXME: Maybe do something smarter for Ref types too?
    /// For now, the `typed_swap_nonoverlapping` intrinsic just doesn't call this for those
    /// cases (in non-debug), preferring the fallback body instead.
    fn typed_place_swap(
        &mut self,
        left: PlaceValue<Self::Value>,
        right: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
    ) {
        let mut temp = self.load_operand(left.with_type(layout));
        if let OperandValue::Ref(..) = temp.val {
            // The SSA value isn't stand-alone, so we need to copy it elsewhere
            let alloca = PlaceRef::alloca(self, layout);
            self.typed_place_copy(alloca.val, left, layout);
            temp = self.load_operand(alloca);
        }
        self.typed_place_copy(left, right, layout);
        temp.val.store(self, right.with_type(layout));
    }
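
    // In effect this is the classic three-step swap, `temp = *left; *left = *right;
    // *right = temp;`, with the temporary kept in an SSA value (or spilled to a
    // fresh alloca when the operand is only available by reference).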

    fn select(
        &mut self,
        cond: Self::Value,
        then_val: Self::Value,
        else_val: Self::Value,
    ) -> Self::Value;

    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;

    fn set_personality_fn(&mut self, personality: Self::Value);

    // These are used by everyone except msvc
    fn cleanup_landing_pad(&mut self, pers_fn: Self::Value) -> (Self::Value, Self::Value);
    fn filter_landing_pad(&mut self, pers_fn: Self::Value) -> (Self::Value, Self::Value);
    fn resume(&mut self, exn0: Self::Value, exn1: Self::Value);

    // These are used only by msvc
    fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
    fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
    fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
    fn catch_switch(
        &mut self,
        parent: Option<Self::Value>,
        unwind: Option<Self::BasicBlock>,
        handlers: &[Self::BasicBlock],
    ) -> Self::Value;

    fn atomic_cmpxchg(
        &mut self,
        dst: Self::Value,
        cmp: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
        failure_order: AtomicOrdering,
        weak: bool,
    ) -> (Self::Value, Self::Value);
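
    // `atomic_cmpxchg` returns the previously stored value together with a flag
    // indicating whether the exchange succeeded, broadly mirroring LLVM's
    // `cmpxchg`, which yields the loaded value plus an `i1` success bit; the pair
    // is what `compare_exchange`-style intrinsics use to report both the observed
    // value and success or failure.
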
    fn atomic_rmw(
        &mut self,
        op: AtomicRmwBinOp,
        dst: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
    ) -> Self::Value;
    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
    fn set_invariant_load(&mut self, load: Self::Value);

    /// Called for `StorageLive`
    fn lifetime_start(&mut self, ptr: Self::Value, size: Size);

    /// Called for `StorageDead`
    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);

    fn call(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: Self::Value,
        args: &[Self::Value],
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    ) -> Self::Value;
    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn apply_attrs_to_cleanup_callsite(&mut self, llret: Self::Value);
}