
rustc_codegen_ssa/mir/rvalue.rs

use itertools::Itertools as _;
use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::FunctionCx;
use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
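    /// Codegens `rvalue` and stores the result into the destination place `dest`.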
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                if let mir::Operand::Constant(const_op) = operand {
                    let val = self.eval_mir_constant(&const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        return;
                    }
                }
                let cg_operand = self.codegen_operand(bx, operand);
                // Crucially, we do *not* use `OperandValue::Ref` for types with
                // `BackendRepr::Scalar | BackendRepr::ScalarPair`. This ensures we match the MIR
                // semantics regarding when assignment operators allow overlap of LHS and RHS.
                if matches!(
                    cg_operand.layout.backend_repr,
                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..),
                ) {
                    debug_assert!(!matches!(cg_operand.val, OperandValue::Ref(..)));
                }
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.store_with_annotation(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
                // The destination necessarily contains a wide pointer, so if
                // it's a scalar pair, it's a wide pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a wide pointer -- just
                    // use the operand path.
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.store_with_annotation(bx, dest);
                    return;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.store_with_annotation(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(
                mir::CastKind::Transmute | mir::CastKind::Subtype,
                ref operand,
                _ty,
            ) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return;
                }

                // When the element is a const with all bytes uninit, emit a single memset that
                // writes undef to the entire destination.
                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

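                // Helper: try to lower the whole repeat to one memset (when every byte
                // of the element is the same, or the element is itself a single byte).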
                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize arrays whose bytes are all the same
                    if let Some(int) = bx.cx().const_to_opt_u128(v, false)
                        && let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()]
                        && let Ok(&byte) = bytes.iter().all_equal_value()
                    {
                        let fill = bx.cx().const_u8(byte);
                        bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                        return true;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                if let OperandValue::Immediate(v) = cg_elem.val
                    && try_init_all_same(bx, v)
                {
                    return;
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

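                // No memset shortcut applied; store the element into each array slot in turn.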
                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

            // This implementation does field projection, so never use it for `RawPtr`,
            // which will always be fine with the `codegen_rvalue_operand` path below.
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.store_with_annotation(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.store_with_annotation(bx, dest);
            }
        }
    }

    /// Transmutes the `src` value to the destination type by writing it to `dst`.
    ///
    /// See also [`Self::codegen_transmute_operand`] for cases that can be done
    /// without needing a pre-allocated place for the destination.
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        // The MIR validator enforces no unsized transmutes.
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
            // These cases are all UB to actually hit, so don't emit code for them.
            // (The size mismatches are reachable via `transmute_unchecked`.)
            bx.unreachable_nonterminator();
        } else {
            // Since in this path we have a place anyway, we can store or copy to it,
            // making sure we use the destination place's alignment even if the
            // source would normally have a higher one.
            src.store_with_annotation(bx, dst.val.with_type(src.layout));
        }
    }

    /// Transmutes an `OperandValue` to another `OperandValue`.
    ///
    /// This is supported for all cases where the `cast` type is SSA,
    /// but for non-ZSTs with [`abi::BackendRepr::Memory`] it ICEs.
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
            && !cast.is_zst()
        {
            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
        }

        // `Layout` is interned, so we can do a cheap check for things that are
        // exactly the same and thus don't need any handling.
        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
            return operand.val;
        }

        // Check for transmutes that are always UB.
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            bx.unreachable_nonterminator();

            // We still need to return a value of the appropriate type, but
            // it's already UB so do the easiest thing available.
            return OperandValue::poison(bx, cast);
        }

        // To or from pointers takes different methods, so we use this to restrict
        // the SimdVector case to types which can be `bitcast` between each other.
        #[inline]
        fn vector_can_bitcast(x: abi::Scalar) -> bool {
            matches!(
                x,
                abi::Scalar::Initialized {
                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
                    ..
                }
            )
        }

        let cx = bx.cx();
        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
                // The existing alignment is part of `source_place_val`,
                // so that alignment will be used, not `cast`'s.
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::SimdVector { element: from_scalar, .. },
                abi::BackendRepr::SimdVector { element: to_scalar, .. },
            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
                let to_backend_ty = bx.cx().immediate_backend_type(cast);
                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::SimdScalableVector { element: from_scalar, .. },
                abi::BackendRepr::SimdScalableVector { element: to_scalar, .. },
            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
                let to_backend_ty = bx.cx().immediate_backend_type(cast);
                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
            }
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
                OperandValue::Pair(
                    transmute_scalar(bx, imm_a, in_a, out_a),
                    transmute_scalar(bx, imm_b, in_b, out_b),
                )
            }
            _ => {
                // For any other potentially-tricky cases, make a temporary instead.
                // If anything else wants the target local to be in memory this won't
                // be hit, as `codegen_transmute` will get called directly. Thus this
                // is only for places where everything else wants the operand form,
                // and thus it's not worth making those places get it from memory.
                //
                // Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
                // and endianness issues, as do SimdVector ones to avoid worrying
                // about things like f32x8 ⇌ ptrx4 that would need multiple steps.
                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
                let size = Ord::max(operand.layout.size, cast.size);
                let temp = PlaceValue::alloca(bx, size, align);
                bx.lifetime_start(temp.llval, size);
                operand.store_with_annotation(bx, temp.with_type(operand.layout));
                let val = bx.load_operand(temp.with_type(cast)).val;
                bx.lifetime_end(temp.llval, size);
                val
            }
        }
    }

    /// Cast one of the immediates from an [`OperandValue::Immediate`]
    /// or an [`OperandValue::Pair`] to an immediate of the target type.
    ///
    /// Returns `None` if the cast is not possible.
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

        // When scalars are passed by value, there's no metadata recording their
        // valid ranges. For example, `char`s are passed as just `i32`, with no
        // way for LLVM to know that they're 0x10FFFF at most. Thus we assume
        // the range of the input value too, not just the output range.
        assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer, _
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
                                // Cast of wide-ptr to thin-ptr is an extraction of data-ptr.
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    | mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast, move_annotation: None };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute | mir::CastKind::Subtype => {
                        self.codegen_transmute_operand(bx, operand, cast)
                    }
                };
                OperandRef { val, layout: cast, move_annotation: None }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef {
                    val: result,
                    layout: bx.cx().layout_of(operand_ty),
                    move_annotation: None,
                }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                    move_annotation: None,
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout, move_annotation: None }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                    move_annotation: None,
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
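                // A thread-local defined in another crate may need to be accessed
                // through a generated shim rather than directly.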
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_instance_attrs(instance.def))
                    } else {
                        None
                    };
                    bx.call(
                        fn_ty,
                        fn_attrs.as_deref(),
                        Some(fn_abi),
                        fn_ptr,
                        &[],
                        None,
                        Some(instance),
                    )
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout, move_annotation: None }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(ref elem, len_const) => {
                // All arrays have `BackendRepr::Memory`, so only the ZST cases
                // end up here. Anything else forces the destination local to be
                // `Memory`, and thus ends up handled in `codegen_rvalue` instead.
                let operand = self.codegen_operand(bx, elem);
                let array_ty = Ty::new_array_with_const_len(bx.tcx(), operand.layout.ty, len_const);
                let array_ty = self.monomorphize(array_ty);
                let array_layout = bx.layout_of(array_ty);
                assert!(array_layout.is_zst());
671                OperandRef {
672                    val: OperandValue::ZeroSized,
673                    layout: array_layout,
674                    move_annotation: None,
675                }
676            }
677            mir::Rvalue::Aggregate(ref kind, ref fields) => {
678                let (variant_index, active_field_index) = match **kind {
679                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
680                        (variant_index, active_field_index)
681                    }
682                    _ => (FIRST_VARIANT, None),
683                };
684
685                let ty = rvalue.ty(self.mir, self.cx.tcx());
686                let ty = self.monomorphize(ty);
687                let layout = self.cx.layout_of(ty);
688
689                let mut builder = OperandRefBuilder::new(layout);
690                for (field_idx, field) in fields.iter_enumerated() {
691                    let op = self.codegen_operand(bx, field);
692                    let fi = active_field_index.unwrap_or(field_idx);
693                    builder.insert_field(bx, variant_index, fi, op);
694                }
695
696                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
697                match tag_result {
698                    Err(super::place::UninhabitedVariantError) => {
699                        // Like codegen_set_discr we use a sound abort, but could
700                        // potentially `unreachable` or just return the poison for
701                        // more optimizability, if that turns out to be helpful.
702                        bx.abort();
703                        let val = OperandValue::poison(bx, layout);
704                        OperandRef { val, layout, move_annotation: None }
705                    }
706                    Ok(maybe_tag_value) => {
707                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
708                            builder.insert_imm(tag_field, tag_imm);
709                        }
710                        builder.build(bx.cx())
711                    }
712                }
713            }
714            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
715                let operand = self.codegen_operand(bx, operand);
716                let binder_ty = self.monomorphize(binder_ty);
717                let layout = bx.cx().layout_of(binder_ty);
718                OperandRef { val: operand.val, layout, move_annotation: None }
719            }
720                mir::Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in codegen"),
721        }
722    }
723
724    /// Codegen an `Rvalue::RawPtr` or `Rvalue::Ref`
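    ///
    /// Illustrative examples (not from the surrounding code): both `&x` and
    /// `&raw mut x` reach this path; a thin pointee yields an `Immediate`
    /// address, while a pointee with metadata (e.g. `[T]` or `dyn Trait`)
    /// yields a `Pair` of (data pointer, metadata), as asserted below.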
725    fn codegen_place_to_pointer(
726        &mut self,
727        bx: &mut Bx,
728        place: mir::Place<'tcx>,
729        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
730    ) -> OperandRef<'tcx, Bx::Value> {
731        let cg_place = self.codegen_place(bx, place.as_ref());
732        let val = cg_place.val.address();
733
734        let ty = cg_place.layout.ty;
735        assert!(
736            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
737                matches!(val, OperandValue::Pair(..))
738            } else {
739                matches!(val, OperandValue::Immediate(..))
740            },
741            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
742        );
743
744        OperandRef {
745            val,
746            layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)),
747            move_annotation: None,
748        }
749    }
750
751    fn codegen_scalar_binop(
752        &mut self,
753        bx: &mut Bx,
754        op: mir::BinOp,
755        lhs: Bx::Value,
756        rhs: Bx::Value,
757        lhs_ty: Ty<'tcx>,
758        rhs_ty: Ty<'tcx>,
759    ) -> Bx::Value {
760        let is_float = lhs_ty.is_floating_point();
761        let is_signed = lhs_ty.is_signed();
762        match op {
763            mir::BinOp::Add => {
764                if is_float {
765                    bx.fadd(lhs, rhs)
766                } else {
767                    bx.add(lhs, rhs)
768                }
769            }
770            mir::BinOp::AddUnchecked => {
771                if is_signed {
772                    bx.unchecked_sadd(lhs, rhs)
773                } else {
774                    bx.unchecked_uadd(lhs, rhs)
775                }
776            }
777            mir::BinOp::Sub => {
778                if is_float {
779                    bx.fsub(lhs, rhs)
780                } else {
781                    bx.sub(lhs, rhs)
782                }
783            }
784            mir::BinOp::SubUnchecked => {
785                if is_signed {
786                    bx.unchecked_ssub(lhs, rhs)
787                } else {
788                    bx.unchecked_usub(lhs, rhs)
789                }
790            }
791            mir::BinOp::Mul => {
792                if is_float {
793                    bx.fmul(lhs, rhs)
794                } else {
795                    bx.mul(lhs, rhs)
796                }
797            }
798            mir::BinOp::MulUnchecked => {
799                if is_signed {
800                    bx.unchecked_smul(lhs, rhs)
801                } else {
802                    bx.unchecked_umul(lhs, rhs)
803                }
804            }
805            mir::BinOp::Div => {
806                if is_float {
807                    bx.fdiv(lhs, rhs)
808                } else if is_signed {
809                    bx.sdiv(lhs, rhs)
810                } else {
811                    bx.udiv(lhs, rhs)
812                }
813            }
814            mir::BinOp::Rem => {
815                if is_float {
816                    bx.frem(lhs, rhs)
817                } else if is_signed {
818                    bx.srem(lhs, rhs)
819                } else {
820                    bx.urem(lhs, rhs)
821                }
822            }
823            mir::BinOp::BitOr => bx.or(lhs, rhs),
824            mir::BinOp::BitAnd => bx.and(lhs, rhs),
825            mir::BinOp::BitXor => bx.xor(lhs, rhs),
826            mir::BinOp::Offset => {
827                let pointee_type = lhs_ty
828                    .builtin_deref(true)
829                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
830                let pointee_layout = bx.cx().layout_of(pointee_type);
831                if pointee_layout.is_zst() {
832                    // `Offset` works in terms of the size of pointee,
833                    // so offsetting a pointer to ZST is a noop.
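                    // (E.g., illustrative: `Offset` on a `*const ()` simply
                    // returns the original pointer value.)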
834                    lhs
835                } else {
836                    let llty = bx.cx().backend_type(pointee_layout);
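                    // An unsigned count can't be negative, so the GEP can also
                    // carry the `nuw` (no unsigned wrap) hint; signed counts get
                    // a plain inbounds GEP.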
837                    if !rhs_ty.is_signed() {
838                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
839                    } else {
840                        bx.inbounds_gep(llty, lhs, &[rhs])
841                    }
842                }
843            }
844            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
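                // `build_shift_expr_rhs` casts the RHS to the LHS's width and,
                // for the non-`Unchecked` forms, masks it so the shift amount
                // is always in range for the backend shift instruction.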
845                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
846                bx.shl(lhs, rhs)
847            }
848            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
849                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
850                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
851            }
852            mir::BinOp::Ne
853            | mir::BinOp::Lt
854            | mir::BinOp::Gt
855            | mir::BinOp::Eq
856            | mir::BinOp::Le
857            | mir::BinOp::Ge => {
858                if is_float {
859                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
860                } else {
861                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
862                }
863            }
864            mir::BinOp::Cmp => {
865                assert!(!is_float);
866                bx.three_way_compare(lhs_ty, lhs, rhs)
867            }
868            mir::BinOp::AddWithOverflow
869            | mir::BinOp::SubWithOverflow
870            | mir::BinOp::MulWithOverflow => {
871                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
872            }
873        }
874    }
875
876    fn codegen_wide_ptr_binop(
877        &mut self,
878        bx: &mut Bx,
879        op: mir::BinOp,
880        lhs_addr: Bx::Value,
881        lhs_extra: Bx::Value,
882        rhs_addr: Bx::Value,
883        rhs_extra: Bx::Value,
884        _input_ty: Ty<'tcx>,
885    ) -> Bx::Value {
886        match op {
887            mir::BinOp::Eq => {
888                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
889                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
890                bx.and(lhs, rhs)
891            }
892            mir::BinOp::Ne => {
893                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
894                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
895                bx.or(lhs, rhs)
896            }
897            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
898                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
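                // E.g. for `Lt` this is `a.0 < b.0 || (a.0 == b.0 && a.1 < b.1)`:
                // a lexicographic comparison of the (address, metadata) pairs,
                // with both halves compared as unsigned integers.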
899                let (op, strict_op) = match op {
900                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
901                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
902                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
903                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
904                    _ => bug!(),
905                };
906                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
907                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
908                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
909                let rhs = bx.and(and_lhs, and_rhs);
910                bx.or(lhs, rhs)
911            }
912            _ => {
913                bug!("unexpected wide ptr binop");
914            }
915        }
916    }
917
918    fn codegen_scalar_checked_binop(
919        &mut self,
920        bx: &mut Bx,
921        op: mir::BinOp,
922        lhs: Bx::Value,
923        rhs: Bx::Value,
924        input_ty: Ty<'tcx>,
925    ) -> OperandValue<Bx::Value> {
926        let (val, of) = match op {
927            // These are checked using intrinsics
928            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
929                let oop = match op {
930                    mir::BinOp::Add => OverflowOp::Add,
931                    mir::BinOp::Sub => OverflowOp::Sub,
932                    mir::BinOp::Mul => OverflowOp::Mul,
933                    _ => unreachable!(),
934                };
935                bx.checked_binop(oop, input_ty, lhs, rhs)
936            }
937            _ => bug!("Operator `{:?}` is not a checkable operator", op),
938        };
939
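        // `checked_binop` yields a (result, overflow-flag) pair, matching the
        // layout MIR expects for the `*WithOverflow` operators.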
940        OperandValue::Pair(val, of)
941    }
942}
943
944/// Transmutes a single scalar value `imm` from `from_scalar` to `to_scalar`.
945///
946/// This is expected to be in *immediate* form, as seen in [`OperandValue::Immediate`]
947/// or [`OperandValue::Pair`] (so `i1` for bools, not `i8`, for example).
948///
949/// ICEs if the passed-in `imm` is not a value of the expected type for
950/// `from_scalar`, such as if it's a vector or a pair.
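///
/// Illustrative examples (not from this file): a `u32 -> f32` transmute lowers
/// to a single `bitcast`, and `usize -> *const T` to an `inttoptr`, per the
/// primitive-pair match below.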
951pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
952    bx: &mut Bx,
953    mut imm: Bx::Value,
954    from_scalar: abi::Scalar,
955    to_scalar: abi::Scalar,
956) -> Bx::Value {
957    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
958    let imm_ty = bx.cx().val_ty(imm);
959    assert_ne!(
960        bx.cx().type_kind(imm_ty),
961        TypeKind::Vector,
962        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
963    );
964
965    // While optimizations will remove no-op transmutes, they might still be
966    // present in debug builds, or in transmutes that aren't no-ops in MIR
967    // because they change the Rust type but not the underlying layout/niche.
968    if from_scalar == to_scalar {
969        return imm;
970    }
971
972    use abi::Primitive::*;
973    imm = bx.from_immediate(imm);
974
975    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
976    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
977    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);
978
979    // If we have a scalar, we must already know its range. Either
980    //
981    // 1) It's a parameter with `range` parameter metadata,
982    // 2) It's something we `load`ed with `!range` metadata, or
983    // 3) After a transmute we `assume`d the range (see below).
984    //
985    // That said, the last time we tried removing this it didn't actually help
986    // the rustc-perf results, so we might as well keep doing it:
987    // <https://github.com/rust-lang/rust/pull/135610#issuecomment-2599275182>
988    assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));
989
990    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
991        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
992        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
993        (Int(..), Pointer(..)) => bx.inttoptr(imm, to_backend_ty),
994        (Pointer(..), Int(..)) => {
995            // FIXME: this exposes the provenance, which shouldn't be necessary.
996            bx.ptrtoint(imm, to_backend_ty)
997        }
998        (Float(_), Pointer(..)) => {
999            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
1000            bx.inttoptr(int_imm, to_backend_ty)
1001        }
1002        (Pointer(..), Float(_)) => {
1003            // FIXME: this exposes the provenance, which shouldn't be necessary.
1004            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
1005            bx.bitcast(int_imm, to_backend_ty)
1006        }
1007    };
1008
1009    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);
1010
1011    // This `assume` remains important for cases like (a conceptual)
1012    //    transmute::<u32, NonZeroU32>(x) == 0
1013    // since it's never passed to something with parameter metadata (especially
1014    // after MIR inlining) so the only way to tell the backend about the
1015    // constraint that the `transmute` introduced is to `assume` it.
1016    assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));
1017
1018    imm = bx.to_immediate_scalar(imm, to_scalar);
1019    imm
1020}
1021
1022/// Emits an `assume` call that `imm`'s value is within the known range of `scalar`.
1023///
1024/// If `known` is `Some`, only emits the assume if it's more specific than
1025/// whatever is already known from the range of *that* scalar.
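///
/// For example (illustrative): when transmuting `NonZeroU32` to `u32`, the
/// target's full range already contains the known `1..=u32::MAX` source range,
/// so no `assume` is emitted.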
1026fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
1027    bx: &mut Bx,
1028    imm: Bx::Value,
1029    scalar: abi::Scalar,
1030    backend_ty: Bx::Type,
1031    known: Option<&abi::Scalar>,
1032) {
1033    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
1034        return;
1035    }
1036
1037    match (scalar, known) {
1038        (abi::Scalar::Union { .. }, _) => return,
1039        (_, None) => {
1040            if scalar.is_always_valid(bx.cx()) {
1041                return;
1042            }
1043        }
1044        (abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
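            // If everything already known about the value fits inside this
            // scalar's valid range, an `assume` would add no new information.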
1045            let known_range = known.valid_range(bx.cx());
1046            if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
1047                return;
1048            }
1049        }
1050    }
1051
1052    match scalar.primitive() {
1053        abi::Primitive::Int(..) => {
1054            let range = scalar.valid_range(bx.cx());
1055            bx.assume_integer_range(imm, backend_ty, range);
1056        }
1057        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
1058            if !scalar.valid_range(bx.cx()).contains(0) =>
1059        {
1060            bx.assume_nonnull(imm);
1061        }
1062        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
1063    }
1064}