rustc_const_eval/interpret/cast.rs

use std::assert_matches::assert_matches;

use rustc_abi::Integer;
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_apfloat::{Float, FloatConvert};
use rustc_middle::mir::CastKind;
use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_middle::{bug, span_bug};
use tracing::trace;

use super::util::ensure_monomorphic_enough;
use super::{
    FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy, err_inval, interp_ok, throw_ub,
    throw_ub_custom,
};
use crate::fluent_generated as fluent;

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    pub fn cast(
        &mut self,
        src: &OpTy<'tcx, M::Provenance>,
        cast_kind: CastKind,
        cast_ty: Ty<'tcx>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        // `cast_ty` will often be the same as `dest.ty`, but not always, since subtyping is still
        // possible.
        let cast_layout =
            if cast_ty == dest.layout.ty { dest.layout } else { self.layout_of(cast_ty)? };
        // FIXME: In which cases should we trigger UB when the source is uninit?
        match cast_kind {
            CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                self.unsize_into(src, cast_layout, dest)?;
            }

            CastKind::PointerExposeProvenance => {
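                // Ptr-to-int cast, e.g. `my_ptr as usize`: the pointer's provenance gets
                // exposed before its address is returned as an integer.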
                let src = self.read_immediate(src)?;
                let res = self.pointer_expose_provenance_cast(&src, cast_layout)?;
                self.write_immediate(*res, dest)?;
            }

            CastKind::PointerWithExposedProvenance => {
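                // Int-to-ptr cast, e.g. `addr as *const T`: the machine decides which (if any)
                // previously exposed provenance the new pointer picks up.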
                let src = self.read_immediate(src)?;
                let res = self.pointer_with_exposed_provenance_cast(&src, cast_layout)?;
                self.write_immediate(*res, dest)?;
            }

            CastKind::IntToInt | CastKind::IntToFloat => {
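                // e.g. `u8 as u32` (int-to-int) or `i32 as f64` (int-to-float).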
                let src = self.read_immediate(src)?;
                let res = self.int_to_int_or_float(&src, cast_layout)?;
                self.write_immediate(*res, dest)?;
            }

            CastKind::FloatToFloat | CastKind::FloatToInt => {
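                // e.g. `f32 as f64` (float-to-float) or `f64 as i32` (float-to-int, saturating).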
                let src = self.read_immediate(src)?;
                let res = self.float_to_float_or_int(&src, cast_layout)?;
                self.write_immediate(*res, dest)?;
            }

            CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
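                // e.g. `fn() as *const ()` (fn-ptr-to-ptr) or `*const T as *const U` (ptr-to-ptr).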
                let src = self.read_immediate(src)?;
                let res = self.ptr_to_ptr(&src, cast_layout)?;
                self.write_immediate(*res, dest)?;
            }

            CastKind::PointerCoercion(
                PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                _,
            ) => {
                bug!("{cast_kind:?} casts are for borrowck only, not runtime MIR");
            }

            CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
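                // Turns a zero-sized `FnDef` value into an actual function pointer,
                // e.g. the coercion in `let f: fn() = foo;`.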
                // All reifications must be monomorphic, bail out otherwise.
                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;

                // The src operand does not matter, just its type
                match *src.layout.ty.kind() {
                    ty::FnDef(def_id, args) => {
                        let instance = ty::Instance::resolve_for_fn_ptr(
                            *self.tcx,
                            self.typing_env,
                            def_id,
                            args,
                        )
                        .ok_or_else(|| err_inval!(TooGeneric))?;

                        let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
                        self.write_pointer(fn_ptr, dest)?;
                    }
                    _ => span_bug!(self.cur_span(), "reify fn pointer on {}", src.layout.ty),
                }
            }

            CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
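                // e.g. coercing `fn()` to `unsafe fn()`; the value representation is unchanged.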
                let src = self.read_immediate(src)?;
                match cast_ty.kind() {
                    ty::FnPtr(..) => {
                        // No change to value
                        self.write_immediate(*src, dest)?;
                    }
                    _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {}", cast_ty),
                }
            }

            CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
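                // Turns a non-capturing closure into a function pointer,
                // e.g. the coercion in `let f: fn(i32) -> i32 = |x| x + 1;`.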
                // All reifications must be monomorphic, bail out otherwise.
                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;

                // The src operand does not matter, just its type
                match *src.layout.ty.kind() {
                    ty::Closure(def_id, args) => {
                        let instance = ty::Instance::resolve_closure(
                            *self.tcx,
                            def_id,
                            args,
                            ty::ClosureKind::FnOnce,
                        );
                        let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
                        self.write_pointer(fn_ptr, dest)?;
                    }
                    _ => span_bug!(self.cur_span(), "closure fn pointer on {}", src.layout.ty),
                }
            }

            CastKind::PointerCoercion(PointerCoercion::DynStar, _) => {
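                // `dyn*` casts (unstable `dyn_star` feature): the pointer-sized value is packed
                // together with a vtable pointer into a (data, vtable) scalar pair.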
                if let ty::Dynamic(data, _, ty::DynStar) = cast_ty.kind() {
                    // Initial cast from sized to dyn trait
                    let vtable = self.get_vtable_ptr(src.layout.ty, data)?;
                    let vtable = Scalar::from_maybe_pointer(vtable, self);
                    let data = self.read_immediate(src)?.to_scalar();
                    let _assert_pointer_like = data.to_pointer(self)?;
                    let val = Immediate::ScalarPair(data, vtable);
                    self.write_immediate(val, dest)?;
                } else {
                    bug!()
                }
            }

            CastKind::Transmute => {
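                // e.g. `transmute::<u32, [u8; 4]>(x)`: `transmute` calls are typically
                // represented as a `Transmute` cast in runtime MIR.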
                assert!(src.layout.is_sized());
                assert!(dest.layout.is_sized());
                assert_eq!(cast_ty, dest.layout.ty); // we otherwise ignore `cast_ty` entirely...
                if src.layout.size != dest.layout.size {
                    throw_ub_custom!(
                        fluent::const_eval_invalid_transmute,
                        src_bytes = src.layout.size.bytes(),
                        dest_bytes = dest.layout.size.bytes(),
                        src = src.layout.ty,
                        dest = dest.layout.ty,
                    );
                }

                self.copy_op_allow_transmute(src, dest)?;
            }
        }
        interp_ok(())
    }

    /// Handles 'IntToInt' and 'IntToFloat' casts.
    pub fn int_to_int_or_float(
        &self,
        src: &ImmTy<'tcx, M::Provenance>,
        cast_to: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
        assert!(cast_to.ty.is_floating_point() || cast_to.ty.is_integral() || cast_to.ty.is_char());

        interp_ok(ImmTy::from_scalar(
            self.cast_from_int_like(src.to_scalar(), src.layout, cast_to.ty)?,
            cast_to,
        ))
    }

    /// Handles 'FloatToFloat' and 'FloatToInt' casts.
    pub fn float_to_float_or_int(
        &self,
        src: &ImmTy<'tcx, M::Provenance>,
        cast_to: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        let ty::Float(fty) = src.layout.ty.kind() else {
            bug!("FloatToFloat/FloatToInt cast: source type {} is not a float type", src.layout.ty)
        };
        let val = match fty {
            FloatTy::F16 => self.cast_from_float(src.to_scalar().to_f16()?, cast_to.ty),
            FloatTy::F32 => self.cast_from_float(src.to_scalar().to_f32()?, cast_to.ty),
            FloatTy::F64 => self.cast_from_float(src.to_scalar().to_f64()?, cast_to.ty),
            FloatTy::F128 => self.cast_from_float(src.to_scalar().to_f128()?, cast_to.ty),
        };
        interp_ok(ImmTy::from_scalar(val, cast_to))
    }

    /// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
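    /// For example, `*const [u8] as *const u8` drops the slice length metadata, while
    /// `*const u8 as *const u16` just changes the pointee type.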
    pub fn ptr_to_ptr(
        &self,
        src: &ImmTy<'tcx, M::Provenance>,
        cast_to: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        assert!(src.layout.ty.is_any_ptr());
        assert!(cast_to.ty.is_raw_ptr());
        // Handle casting any ptr to raw ptr (might be a wide ptr).
        if cast_to.size == src.layout.size {
            // Thin or wide pointer that just has the ptr kind of target type changed.
            return interp_ok(ImmTy::from_immediate(**src, cast_to));
        } else {
            // Casting the metadata away from a wide ptr.
            assert_eq!(src.layout.size, 2 * self.pointer_size());
            assert_eq!(cast_to.size, self.pointer_size());
            assert!(src.layout.ty.is_raw_ptr());
            return match **src {
                Immediate::ScalarPair(data, _) => interp_ok(ImmTy::from_scalar(data, cast_to)),
                Immediate::Scalar(..) => span_bug!(
                    self.cur_span(),
                    "{:?} input to a fat-to-thin cast ({} -> {})",
                    *src,
                    src.layout.ty,
                    cast_to.ty
                ),
                Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
            };
        }
    }

    pub fn pointer_expose_provenance_cast(
        &mut self,
        src: &ImmTy<'tcx, M::Provenance>,
        cast_to: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        assert_matches!(src.layout.ty.kind(), ty::RawPtr(_, _) | ty::FnPtr(..));
        assert!(cast_to.ty.is_integral());

        let scalar = src.to_scalar();
        let ptr = scalar.to_pointer(self)?;
        match ptr.into_pointer_or_addr() {
            Ok(ptr) => M::expose_provenance(self, ptr.provenance)?,
            Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
        };
        interp_ok(ImmTy::from_scalar(
            self.cast_from_int_like(scalar, src.layout, cast_to.ty)?,
            cast_to,
        ))
    }

    pub fn pointer_with_exposed_provenance_cast(
        &self,
        src: &ImmTy<'tcx, M::Provenance>,
        cast_to: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        assert!(src.layout.ty.is_integral());
        assert_matches!(cast_to.ty.kind(), ty::RawPtr(_, _));

        // First cast to usize.
        let scalar = src.to_scalar();
        let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
        let addr = addr.to_target_usize(self)?;

        // Then turn address into pointer.
        let ptr = M::ptr_from_addr_cast(self, addr)?;
        interp_ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
    }

    /// Low-level cast helper function. This works directly on scalars and can cast 'int-like'
    /// input types (basically everything with a scalar layout) to int/float/char types.
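    ///
    /// For example, `-1i8 as u32` must sign-extend to `0xffff_ffff`, which is why the signedness
    /// of the *source* layout is consulted below.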
    fn cast_from_int_like(
        &self,
        scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
        src_layout: TyAndLayout<'tcx>,
        cast_ty: Ty<'tcx>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        // Let's make sure v is sign-extended *if* it has a signed type.
        let signed = src_layout.backend_repr.is_signed(); // Also asserts that abi is `Scalar`.

        let v = match src_layout.ty.kind() {
            ty::Uint(_) | ty::RawPtr(..) | ty::FnPtr(..) => scalar.to_uint(src_layout.size)?,
            ty::Int(_) => scalar.to_int(src_layout.size)? as u128, // we will cast back to `i128` below if the sign matters
            ty::Bool => scalar.to_bool()?.into(),
            ty::Char => scalar.to_char()?.into(),
            _ => span_bug!(self.cur_span(), "invalid int-like cast from {}", src_layout.ty),
        };

        interp_ok(match *cast_ty.kind() {
            // int -> int
            ty::Int(_) | ty::Uint(_) => {
                let size = match *cast_ty.kind() {
                    ty::Int(t) => Integer::from_int_ty(self, t).size(),
                    ty::Uint(t) => Integer::from_uint_ty(self, t).size(),
                    _ => bug!(),
                };
                let v = size.truncate(v);
                Scalar::from_uint(v, size)
            }

            // signed int -> float
            ty::Float(fty) if signed => {
                let v = v as i128;
                match fty {
                    FloatTy::F16 => Scalar::from_f16(Half::from_i128(v).value),
                    FloatTy::F32 => Scalar::from_f32(Single::from_i128(v).value),
                    FloatTy::F64 => Scalar::from_f64(Double::from_i128(v).value),
                    FloatTy::F128 => Scalar::from_f128(Quad::from_i128(v).value),
                }
            }
            // unsigned int -> float
            ty::Float(fty) => match fty {
                FloatTy::F16 => Scalar::from_f16(Half::from_u128(v).value),
                FloatTy::F32 => Scalar::from_f32(Single::from_u128(v).value),
                FloatTy::F64 => Scalar::from_f64(Double::from_u128(v).value),
                FloatTy::F128 => Scalar::from_f128(Quad::from_u128(v).value),
            },

            // u8 -> char
            ty::Char => Scalar::from_u32(u8::try_from(v).unwrap().into()),

            // Casts to bool are not permitted by rustc, no need to handle them here.
            _ => span_bug!(self.cur_span(), "invalid int to {} cast", cast_ty),
        })
    }

    /// Low-level cast helper function. Converts an apfloat `f` into int or float types.
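    ///
    /// Float-to-int conversions saturate, e.g. `f32::INFINITY as u8 == 255` and
    /// `f32::NAN as u8 == 0`.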
    fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::Provenance>
    where
        F: Float
            + Into<Scalar<M::Provenance>>
            + FloatConvert<Half>
            + FloatConvert<Single>
            + FloatConvert<Double>
            + FloatConvert<Quad>,
    {
        match *dest_ty.kind() {
            // float -> uint
            ty::Uint(t) => {
                let size = Integer::from_uint_ty(self, t).size();
                // `to_u128` is a saturating cast, which is what we need
                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
                let v = f.to_u128(size.bits_usize()).value;
                // This should already fit the bit width
                Scalar::from_uint(v, size)
            }
            // float -> int
            ty::Int(t) => {
                let size = Integer::from_int_ty(self, t).size();
                // `to_i128` is a saturating cast, which is what we need
                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
                let v = f.to_i128(size.bits_usize()).value;
                Scalar::from_int(v, size)
            }
            // float -> float
            ty::Float(fty) => match fty {
                FloatTy::F16 => {
                    Scalar::from_f16(self.adjust_nan(f.convert(&mut false).value, &[f]))
                }
                FloatTy::F32 => {
                    Scalar::from_f32(self.adjust_nan(f.convert(&mut false).value, &[f]))
                }
                FloatTy::F64 => {
                    Scalar::from_f64(self.adjust_nan(f.convert(&mut false).value, &[f]))
                }
                FloatTy::F128 => {
                    Scalar::from_f128(self.adjust_nan(f.convert(&mut false).value, &[f]))
                }
            },
            // That's it.
            _ => span_bug!(self.cur_span(), "invalid float to {} cast", dest_ty),
        }
    }

    /// `src` is a *pointer to* a `source_ty`, and in `dest` we should store a pointer to the same
    /// data at type `cast_ty`.
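    /// For example, unsizing `&[u8; 4]` to `&[u8]`, `&Struct` to `&dyn Trait`, or upcasting
    /// `&dyn Sub` to `&dyn Super`.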
    fn unsize_into_ptr(
        &mut self,
        src: &OpTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
        // The pointee types
        source_ty: Ty<'tcx>,
        cast_ty: Ty<'tcx>,
    ) -> InterpResult<'tcx> {
        // A<Struct> -> A<Trait> conversion
        let (src_pointee_ty, dest_pointee_ty) =
            self.tcx.struct_lockstep_tails_for_codegen(source_ty, cast_ty, self.typing_env);

        match (src_pointee_ty.kind(), dest_pointee_ty.kind()) {
            (&ty::Array(_, length), &ty::Slice(_)) => {
                let ptr = self.read_pointer(src)?;
                let val = Immediate::new_slice(
                    ptr,
                    length
                        .try_to_target_usize(*self.tcx)
                        .expect("expected monomorphic const in const eval"),
                    self,
                );
                self.write_immediate(val, dest)
            }
            (ty::Dynamic(data_a, _, ty::Dyn), ty::Dynamic(data_b, _, ty::Dyn)) => {
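                // Trait-object-to-trait-object casts: either a no-op cast between the same trait
                // object type, or an upcast like `&dyn Sub -> &dyn Super`.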
                let val = self.read_immediate(src)?;
                // MIR building generates odd NOP casts, prevent them from causing unexpected trouble.
                // See <https://github.com/rust-lang/rust/issues/128880>.
                // FIXME: ideally we wouldn't have to do this.
                if data_a == data_b {
                    return self.write_immediate(*val, dest);
                }
                // Take apart the old pointer, and find the dynamic type.
                let (old_data, old_vptr) = val.to_scalar_pair();
                let old_data = old_data.to_pointer(self)?;
                let old_vptr = old_vptr.to_pointer(self)?;
                let ty = self.get_ptr_vtable_ty(old_vptr, Some(data_a))?;

                // Sanity-check that `supertrait_vtable_slot` in this type's vtable indeed produces
                // our destination trait.
                let vptr_entry_idx =
                    self.tcx.supertrait_vtable_slot((src_pointee_ty, dest_pointee_ty));
                let vtable_entries = self.vtable_entries(data_a.principal(), ty);
                if let Some(entry_idx) = vptr_entry_idx {
                    let Some(&ty::VtblEntry::TraitVPtr(upcast_trait_ref)) =
                        vtable_entries.get(entry_idx)
                    else {
                        span_bug!(
                            self.cur_span(),
                            "invalid vtable entry index in {} -> {} upcast",
                            src_pointee_ty,
                            dest_pointee_ty
                        );
                    };
                    let erased_trait_ref =
                        ty::ExistentialTraitRef::erase_self_ty(*self.tcx, upcast_trait_ref);
                    assert_eq!(
                        data_b.principal().map(|b| {
                            self.tcx.normalize_erasing_late_bound_regions(self.typing_env, b)
                        }),
                        Some(erased_trait_ref),
                    );
                } else {
                    // In this case codegen would keep using the old vtable. We don't want to do
                    // that as it has the wrong trait. The reason codegen can do this is that
                    // one vtable is a prefix of the other, so we double-check that.
                    let vtable_entries_b = self.vtable_entries(data_b.principal(), ty);
                    assert!(&vtable_entries[..vtable_entries_b.len()] == vtable_entries_b);
                };

                // Get the destination trait vtable and return that.
                let new_vptr = self.get_vtable_ptr(ty, data_b)?;
                self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
            }
            (_, &ty::Dynamic(data, _, ty::Dyn)) => {
                // Initial cast from sized to dyn trait
                let vtable = self.get_vtable_ptr(src_pointee_ty, data)?;
                let ptr = self.read_pointer(src)?;
                let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
                self.write_immediate(val, dest)
            }
            _ => {
                // Do not ICE if we are not monomorphic enough.
                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
                ensure_monomorphic_enough(*self.tcx, cast_ty)?;

                span_bug!(
                    self.cur_span(),
                    "invalid pointer unsizing {} -> {}",
                    src.layout.ty,
                    cast_ty
                )
            }
        }
    }

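    /// Unsizes `src` into `dest`, e.g. `&[u8; 4]` to `&[u8]`, `Box<T>` to `Box<dyn Trait>`, or a
    /// struct wrapping such a pointer (like `Arc<T>` to `Arc<dyn Trait>`).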
    pub fn unsize_into(
        &mut self,
        src: &OpTy<'tcx, M::Provenance>,
        cast_ty: TyAndLayout<'tcx>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        trace!("Unsizing {:?} of type {} into {}", *src, src.layout.ty, cast_ty.ty);
        match (src.layout.ty.kind(), cast_ty.ty.kind()) {
            (&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(c, _))
            | (&ty::RawPtr(s, _), &ty::RawPtr(c, _)) => self.unsize_into_ptr(src, dest, s, c),
            (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
                assert_eq!(def_a, def_b); // implies same number of fields

                // Unsizing of generic struct with pointer fields, like `Arc<T>` -> `Arc<Trait>`.
                // There can be extra fields as long as they don't change their type or are 1-ZST.
                // There might also be no field that actually needs unsizing.
                let mut found_cast_field = false;
                for i in 0..src.layout.fields.count() {
                    let cast_ty_field = cast_ty.field(self, i);
                    let src_field = self.project_field(src, i)?;
                    let dst_field = self.project_field(dest, i)?;
                    if src_field.layout.is_1zst() && cast_ty_field.is_1zst() {
                        // Skip 1-ZST fields.
                    } else if src_field.layout.ty == cast_ty_field.ty {
                        self.copy_op(&src_field, &dst_field)?;
                    } else {
                        if found_cast_field {
                            span_bug!(self.cur_span(), "unsize_into: more than one field to cast");
                        }
                        found_cast_field = true;
                        self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
                    }
                }
                interp_ok(())
            }
            _ => {
                // Do not ICE if we are not monomorphic enough.
                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
                ensure_monomorphic_enough(*self.tcx, cast_ty.ty)?;

                span_bug!(
                    self.cur_span(),
                    "unsize_into: invalid conversion: {:?} -> {:?}",
                    src.layout,
                    dest.layout
                )
            }
        }
    }
}