// rustc_const_eval/interpret/operator.rs

1use either::Either;
2use rustc_abi::Size;
3use rustc_apfloat::{Float, FloatConvert};
4use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
5use rustc_middle::ty::layout::TyAndLayout;
6use rustc_middle::ty::{self, FloatTy, ScalarInt};
7use rustc_middle::{bug, mir, span_bug};
8use rustc_span::sym;
9use tracing::trace;
10
11use super::{ImmTy, InterpCx, Machine, MemPlaceMeta, interp_ok, throw_ub};
12
13impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
14    fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> ImmTy<'tcx, M::Provenance> {
15        let res = Ord::cmp(&lhs, &rhs);
16        return ImmTy::from_ordering(res, *self.tcx);
17    }
18
19    fn binary_char_op(&self, bin_op: mir::BinOp, l: char, r: char) -> ImmTy<'tcx, M::Provenance> {
20        use rustc_middle::mir::BinOp::*;
21
22        if bin_op == Cmp {
23            return self.three_way_compare(l, r);
24        }
25
26        let res = match bin_op {
27            Eq => l == r,
28            Ne => l != r,
29            Lt => l < r,
30            Le => l <= r,
31            Gt => l > r,
32            Ge => l >= r,
33            _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
34        };
35        ImmTy::from_bool(res, *self.tcx)
36    }
37
38    fn binary_bool_op(&self, bin_op: mir::BinOp, l: bool, r: bool) -> ImmTy<'tcx, M::Provenance> {
39        use rustc_middle::mir::BinOp::*;
40
41        let res = match bin_op {
42            Eq => l == r,
43            Ne => l != r,
44            Lt => l < r,
45            Le => l <= r,
46            Gt => l > r,
47            Ge => l >= r,
48            BitAnd => l & r,
49            BitOr => l | r,
50            BitXor => l ^ r,
51            _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
52        };
53        ImmTy::from_bool(res, *self.tcx)
54    }
55
56    fn binary_float_op<F: Float + FloatConvert<F> + Into<Scalar<M::Provenance>>>(
57        &self,
58        bin_op: mir::BinOp,
59        layout: TyAndLayout<'tcx>,
60        l: F,
61        r: F,
62    ) -> ImmTy<'tcx, M::Provenance> {
63        use rustc_middle::mir::BinOp::*;
64
65        // Performs appropriate non-deterministic adjustments of NaN results.
66        let adjust_nan = |f: F| -> F { self.adjust_nan(f, &[l, r]) };
67
68        match bin_op {
69            Eq => ImmTy::from_bool(l == r, *self.tcx),
70            Ne => ImmTy::from_bool(l != r, *self.tcx),
71            Lt => ImmTy::from_bool(l < r, *self.tcx),
72            Le => ImmTy::from_bool(l <= r, *self.tcx),
73            Gt => ImmTy::from_bool(l > r, *self.tcx),
74            Ge => ImmTy::from_bool(l >= r, *self.tcx),
75            Add => ImmTy::from_scalar(adjust_nan((l + r).value).into(), layout),
76            Sub => ImmTy::from_scalar(adjust_nan((l - r).value).into(), layout),
77            Mul => ImmTy::from_scalar(adjust_nan((l * r).value).into(), layout),
78            Div => ImmTy::from_scalar(adjust_nan((l / r).value).into(), layout),
79            Rem => ImmTy::from_scalar(adjust_nan((l % r).value).into(), layout),
80            _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
81        }
82    }
83
    /// Evaluates a binary operator where both operands are integers.
    ///
    /// The operand types must match, except for the shift operators, whose RHS may have a
    /// different (integral) type than the LHS. Depending on `bin_op`, this returns a single
    /// scalar or a (value, overflow-flag) pair (for the `*WithOverflow` operators).
    fn binary_int_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        use rustc_middle::mir::BinOp::*;

        // This checks the size, so that we can just assert it below.
        let l = left.to_scalar_int()?;
        let r = right.to_scalar_int()?;
        // Prepare to convert the values to signed or unsigned form.
        // These are closures so the conversion only happens where it is actually needed.
        let l_signed = || l.to_int(left.layout.size);
        let l_unsigned = || l.to_uint(left.layout.size);
        let r_signed = || r.to_int(right.layout.size);
        let r_unsigned = || r.to_uint(right.layout.size);

        // The `*Unchecked` operators make overflow immediate UB (rather than setting a flag);
        // remember which intrinsic to blame in the error message if that happens.
        let throw_ub_on_overflow = match bin_op {
            AddUnchecked => Some(sym::unchecked_add),
            SubUnchecked => Some(sym::unchecked_sub),
            MulUnchecked => Some(sym::unchecked_mul),
            ShlUnchecked => Some(sym::unchecked_shl),
            ShrUnchecked => Some(sym::unchecked_shr),
            _ => None,
        };
        let with_overflow = bin_op.is_overflowing();

        // Shift ops can have an RHS with a different numeric type.
        if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
            let l_bits = left.layout.size.bits();
            // Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
            // the one MIR operator that does *not* directly map to a single LLVM operation.)
            // `overflow` records whether the original amount was out of range.
            let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
                let shift_amount = r_signed();
                let rem = shift_amount.rem_euclid(l_bits.into());
                // `rem` is guaranteed positive, so the `unwrap` cannot fail
                (u128::try_from(rem).unwrap(), rem != shift_amount)
            } else {
                let shift_amount = r_unsigned();
                let rem = shift_amount.rem_euclid(l_bits.into());
                (rem, rem != shift_amount)
            };
            let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this in the range `0..size` so this will always fit
            // Compute the shifted result (signed and unsigned LHS shift differently on `Shr`).
            let result = if left.layout.backend_repr.is_signed() {
                let l = l_signed();
                let result = match bin_op {
                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                    _ => bug!(),
                };
                ScalarInt::truncate_from_int(result, left.layout.size).0
            } else {
                let l = l_unsigned();
                let result = match bin_op {
                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                    _ => bug!(),
                };
                ScalarInt::truncate_from_uint(result, left.layout.size).0
            };

            // For the unchecked shift intrinsics, an out-of-range shift amount is UB.
            if overflow && let Some(intrinsic) = throw_ub_on_overflow {
                throw_ub!(ShiftOverflow {
                    intrinsic,
                    shift_amount: if right.layout.backend_repr.is_signed() {
                        Either::Right(r_signed())
                    } else {
                        Either::Left(r_unsigned())
                    }
                });
            }

            return interp_ok(ImmTy::from_scalar_int(result, left.layout));
        }

        // For the remaining ops, the types must be the same on both sides
        if left.layout.ty != right.layout.ty {
            span_bug!(
                self.cur_span(),
                "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
                l_ty = left.layout.ty,
                r_ty = right.layout.ty,
            )
        }

        let size = left.layout.size;

        // Operations that need special treatment for signed integers
        if left.layout.backend_repr.is_signed() {
            // Signed comparisons.
            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                Lt => Some(i128::lt),
                Le => Some(i128::le),
                Gt => Some(i128::gt),
                Ge => Some(i128::ge),
                _ => None,
            };
            if let Some(op) = op {
                return interp_ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx));
            }
            if bin_op == Cmp {
                return interp_ok(self.three_way_compare(l_signed(), r_signed()));
            }
            // Signed arithmetic. Division/remainder by zero are UB and are checked eagerly,
            // before any `i128` arithmetic happens.
            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
                Div if r.is_null() => throw_ub!(DivisionByZero),
                Rem if r.is_null() => throw_ub!(RemainderByZero),
                Div => Some(i128::overflowing_div),
                Rem => Some(i128::overflowing_rem),
                Add | AddUnchecked | AddWithOverflow => Some(i128::overflowing_add),
                Sub | SubUnchecked | SubWithOverflow => Some(i128::overflowing_sub),
                Mul | MulUnchecked | MulWithOverflow => Some(i128::overflowing_mul),
                _ => None,
            };
            if let Some(op) = op {
                let l = l_signed();
                let r = r_signed();

                // We need a special check for overflowing Rem and Div since they are *UB*
                // on overflow, which can happen with "int_min $OP -1".
                if matches!(bin_op, Rem | Div) {
                    if l == size.signed_int_min() && r == -1 {
                        if bin_op == Rem {
                            throw_ub!(RemainderOverflow)
                        } else {
                            throw_ub!(DivisionOverflow)
                        }
                    }
                }

                let (result, oflo) = op(l, r);
                // This may be out-of-bounds for the result type, so we have to truncate.
                // If that truncation loses any information, we have an overflow.
                let (result, lossy) = ScalarInt::truncate_from_int(result, left.layout.size);
                let overflow = oflo || lossy;
                if overflow && let Some(intrinsic) = throw_ub_on_overflow {
                    throw_ub!(ArithOverflow { intrinsic });
                }
                let res = ImmTy::from_scalar_int(result, left.layout);
                // `*WithOverflow` ops return a (value, overflowed) pair.
                return interp_ok(if with_overflow {
                    let overflow = ImmTy::from_bool(overflow, *self.tcx);
                    ImmTy::from_pair(res, overflow, self)
                } else {
                    res
                });
            }
        }
        // From here on it's okay to treat everything as unsigned.
        let l = l_unsigned();
        let r = r_unsigned();

        if bin_op == Cmp {
            return interp_ok(self.three_way_compare(l, r));
        }

        interp_ok(match bin_op {
            Eq => ImmTy::from_bool(l == r, *self.tcx),
            Ne => ImmTy::from_bool(l != r, *self.tcx),

            Lt => ImmTy::from_bool(l < r, *self.tcx),
            Le => ImmTy::from_bool(l <= r, *self.tcx),
            Gt => ImmTy::from_bool(l > r, *self.tcx),
            Ge => ImmTy::from_bool(l >= r, *self.tcx),

            BitOr => ImmTy::from_uint(l | r, left.layout),
            BitAnd => ImmTy::from_uint(l & r, left.layout),
            BitXor => ImmTy::from_uint(l ^ r, left.layout),

            _ => {
                // Remaining cases are arithmetic; signed arithmetic was fully handled above.
                assert!(!left.layout.backend_repr.is_signed());
                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                    Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
                    Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
                    Mul | MulUnchecked | MulWithOverflow => u128::overflowing_mul,
                    Div if r == 0 => throw_ub!(DivisionByZero),
                    Rem if r == 0 => throw_ub!(RemainderByZero),
                    Div => u128::overflowing_div,
                    Rem => u128::overflowing_rem,
                    _ => span_bug!(
                        self.cur_span(),
                        "invalid binary op {:?}: {:?}, {:?} (both {})",
                        bin_op,
                        left,
                        right,
                        right.layout.ty,
                    ),
                };
                let (result, oflo) = op(l, r);
                // Truncate to target type.
                // If that truncation loses any information, we have an overflow.
                let (result, lossy) = ScalarInt::truncate_from_uint(result, left.layout.size);
                let overflow = oflo || lossy;
                if overflow && let Some(intrinsic) = throw_ub_on_overflow {
                    throw_ub!(ArithOverflow { intrinsic });
                }
                let res = ImmTy::from_scalar_int(result, left.layout);
                if with_overflow {
                    let overflow = ImmTy::from_bool(overflow, *self.tcx);
                    ImmTy::from_pair(res, overflow, self)
                } else {
                    res
                }
            }
        })
    }
288
289    /// Computes the total size of this access, `count * elem_size`,
290    /// checking for overflow beyond isize::MAX.
291    pub fn compute_size_in_bytes(&self, elem_size: Size, count: u64) -> Option<Size> {
292        // `checked_mul` applies `u64` limits independent of the target pointer size... but the
293        // subsequent check for `max_size_of_val` means we also handle 32bit targets correctly.
294        // (We cannot use `Size::checked_mul` as that enforces `obj_size_bound` as the limit, which
295        // would be wrong here.)
296        elem_size
297            .bytes()
298            .checked_mul(count)
299            .map(Size::from_bytes)
300            .filter(|&total| total <= self.max_size_of_val())
301    }
302
    /// Evaluates a binary operator whose LHS is a pointer.
    ///
    /// `Offset` is handled here; every other operator is deferred to the `Machine` hook
    /// so that implementations such as Miri can support additional pointer operations.
    fn binary_ptr_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        use rustc_middle::mir::BinOp::*;

        match bin_op {
            // Pointer ops that are always supported.
            Offset => {
                let ptr = left.to_scalar().to_pointer(self)?;
                // `Offset` is only valid on (thin) pointers, so the pointee must be sized.
                let pointee_ty = left.layout.ty.builtin_deref(true).unwrap();
                let pointee_layout = self.layout_of(pointee_ty)?;
                assert!(pointee_layout.is_sized());

                // The size always fits in `i64` as it can be at most `isize::MAX`.
                let pointee_size = i64::try_from(pointee_layout.size.bytes()).unwrap();
                // This uses the same type as `right`, which can be `isize` or `usize`.
                // `pointee_size` is guaranteed to fit into both types.
                let pointee_size = ImmTy::from_int(pointee_size, right.layout);
                // Multiply element size and element count.
                let (val, overflowed) = self
                    .binary_op(mir::BinOp::MulWithOverflow, right, &pointee_size)?
                    .to_scalar_pair();
                // This must not overflow.
                if overflowed.to_bool()? {
                    throw_ub!(PointerArithOverflow)
                }

                let offset_bytes = val.to_target_isize(self)?;
                if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
                    // We were supposed to do an unsigned offset but the result is negative -- this
                    // can only mean that the cast wrapped around.
                    throw_ub!(PointerArithOverflow)
                }
                // `ptr_offset_inbounds` enforces the usual inbounds requirement of `offset`.
                let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
                interp_ok(ImmTy::from_scalar(
                    Scalar::from_maybe_pointer(offset_ptr, self),
                    left.layout,
                ))
            }

            // Fall back to machine hook so Miri can support more pointer ops.
            _ => M::binary_ptr_op(self, bin_op, left, right),
        }
    }
350
    /// Returns the result of the specified operation.
    ///
    /// Whether this produces a scalar or a pair depends on the specific `bin_op`.
    pub fn binary_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        trace!(
            "Running binary op {:?}: {:?} ({}), {:?} ({})",
            bin_op, *left, left.layout.ty, *right, right.layout.ty
        );

        // Dispatch on the type of the *left* operand; each arm checks the RHS type itself,
        // since it is not always required to match (shifts, pointer `Offset`).
        match left.layout.ty.kind() {
            ty::Char => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar();
                let right = right.to_scalar();
                interp_ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
            }
            ty::Bool => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar();
                let right = right.to_scalar();
                interp_ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
            }
            ty::Float(fty) => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let layout = left.layout;
                let left = left.to_scalar();
                let right = right.to_scalar();
                // Monomorphize on the concrete float width.
                interp_ok(match fty {
                    FloatTy::F16 => {
                        self.binary_float_op(bin_op, layout, left.to_f16()?, right.to_f16()?)
                    }
                    FloatTy::F32 => {
                        self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
                    }
                    FloatTy::F64 => {
                        self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
                    }
                    FloatTy::F128 => {
                        self.binary_float_op(bin_op, layout, left.to_f128()?, right.to_f128()?)
                    }
                })
            }
            _ if left.layout.ty.is_integral() => {
                // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
                assert!(
                    right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {} {:?} {}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                self.binary_int_op(bin_op, left, right)
            }
            _ if left.layout.ty.is_any_ptr() => {
                // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
                // (Even when both sides are pointers, their type might differ, see issue #91636)
                assert!(
                    right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {} {:?} {}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                self.binary_ptr_op(bin_op, left, right)
            }
            _ => span_bug!(
                self.cur_span(),
                "Invalid MIR: bad LHS type for binop: {}",
                left.layout.ty
            ),
        }
    }
430
    /// Returns the result of the specified unary operation on `val`.
    ///
    /// (Unlike `binary_op`, this never produces an overflow flag: integer `Neg` wraps.)
    pub fn unary_op(
        &self,
        un_op: mir::UnOp,
        val: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        use rustc_middle::mir::UnOp::*;

        let layout = val.layout;
        trace!("Running unary op {:?}: {:?} ({})", un_op, val, layout.ty);

        // Dispatch on the operand type; each arm rejects operators invalid for that type.
        match layout.ty.kind() {
            ty::Bool => {
                let val = val.to_scalar();
                let val = val.to_bool()?;
                let res = match un_op {
                    Not => !val,
                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
                };
                interp_ok(ImmTy::from_bool(res, *self.tcx))
            }
            ty::Float(fty) => {
                let val = val.to_scalar();
                if un_op != Neg {
                    span_bug!(self.cur_span(), "Invalid float op {:?}", un_op);
                }

                // No NaN adjustment here, `-` is a bitwise operation!
                let res = match fty {
                    FloatTy::F16 => Scalar::from_f16(-val.to_f16()?),
                    FloatTy::F32 => Scalar::from_f32(-val.to_f32()?),
                    FloatTy::F64 => Scalar::from_f64(-val.to_f64()?),
                    FloatTy::F128 => Scalar::from_f128(-val.to_f128()?),
                };
                interp_ok(ImmTy::from_scalar(res, layout))
            }
            ty::Int(..) => {
                // Signed integers: negation wraps (no overflow reporting here).
                let val = val.to_scalar().to_int(layout.size)?;
                let res = match un_op {
                    Not => !val,
                    Neg => val.wrapping_neg(),
                    _ => span_bug!(self.cur_span(), "Invalid integer op {:?}", un_op),
                };
                // The `i128` result may not fit the actual type; truncate to its size.
                let res = ScalarInt::truncate_from_int(res, layout.size).0;
                interp_ok(ImmTy::from_scalar(res.into(), layout))
            }
            ty::Uint(..) => {
                let val = val.to_scalar().to_uint(layout.size)?;
                let res = match un_op {
                    Not => !val,
                    _ => span_bug!(self.cur_span(), "Invalid unsigned integer op {:?}", un_op),
                };
                // Truncate the `u128` intermediate back to the operand's width.
                let res = ScalarInt::truncate_from_uint(res, layout.size).0;
                interp_ok(ImmTy::from_scalar(res.into(), layout))
            }
            ty::RawPtr(..) | ty::Ref(..) => {
                // `PtrMetadata` extracts the metadata half of a (possibly wide) pointer.
                assert_eq!(un_op, PtrMetadata);
                let (_, meta) = val.to_scalar_and_meta();
                interp_ok(match meta {
                    MemPlaceMeta::Meta(scalar) => {
                        let ty = un_op.ty(*self.tcx, val.layout.ty);
                        let layout = self.layout_of(ty)?;
                        ImmTy::from_scalar(scalar, layout)
                    }
                    MemPlaceMeta::None => {
                        // Thin pointer: metadata type is `()`, which has no bytes.
                        let unit_layout = self.layout_of(self.tcx.types.unit)?;
                        ImmTy::uninit(unit_layout)
                    }
                })
            }
            _ => {
                bug!("Unexpected unary op argument {val:?}")
            }
        }
    }
507}