// rustc_const_eval/interpret/operator.rs

1use either::Either;
2use rustc_abi::Size;
3use rustc_apfloat::{Float, FloatConvert};
4use rustc_middle::mir::NullOp;
5use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
6use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
7use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty};
8use rustc_middle::{bug, mir, span_bug};
9use rustc_span::sym;
10use tracing::trace;
11
12use super::{ImmTy, InterpCx, Machine, MemPlaceMeta, interp_ok, throw_ub};
13
14impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
15    fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> ImmTy<'tcx, M::Provenance> {
16        let res = Ord::cmp(&lhs, &rhs);
17        return ImmTy::from_ordering(res, *self.tcx);
18    }
19
20    fn binary_char_op(&self, bin_op: mir::BinOp, l: char, r: char) -> ImmTy<'tcx, M::Provenance> {
21        use rustc_middle::mir::BinOp::*;
22
23        if bin_op == Cmp {
24            return self.three_way_compare(l, r);
25        }
26
27        let res = match bin_op {
28            Eq => l == r,
29            Ne => l != r,
30            Lt => l < r,
31            Le => l <= r,
32            Gt => l > r,
33            Ge => l >= r,
34            _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
35        };
36        ImmTy::from_bool(res, *self.tcx)
37    }
38
39    fn binary_bool_op(&self, bin_op: mir::BinOp, l: bool, r: bool) -> ImmTy<'tcx, M::Provenance> {
40        use rustc_middle::mir::BinOp::*;
41
42        let res = match bin_op {
43            Eq => l == r,
44            Ne => l != r,
45            Lt => l < r,
46            Le => l <= r,
47            Gt => l > r,
48            Ge => l >= r,
49            BitAnd => l & r,
50            BitOr => l | r,
51            BitXor => l ^ r,
52            _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
53        };
54        ImmTy::from_bool(res, *self.tcx)
55    }
56
57    fn binary_float_op<F: Float + FloatConvert<F> + Into<Scalar<M::Provenance>>>(
58        &self,
59        bin_op: mir::BinOp,
60        layout: TyAndLayout<'tcx>,
61        l: F,
62        r: F,
63    ) -> ImmTy<'tcx, M::Provenance> {
64        use rustc_middle::mir::BinOp::*;
65
66        // Performs appropriate non-deterministic adjustments of NaN results.
67        let adjust_nan = |f: F| -> F { self.adjust_nan(f, &[l, r]) };
68
69        match bin_op {
70            Eq => ImmTy::from_bool(l == r, *self.tcx),
71            Ne => ImmTy::from_bool(l != r, *self.tcx),
72            Lt => ImmTy::from_bool(l < r, *self.tcx),
73            Le => ImmTy::from_bool(l <= r, *self.tcx),
74            Gt => ImmTy::from_bool(l > r, *self.tcx),
75            Ge => ImmTy::from_bool(l >= r, *self.tcx),
76            Add => ImmTy::from_scalar(adjust_nan((l + r).value).into(), layout),
77            Sub => ImmTy::from_scalar(adjust_nan((l - r).value).into(), layout),
78            Mul => ImmTy::from_scalar(adjust_nan((l * r).value).into(), layout),
79            Div => ImmTy::from_scalar(adjust_nan((l / r).value).into(), layout),
80            Rem => ImmTy::from_scalar(adjust_nan((l % r).value).into(), layout),
81            _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
82        }
83    }
84
    /// Evaluates an integer binary operation `left <bin_op> right`.
    ///
    /// Handles shifts (whose RHS may have a different integer type than the
    /// LHS), signed and unsigned arithmetic, comparisons, bitwise operations,
    /// and three-way `Cmp`. The `*Unchecked` operator variants raise UB on
    /// overflow; the `*WithOverflow` variants return a `(result, overflowed)`
    /// pair.
    fn binary_int_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        use rustc_middle::mir::BinOp::*;

        // This checks the size, so that we can just assert it below.
        let l = left.to_scalar_int()?;
        let r = right.to_scalar_int()?;
        // Prepare to convert the values to signed or unsigned form.
        // These are closures because not every operation needs every form.
        let l_signed = || l.to_int(left.layout.size);
        let l_unsigned = || l.to_uint(left.layout.size);
        let r_signed = || r.to_int(right.layout.size);
        let r_unsigned = || r.to_uint(right.layout.size);

        // For the `*Unchecked` ops, overflow is UB; remember which intrinsic
        // to name in the error message.
        let throw_ub_on_overflow = match bin_op {
            AddUnchecked => Some(sym::unchecked_add),
            SubUnchecked => Some(sym::unchecked_sub),
            MulUnchecked => Some(sym::unchecked_mul),
            ShlUnchecked => Some(sym::unchecked_shl),
            ShrUnchecked => Some(sym::unchecked_shr),
            _ => None,
        };
        let with_overflow = bin_op.is_overflowing();

        // Shift ops can have an RHS with a different numeric type.
        if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
            let l_bits = left.layout.size.bits();
            // Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
            // the one MIR operator that does *not* directly map to a single LLVM operation.)
            let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
                let shift_amount = r_signed();
                let rem = shift_amount.rem_euclid(l_bits.into());
                // `rem` is guaranteed positive, so the `unwrap` cannot fail
                (u128::try_from(rem).unwrap(), rem != shift_amount)
            } else {
                let shift_amount = r_unsigned();
                let rem = shift_amount.rem_euclid(l_bits.into());
                (rem, rem != shift_amount)
            };
            let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this in the range `0..size` so this will always fit
            // Compute the shifted result.
            let result = if left.layout.backend_repr.is_signed() {
                let l = l_signed();
                let result = match bin_op {
                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                    _ => bug!(),
                };
                ScalarInt::truncate_from_int(result, left.layout.size).0
            } else {
                let l = l_unsigned();
                let result = match bin_op {
                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                    _ => bug!(),
                };
                ScalarInt::truncate_from_uint(result, left.layout.size).0
            };

            // For unchecked shifts, an out-of-range RHS is UB; report the
            // original (untruncated) shift amount in the error.
            if overflow && let Some(intrinsic) = throw_ub_on_overflow {
                throw_ub!(ShiftOverflow {
                    intrinsic,
                    shift_amount: if right.layout.backend_repr.is_signed() {
                        Either::Right(r_signed())
                    } else {
                        Either::Left(r_unsigned())
                    }
                });
            }

            return interp_ok(ImmTy::from_scalar_int(result, left.layout));
        }

        // For the remaining ops, the types must be the same on both sides
        if left.layout.ty != right.layout.ty {
            span_bug!(
                self.cur_span(),
                "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
                l_ty = left.layout.ty,
                r_ty = right.layout.ty,
            )
        }

        let size = left.layout.size;

        // Operations that need special treatment for signed integers
        if left.layout.backend_repr.is_signed() {
            // Signed comparisons.
            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                Lt => Some(i128::lt),
                Le => Some(i128::le),
                Gt => Some(i128::gt),
                Ge => Some(i128::ge),
                _ => None,
            };
            if let Some(op) = op {
                return interp_ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx));
            }
            if bin_op == Cmp {
                return interp_ok(self.three_way_compare(l_signed(), r_signed()));
            }
            // Signed arithmetic; division/remainder by zero is UB.
            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
                Div if r.is_null() => throw_ub!(DivisionByZero),
                Rem if r.is_null() => throw_ub!(RemainderByZero),
                Div => Some(i128::overflowing_div),
                Rem => Some(i128::overflowing_rem),
                Add | AddUnchecked | AddWithOverflow => Some(i128::overflowing_add),
                Sub | SubUnchecked | SubWithOverflow => Some(i128::overflowing_sub),
                Mul | MulUnchecked | MulWithOverflow => Some(i128::overflowing_mul),
                _ => None,
            };
            if let Some(op) = op {
                let l = l_signed();
                let r = r_signed();

                // We need a special check for overflowing Rem and Div since they are *UB*
                // on overflow, which can happen with "int_min $OP -1".
                if matches!(bin_op, Rem | Div) {
                    if l == size.signed_int_min() && r == -1 {
                        if bin_op == Rem {
                            throw_ub!(RemainderOverflow)
                        } else {
                            throw_ub!(DivisionOverflow)
                        }
                    }
                }

                let (result, oflo) = op(l, r);
                // This may be out-of-bounds for the result type, so we have to truncate.
                // If that truncation loses any information, we have an overflow.
                let (result, lossy) = ScalarInt::truncate_from_int(result, left.layout.size);
                let overflow = oflo || lossy;
                if overflow && let Some(intrinsic) = throw_ub_on_overflow {
                    throw_ub!(ArithOverflow { intrinsic });
                }
                let res = ImmTy::from_scalar_int(result, left.layout);
                return interp_ok(if with_overflow {
                    // `*WithOverflow` ops return a `(value, overflowed)` pair.
                    let overflow = ImmTy::from_bool(overflow, *self.tcx);
                    ImmTy::from_pair(res, overflow, self)
                } else {
                    res
                });
            }
        }
        // From here on it's okay to treat everything as unsigned.
        let l = l_unsigned();
        let r = r_unsigned();

        if bin_op == Cmp {
            return interp_ok(self.three_way_compare(l, r));
        }

        interp_ok(match bin_op {
            Eq => ImmTy::from_bool(l == r, *self.tcx),
            Ne => ImmTy::from_bool(l != r, *self.tcx),

            Lt => ImmTy::from_bool(l < r, *self.tcx),
            Le => ImmTy::from_bool(l <= r, *self.tcx),
            Gt => ImmTy::from_bool(l > r, *self.tcx),
            Ge => ImmTy::from_bool(l >= r, *self.tcx),

            BitOr => ImmTy::from_uint(l | r, left.layout),
            BitAnd => ImmTy::from_uint(l & r, left.layout),
            BitXor => ImmTy::from_uint(l ^ r, left.layout),

            _ => {
                // Unsigned arithmetic. Signed values were fully handled above.
                assert!(!left.layout.backend_repr.is_signed());
                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                    Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
                    Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
                    Mul | MulUnchecked | MulWithOverflow => u128::overflowing_mul,
                    Div if r == 0 => throw_ub!(DivisionByZero),
                    Rem if r == 0 => throw_ub!(RemainderByZero),
                    Div => u128::overflowing_div,
                    Rem => u128::overflowing_rem,
                    _ => span_bug!(
                        self.cur_span(),
                        "invalid binary op {:?}: {:?}, {:?} (both {})",
                        bin_op,
                        left,
                        right,
                        right.layout.ty,
                    ),
                };
                let (result, oflo) = op(l, r);
                // Truncate to target type.
                // If that truncation loses any information, we have an overflow.
                let (result, lossy) = ScalarInt::truncate_from_uint(result, left.layout.size);
                let overflow = oflo || lossy;
                if overflow && let Some(intrinsic) = throw_ub_on_overflow {
                    throw_ub!(ArithOverflow { intrinsic });
                }
                let res = ImmTy::from_scalar_int(result, left.layout);
                if with_overflow {
                    // `*WithOverflow` ops return a `(value, overflowed)` pair.
                    let overflow = ImmTy::from_bool(overflow, *self.tcx);
                    ImmTy::from_pair(res, overflow, self)
                } else {
                    res
                }
            }
        })
    }
289
290    /// Computes the total size of this access, `count * elem_size`,
291    /// checking for overflow beyond isize::MAX.
292    pub fn compute_size_in_bytes(&self, elem_size: Size, count: u64) -> Option<Size> {
293        // `checked_mul` applies `u64` limits independent of the target pointer size... but the
294        // subsequent check for `max_size_of_val` means we also handle 32bit targets correctly.
295        // (We cannot use `Size::checked_mul` as that enforces `obj_size_bound` as the limit, which
296        // would be wrong here.)
297        elem_size
298            .bytes()
299            .checked_mul(count)
300            .map(Size::from_bytes)
301            .filter(|&total| total <= self.max_size_of_val())
302    }
303
    /// Evaluates a binary operation whose LHS is a pointer.
    ///
    /// Only `Offset` is handled here directly; every other pointer operation
    /// is forwarded to the `Machine` hook (`M::binary_ptr_op`) so that
    /// machines such as Miri can support more operations.
    fn binary_ptr_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        use rustc_middle::mir::BinOp::*;

        match bin_op {
            // Pointer ops that are always supported.
            Offset => {
                let ptr = left.to_scalar().to_pointer(self)?;
                let pointee_ty = left.layout.ty.builtin_deref(true).unwrap();
                let pointee_layout = self.layout_of(pointee_ty)?;
                assert!(pointee_layout.is_sized());

                // The size always fits in `i64` as it can be at most `isize::MAX`.
                let pointee_size = i64::try_from(pointee_layout.size.bytes()).unwrap();
                // This uses the same type as `right`, which can be `isize` or `usize`.
                // `pointee_size` is guaranteed to fit into both types.
                let pointee_size = ImmTy::from_int(pointee_size, right.layout);
                // Multiply element size and element count.
                let (val, overflowed) = self
                    .binary_op(mir::BinOp::MulWithOverflow, right, &pointee_size)?
                    .to_scalar_pair();
                // This must not overflow.
                if overflowed.to_bool()? {
                    throw_ub!(PointerArithOverflow)
                }

                let offset_bytes = val.to_target_isize(self)?;
                if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
                    // We were supposed to do an unsigned offset but the result is negative -- this
                    // can only mean that the cast wrapped around.
                    throw_ub!(PointerArithOverflow)
                }
                // Check that the offset keeps the pointer in bounds, then apply it.
                let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
                interp_ok(ImmTy::from_scalar(
                    Scalar::from_maybe_pointer(offset_ptr, self),
                    left.layout,
                ))
            }

            // Fall back to machine hook so Miri can support more pointer ops.
            _ => M::binary_ptr_op(self, bin_op, left, right),
        }
    }
351
    /// Returns the result of the specified operation.
    ///
    /// Whether this produces a scalar or a pair depends on the specific `bin_op`.
    pub fn binary_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        trace!(
            "Running binary op {:?}: {:?} ({}), {:?} ({})",
            bin_op, *left, left.layout.ty, *right, right.layout.ty
        );

        // Dispatch on the type of the LHS; each case asserts what it needs
        // about the RHS type.
        match left.layout.ty.kind() {
            ty::Char => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar();
                let right = right.to_scalar();
                interp_ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
            }
            ty::Bool => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar();
                let right = right.to_scalar();
                interp_ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
            }
            ty::Float(fty) => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let layout = left.layout;
                let left = left.to_scalar();
                let right = right.to_scalar();
                interp_ok(match fty {
                    FloatTy::F16 => {
                        self.binary_float_op(bin_op, layout, left.to_f16()?, right.to_f16()?)
                    }
                    FloatTy::F32 => {
                        self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
                    }
                    FloatTy::F64 => {
                        self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
                    }
                    FloatTy::F128 => {
                        self.binary_float_op(bin_op, layout, left.to_f128()?, right.to_f128()?)
                    }
                })
            }
            _ if left.layout.ty.is_integral() => {
                // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
                assert!(
                    right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {} {:?} {}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                self.binary_int_op(bin_op, left, right)
            }
            _ if left.layout.ty.is_any_ptr() => {
                // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
                // (Even when both sides are pointers, their type might differ, see issue #91636)
                assert!(
                    right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {} {:?} {}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                self.binary_ptr_op(bin_op, left, right)
            }
            _ => span_bug!(
                self.cur_span(),
                "Invalid MIR: bad LHS type for binop: {}",
                left.layout.ty
            ),
        }
    }
431
    /// Returns the result of the specified unary operation applied to `val`.
    ///
    /// (Note: despite older wording, this returns only the resulting `ImmTy`;
    /// there is no separate overflow flag.)
    pub fn unary_op(
        &self,
        un_op: mir::UnOp,
        val: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        use rustc_middle::mir::UnOp::*;

        let layout = val.layout;
        trace!("Running unary op {:?}: {:?} ({})", un_op, val, layout.ty);

        // Dispatch on the operand type; only certain (type, op) pairs are
        // valid MIR.
        match layout.ty.kind() {
            ty::Bool => {
                let val = val.to_scalar();
                let val = val.to_bool()?;
                let res = match un_op {
                    Not => !val,
                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
                };
                interp_ok(ImmTy::from_bool(res, *self.tcx))
            }
            ty::Float(fty) => {
                let val = val.to_scalar();
                if un_op != Neg {
                    span_bug!(self.cur_span(), "Invalid float op {:?}", un_op);
                }

                // No NaN adjustment here, `-` is a bitwise operation!
                let res = match fty {
                    FloatTy::F16 => Scalar::from_f16(-val.to_f16()?),
                    FloatTy::F32 => Scalar::from_f32(-val.to_f32()?),
                    FloatTy::F64 => Scalar::from_f64(-val.to_f64()?),
                    FloatTy::F128 => Scalar::from_f128(-val.to_f128()?),
                };
                interp_ok(ImmTy::from_scalar(res, layout))
            }
            ty::Int(..) => {
                let val = val.to_scalar().to_int(layout.size)?;
                let res = match un_op {
                    Not => !val,
                    // Wrapping semantics: negating the minimum value yields itself.
                    Neg => val.wrapping_neg(),
                    _ => span_bug!(self.cur_span(), "Invalid integer op {:?}", un_op),
                };
                // Truncate the i128 intermediate back to the operand's width.
                let res = ScalarInt::truncate_from_int(res, layout.size).0;
                interp_ok(ImmTy::from_scalar(res.into(), layout))
            }
            ty::Uint(..) => {
                let val = val.to_scalar().to_uint(layout.size)?;
                let res = match un_op {
                    Not => !val,
                    _ => span_bug!(self.cur_span(), "Invalid unsigned integer op {:?}", un_op),
                };
                // Truncate the u128 intermediate back to the operand's width.
                let res = ScalarInt::truncate_from_uint(res, layout.size).0;
                interp_ok(ImmTy::from_scalar(res.into(), layout))
            }
            ty::RawPtr(..) | ty::Ref(..) => {
                // The only unary op on pointers is extracting the metadata.
                assert_eq!(un_op, PtrMetadata);
                let (_, meta) = val.to_scalar_and_meta();
                interp_ok(match meta {
                    MemPlaceMeta::Meta(scalar) => {
                        let ty = un_op.ty(*self.tcx, val.layout.ty);
                        let layout = self.layout_of(ty)?;
                        ImmTy::from_scalar(scalar, layout)
                    }
                    MemPlaceMeta::None => {
                        // Thin pointers have no metadata; produce a unit value.
                        let unit_layout = self.layout_of(self.tcx.types.unit)?;
                        ImmTy::uninit(unit_layout)
                    }
                })
            }
            _ => {
                bug!("Unexpected unary op argument {val:?}")
            }
        }
    }
508
509    pub fn nullary_op(
510        &self,
511        null_op: NullOp<'tcx>,
512        arg_ty: Ty<'tcx>,
513    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
514        use rustc_middle::mir::NullOp::*;
515
516        let layout = self.layout_of(arg_ty)?;
517        let usize_layout = || self.layout_of(self.tcx.types.usize).unwrap();
518
519        interp_ok(match null_op {
520            SizeOf => {
521                if !layout.is_sized() {
522                    span_bug!(self.cur_span(), "unsized type for `NullaryOp::SizeOf`");
523                }
524                let val = layout.size.bytes();
525                ImmTy::from_uint(val, usize_layout())
526            }
527            AlignOf => {
528                if !layout.is_sized() {
529                    span_bug!(self.cur_span(), "unsized type for `NullaryOp::AlignOf`");
530                }
531                let val = layout.align.abi.bytes();
532                ImmTy::from_uint(val, usize_layout())
533            }
534            OffsetOf(fields) => {
535                let val =
536                    self.tcx.offset_of_subfield(self.typing_env, layout, fields.iter()).bytes();
537                ImmTy::from_uint(val, usize_layout())
538            }
539            UbChecks => ImmTy::from_bool(M::ub_checks(self)?, *self.tcx),
540            ContractChecks => ImmTy::from_bool(M::contract_checks(self)?, *self.tcx),
541        })
542    }
543}