miri/operator.rs

use std::iter;

use rand::Rng;
use rand::seq::IteratorRandom;
use rustc_abi::Size;
use rustc_apfloat::{Float, FloatConvert};
use rustc_middle::mir;

use crate::*;

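// Miri's usual extension-trait pattern: the (empty) impl below makes every default method of
// `EvalContextExt` callable directly on a `MiriInterpCx`, so the interpreter can simply write
// `this.binary_ptr_op(...)` etc.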
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn binary_ptr_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        use rustc_middle::mir::BinOp::*;

        let this = self.eval_context_ref();
        trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right);

        interp_ok(match bin_op {
            Eq | Ne | Lt | Le | Gt | Ge => {
                assert_eq!(left.layout.backend_repr, right.layout.backend_repr); // types can differ, e.g. fn ptrs with different `for` (higher-ranked) binders
                let size = this.pointer_size();
                // Just compare the bits. ScalarPairs are compared lexicographically.
                // We thus always compare pairs and simply fill scalars up with 0.
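                // For instance, two wide `*const [u8]` pointers are ScalarPairs of
                // (data address, length), so they compare by address first and by length only
                // on a tie; a thin pointer is padded to (address, 0) and compared the same way.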
                let left = match **left {
                    Immediate::Scalar(l) => (l.to_bits(size)?, 0),
                    Immediate::ScalarPair(l1, l2) => (l1.to_bits(size)?, l2.to_bits(size)?),
                    Immediate::Uninit => panic!("we should never see uninit data here"),
                };
                let right = match **right {
                    Immediate::Scalar(r) => (r.to_bits(size)?, 0),
                    Immediate::ScalarPair(r1, r2) => (r1.to_bits(size)?, r2.to_bits(size)?),
                    Immediate::Uninit => panic!("we should never see uninit data here"),
                };
                let res = match bin_op {
                    Eq => left == right,
                    Ne => left != right,
                    Lt => left < right,
                    Le => left <= right,
                    Gt => left > right,
                    Ge => left >= right,
                    _ => bug!(),
                };
                ImmTy::from_bool(res, *this.tcx)
            }

            // Some more operations are possible with atomics.
            // The return value always has the provenance of the *left* operand.
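            // This is reached e.g. for atomic read-modify-write operations on pointers (think
            // tagging the low bits of an `AtomicPtr` with a fetch-or): the arithmetic is done
            // on the address, and the old (left) pointer's provenance is re-attached below.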
            Add | Sub | BitOr | BitAnd | BitXor => {
                assert!(left.layout.ty.is_raw_ptr());
                assert!(right.layout.ty.is_raw_ptr());
                let ptr = left.to_scalar().to_pointer(this)?;
                // We do the actual operation with usize-typed scalars.
                let left = ImmTy::from_uint(ptr.addr().bytes(), this.machine.layouts.usize);
                let right = ImmTy::from_uint(
                    right.to_scalar().to_target_usize(this)?,
                    this.machine.layouts.usize,
                );
                let result = this.binary_op(bin_op, &left, &right)?;
                // Construct a new pointer with the provenance of `ptr` (the LHS).
                let result_ptr = Pointer::new(
                    ptr.provenance,
                    Size::from_bytes(result.to_scalar().to_target_usize(this)?),
                );

                ImmTy::from_scalar(Scalar::from_maybe_pointer(result_ptr, this), left.layout)
            }

            _ => span_bug!(this.cur_span(), "Invalid operator on pointers: {:?}", bin_op),
        })
    }

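    /// Generate the NaN returned by a float operation whose result is a NaN, implementing
    /// Miri's NaN non-determinism. `inputs` are the operation's floating-point operands
    /// (presumably forwarded here by the engine's NaN-generation hook); the candidates are
    /// the preferred quiet NaN plus propagated input NaNs, with a randomly chosen sign.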
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(&self, inputs: &[F1]) -> F2 {
        /// Make the given NaN a signaling NaN.
        /// Returns `None` if this would not result in a NaN.
        fn make_signaling<F: Float>(f: F) -> Option<F> {
            // The quiet/signaling bit is the leftmost bit in the mantissa.
            // That's position `PRECISION-1`, since `PRECISION` includes the fixed leading 1 bit,
            // and then we subtract 1 more since this is 0-indexed.
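            // Concretely, for IEEE binary32 `PRECISION` is 24, so the mask is `1 << 22`,
            // i.e. the most significant bit of the 23-bit stored mantissa.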
            let quiet_bit_mask = 1 << (F::PRECISION - 2);
            // Unset the bit. Double-check that this wasn't the last bit set in the payload
            // (which would turn the NaN into an infinity).
            let f = F::from_bits(f.to_bits() & !quiet_bit_mask);
            if f.is_nan() { Some(f) } else { None }
        }

        let this = self.eval_context_ref();
        let mut rand = this.machine.rng.borrow_mut();
        // Assemble an iterator of possible NaNs: preferred, quieting propagation, unchanged propagation.
        // On some targets there are more possibilities; for now we just generate those options that
        // are possible everywhere.
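        // The preferred NaN is the positive quiet NaN with an all-zero payload
        // (bit pattern 0x7fc00000 for f32, 0x7ff8000000000000 for f64).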
        let preferred_nan = F2::qnan(Some(0));
        let nans = iter::once(preferred_nan)
            .chain(inputs.iter().filter(|f| f.is_nan()).map(|&f| {
                // Regular apfloat cast is quieting.
                f.convert(&mut false).value
            }))
            .chain(inputs.iter().filter(|f| f.is_signaling()).filter_map(|&f| {
                let f: F2 = f.convert(&mut false).value;
                // We have to de-quiet this again for unchanged propagation.
                make_signaling(f)
            }));
        // Pick one of the NaNs.
        let nan = nans.choose(&mut *rand).unwrap();
        // Non-deterministically flip the sign.
        if rand.random() {
            // This will properly flip even for NaN.
            -nan
        } else {
            nan
        }
    }

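    /// Non-deterministically pick one of two equal float operands. Used e.g. by the float
    /// `min`/`max` implementations, where `+0.0` and `-0.0` compare equal, so that programs
    /// cannot rely on which of the two equal values they get back.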
    fn equal_float_min_max<F: Float>(&self, a: F, b: F) -> F {
        let this = self.eval_context_ref();
        // Return one side non-deterministically.
        let mut rand = this.machine.rng.borrow_mut();
        if rand.random() { a } else { b }
    }
}