use rustc_middle::mir::BinOp;
use rustc_middle::{mir, ty};

use self::helpers::check_arg_count;
use crate::*;

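/// The read-modify-write operations supported by the atomic intrinsics.
/// For `MirOp`, the `bool` indicates whether the result of the MIR binary
/// operation is bit-negated before being stored back (used here only for `nand`).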
pub enum AtomicOp {
    MirOp(mir::BinOp, bool),
    Max,
    Min,
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
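    /// Calls the atomic intrinsic `intrinsic_name`; the `atomic_` prefix has already
    /// been stripped from the name.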
    fn emulate_atomic_intrinsic(
        &mut self,
        intrinsic_name: &str,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx, EmulateItemResult> {
        let this = self.eval_context_mut();

        let intrinsic_structure: Vec<_> = intrinsic_name.split('_').collect();

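        // Helpers that translate the memory-ordering suffix of the intrinsic name
        // into the corresponding atomic ordering.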
        fn read_ord(ord: &str) -> AtomicReadOrd {
            match ord {
                "seqcst" => AtomicReadOrd::SeqCst,
                "acquire" => AtomicReadOrd::Acquire,
                "relaxed" => AtomicReadOrd::Relaxed,
                _ => panic!("invalid read ordering `{ord}`"),
            }
        }

        fn write_ord(ord: &str) -> AtomicWriteOrd {
            match ord {
                "seqcst" => AtomicWriteOrd::SeqCst,
                "release" => AtomicWriteOrd::Release,
                "relaxed" => AtomicWriteOrd::Relaxed,
                _ => panic!("invalid write ordering `{ord}`"),
            }
        }

        fn rw_ord(ord: &str) -> AtomicRwOrd {
            match ord {
                "seqcst" => AtomicRwOrd::SeqCst,
                "acqrel" => AtomicRwOrd::AcqRel,
                "acquire" => AtomicRwOrd::Acquire,
                "release" => AtomicRwOrd::Release,
                "relaxed" => AtomicRwOrd::Relaxed,
                _ => panic!("invalid read-write ordering `{ord}`"),
            }
        }

        fn fence_ord(ord: &str) -> AtomicFenceOrd {
            match ord {
                "seqcst" => AtomicFenceOrd::SeqCst,
                "acqrel" => AtomicFenceOrd::AcqRel,
                "acquire" => AtomicFenceOrd::Acquire,
                "release" => AtomicFenceOrd::Release,
                _ => panic!("invalid fence ordering `{ord}`"),
            }
        }

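        // Dispatch on the rest of the intrinsic name.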
        match &*intrinsic_structure {
            ["load", ord] => this.atomic_load(args, dest, read_ord(ord))?,
            ["store", ord] => this.atomic_store(args, write_ord(ord))?,

            ["fence", ord] => this.atomic_fence_intrinsic(args, fence_ord(ord))?,
            ["singlethreadfence", ord] => this.compiler_fence_intrinsic(args, fence_ord(ord))?,

            ["xchg", ord] => this.atomic_exchange(args, dest, rw_ord(ord))?,
            ["cxchg", ord1, ord2] =>
                this.atomic_compare_exchange(args, dest, rw_ord(ord1), read_ord(ord2))?,
            ["cxchgweak", ord1, ord2] =>
                this.atomic_compare_exchange_weak(args, dest, rw_ord(ord1), read_ord(ord2))?,

            ["or", ord] =>
                this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), rw_ord(ord))?,
            ["xor", ord] =>
                this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), rw_ord(ord))?,
            ["and", ord] =>
                this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), rw_ord(ord))?,
            ["nand", ord] =>
                this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), rw_ord(ord))?,
            ["xadd", ord] =>
                this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), rw_ord(ord))?,
            ["xsub", ord] =>
                this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), rw_ord(ord))?,
            // `min`/`max` are for signed integers, `umin`/`umax` for unsigned ones;
            // the asserts check that the operand type matches the intrinsic.
            ["min", ord] => {
                assert!(matches!(args[1].layout.ty.kind(), ty::Int(_)));
                this.atomic_rmw_op(args, dest, AtomicOp::Min, rw_ord(ord))?;
            }
            ["umin", ord] => {
                assert!(matches!(args[1].layout.ty.kind(), ty::Uint(_)));
                this.atomic_rmw_op(args, dest, AtomicOp::Min, rw_ord(ord))?;
            }
            ["max", ord] => {
                assert!(matches!(args[1].layout.ty.kind(), ty::Int(_)));
                this.atomic_rmw_op(args, dest, AtomicOp::Max, rw_ord(ord))?;
            }
            ["umax", ord] => {
                assert!(matches!(args[1].layout.ty.kind(), ty::Uint(_)));
                this.atomic_rmw_op(args, dest, AtomicOp::Max, rw_ord(ord))?;
            }

            _ => return interp_ok(EmulateItemResult::NotSupported),
        }
        interp_ok(EmulateItemResult::NeedsReturn)
    }
}

impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
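    /// Atomically loads from the place referenced by the first argument and writes the
    /// value to `dest`.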
    fn atomic_load(
        &mut self,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let [place] = check_arg_count(args)?;
        let place = this.deref_pointer(place)?;

        // Perform atomic load.
        let val = this.read_scalar_atomic(&place, atomic)?;
        // Perform regular (non-atomic) store into the destination.
        this.write_scalar(val, dest)?;
        interp_ok(())
    }

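    /// Atomically stores the second argument into the place referenced by the first.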
    fn atomic_store(&mut self, args: &[OpTy<'tcx>], atomic: AtomicWriteOrd) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let [place, val] = check_arg_count(args)?;
        let place = this.deref_pointer(place)?;

        // Perform regular (non-atomic) load of the value to store.
        let val = this.read_scalar(val)?;
        // Perform atomic store.
        this.write_scalar_atomic(val, &place, atomic)?;
        interp_ok(())
    }

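    /// A compiler fence restricts compiler reordering only; this implementation just
    /// validates the arguments and otherwise does nothing.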
    fn compiler_fence_intrinsic(
        &mut self,
        args: &[OpTy<'tcx>],
        atomic: AtomicFenceOrd,
    ) -> InterpResult<'tcx> {
        let [] = check_arg_count(args)?;
        // The ordering is ignored: compiler fences do not synchronize with other threads.
        let _ = atomic;
        interp_ok(())
    }

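    /// Emits an atomic fence with the given ordering.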
    fn atomic_fence_intrinsic(
        &mut self,
        args: &[OpTy<'tcx>],
        atomic: AtomicFenceOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let [] = check_arg_count(args)?;
        this.atomic_fence(atomic)?;
        interp_ok(())
    }

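    /// Performs the read-modify-write operation `atomic_op` on `place` with the given
    /// ordering and writes the old value to `dest`.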
    fn atomic_rmw_op(
        &mut self,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx>,
        atomic_op: AtomicOp,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let [place, rhs] = check_arg_count(args)?;
        let place = this.deref_pointer(place)?;
        let rhs = this.read_immediate(rhs)?;

        if !place.layout.ty.is_integral() && !place.layout.ty.is_raw_ptr() {
            span_bug!(
                this.cur_span(),
                "atomic arithmetic operations only work on integer and raw pointer types",
            );
        }
        if rhs.layout.ty != place.layout.ty {
            span_bug!(this.cur_span(), "atomic arithmetic operation type mismatch");
        }

        match atomic_op {
            AtomicOp::Min => {
                let old = this.atomic_min_max_scalar(&place, rhs, /* min */ true, atomic)?;
                this.write_immediate(*old, dest)?; // old value is returned
                interp_ok(())
            }
            AtomicOp::Max => {
                let old = this.atomic_min_max_scalar(&place, rhs, /* min */ false, atomic)?;
                this.write_immediate(*old, dest)?; // old value is returned
                interp_ok(())
            }
            AtomicOp::MirOp(op, not) => {
                let old = this.atomic_rmw_op_immediate(&place, &rhs, op, not, atomic)?;
                this.write_immediate(*old, dest)?; // old value is returned
                interp_ok(())
            }
        }
    }

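    /// Atomically swaps the value at `place` with the new value and writes the old
    /// value to `dest`.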
    fn atomic_exchange(
        &mut self,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx>,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let [place, new] = check_arg_count(args)?;
        let place = this.deref_pointer(place)?;
        let new = this.read_scalar(new)?;

        let old = this.atomic_exchange_scalar(&place, new, atomic)?;
        this.write_scalar(old, dest)?; // old value is returned
        interp_ok(())
    }

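    /// Shared implementation of the strong and weak compare-exchange intrinsics.
    /// `can_fail_spuriously` selects the weak variant, which may fail even when the
    /// comparison succeeds.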
    fn atomic_compare_exchange_impl(
        &mut self,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx>,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
        can_fail_spuriously: bool,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let [place, expect_old, new] = check_arg_count(args)?;
        let place = this.deref_pointer(place)?;
        let expect_old = this.read_immediate(expect_old)?; // read as immediate for the comparison
        let new = this.read_scalar(new)?;

        let old = this.atomic_compare_exchange_scalar(
            &place,
            &expect_old,
            new,
            success,
            fail,
            can_fail_spuriously,
        )?;

        // Return the old value.
        this.write_immediate(old, dest)?;
        interp_ok(())
    }

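    /// Strong compare-exchange: never fails spuriously.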
    fn atomic_compare_exchange(
        &mut self,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx>,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
    ) -> InterpResult<'tcx> {
        self.atomic_compare_exchange_impl(args, dest, success, fail, false)
    }

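    /// Weak compare-exchange: may fail spuriously even when the comparison succeeds.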
    fn atomic_compare_exchange_weak(
        &mut self,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx>,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
    ) -> InterpResult<'tcx> {
        self.atomic_compare_exchange_impl(args, dest, success, fail, true)
    }
}