1use rustc_middle::mir::BinOp;
2use rustc_middle::ty::AtomicOrdering;
3use rustc_middle::{mir, ty};
4
5use super::check_intrinsic_arg_count;
6use crate::*;
7
/// The read-modify-write operation an atomic RMW intrinsic should perform
/// on the value in memory.
pub enum AtomicRmwOp {
    /// Apply a MIR binary operator to the old value and the operand
    /// (used for `or`/`xor`/`and`/`nand`/`xadd`/`xsub`).
    MirOp {
        /// The binary operator to apply (e.g. `BitOr`, `Add`).
        op: mir::BinOp,
        /// If `true`, bitwise-negate the result of `op` before storing it back;
        /// only set for `nand` (`!(old & operand)`).
        neg: bool,
    },
    /// Store the maximum of the old value and the operand
    /// (signedness comes from the operand's type: `max`/`umax`).
    Max,
    /// Store the minimum of the old value and the operand
    /// (signedness comes from the operand's type: `min`/`umin`).
    Min,
}
18
19impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
20pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
21 fn emulate_atomic_intrinsic(
24 &mut self,
25 intrinsic_name: &str,
26 generic_args: ty::GenericArgsRef<'tcx>,
27 args: &[OpTy<'tcx>],
28 dest: &MPlaceTy<'tcx>,
29 ) -> InterpResult<'tcx, EmulateItemResult> {
30 let this = self.eval_context_mut();
31
32 let get_ord_at = |i: usize| {
33 let ordering = generic_args.const_at(i).to_value();
34 ordering.valtree.unwrap_branch()[0].unwrap_leaf().to_atomic_ordering()
35 };
36
37 fn read_ord(ord: AtomicOrdering) -> AtomicReadOrd {
38 match ord {
39 AtomicOrdering::SeqCst => AtomicReadOrd::SeqCst,
40 AtomicOrdering::Acquire => AtomicReadOrd::Acquire,
41 AtomicOrdering::Relaxed => AtomicReadOrd::Relaxed,
42 _ => panic!("invalid read ordering `{ord:?}`"),
43 }
44 }
45
46 fn write_ord(ord: AtomicOrdering) -> AtomicWriteOrd {
47 match ord {
48 AtomicOrdering::SeqCst => AtomicWriteOrd::SeqCst,
49 AtomicOrdering::Release => AtomicWriteOrd::Release,
50 AtomicOrdering::Relaxed => AtomicWriteOrd::Relaxed,
51 _ => panic!("invalid write ordering `{ord:?}`"),
52 }
53 }
54
55 fn rw_ord(ord: AtomicOrdering) -> AtomicRwOrd {
56 match ord {
57 AtomicOrdering::SeqCst => AtomicRwOrd::SeqCst,
58 AtomicOrdering::AcqRel => AtomicRwOrd::AcqRel,
59 AtomicOrdering::Acquire => AtomicRwOrd::Acquire,
60 AtomicOrdering::Release => AtomicRwOrd::Release,
61 AtomicOrdering::Relaxed => AtomicRwOrd::Relaxed,
62 }
63 }
64
65 fn fence_ord(ord: AtomicOrdering) -> AtomicFenceOrd {
66 match ord {
67 AtomicOrdering::SeqCst => AtomicFenceOrd::SeqCst,
68 AtomicOrdering::AcqRel => AtomicFenceOrd::AcqRel,
69 AtomicOrdering::Acquire => AtomicFenceOrd::Acquire,
70 AtomicOrdering::Release => AtomicFenceOrd::Release,
71 _ => panic!("invalid fence ordering `{ord:?}`"),
72 }
73 }
74
75 match intrinsic_name {
76 "load" => {
77 let ord = get_ord_at(1);
78 this.atomic_load(args, dest, read_ord(ord))?;
79 }
80
81 "store" => {
82 let ord = get_ord_at(1);
83 this.atomic_store(args, write_ord(ord))?
84 }
85
86 "fence" => {
87 let ord = get_ord_at(0);
88 this.atomic_fence_intrinsic(args, fence_ord(ord))?
89 }
90 "singlethreadfence" => {
91 let ord = get_ord_at(0);
92 this.compiler_fence_intrinsic(args, fence_ord(ord))?;
93 }
94
95 "xchg" => {
96 let ord = get_ord_at(1);
97 this.atomic_exchange(args, dest, rw_ord(ord))?;
98 }
99 "cxchg" => {
100 let ord1 = get_ord_at(1);
101 let ord2 = get_ord_at(2);
102 this.atomic_compare_exchange(args, dest, rw_ord(ord1), read_ord(ord2))?;
103 }
104 "cxchgweak" => {
105 let ord1 = get_ord_at(1);
106 let ord2 = get_ord_at(2);
107 this.atomic_compare_exchange_weak(args, dest, rw_ord(ord1), read_ord(ord2))?;
108 }
109
110 "or" => {
111 let ord = get_ord_at(2);
112 this.atomic_rmw_op(
113 args,
114 dest,
115 AtomicRmwOp::MirOp { op: BinOp::BitOr, neg: false },
116 rw_ord(ord),
117 )?;
118 }
119 "xor" => {
120 let ord = get_ord_at(2);
121 this.atomic_rmw_op(
122 args,
123 dest,
124 AtomicRmwOp::MirOp { op: BinOp::BitXor, neg: false },
125 rw_ord(ord),
126 )?;
127 }
128 "and" => {
129 let ord = get_ord_at(2);
130 this.atomic_rmw_op(
131 args,
132 dest,
133 AtomicRmwOp::MirOp { op: BinOp::BitAnd, neg: false },
134 rw_ord(ord),
135 )?;
136 }
137 "nand" => {
138 let ord = get_ord_at(2);
139 this.atomic_rmw_op(
140 args,
141 dest,
142 AtomicRmwOp::MirOp { op: BinOp::BitAnd, neg: true },
143 rw_ord(ord),
144 )?;
145 }
146 "xadd" => {
147 let ord = get_ord_at(2);
148 this.atomic_rmw_op(
149 args,
150 dest,
151 AtomicRmwOp::MirOp { op: BinOp::Add, neg: false },
152 rw_ord(ord),
153 )?;
154 }
155 "xsub" => {
156 let ord = get_ord_at(2);
157 this.atomic_rmw_op(
158 args,
159 dest,
160 AtomicRmwOp::MirOp { op: BinOp::Sub, neg: false },
161 rw_ord(ord),
162 )?;
163 }
164 "min" => {
165 let ord = get_ord_at(1);
166 assert!(matches!(args[1].layout.ty.kind(), ty::Int(_)));
169 this.atomic_rmw_op(args, dest, AtomicRmwOp::Min, rw_ord(ord))?;
170 }
171 "umin" => {
172 let ord = get_ord_at(1);
173 assert!(matches!(args[1].layout.ty.kind(), ty::Uint(_)));
176 this.atomic_rmw_op(args, dest, AtomicRmwOp::Min, rw_ord(ord))?;
177 }
178 "max" => {
179 let ord = get_ord_at(1);
180 assert!(matches!(args[1].layout.ty.kind(), ty::Int(_)));
183 this.atomic_rmw_op(args, dest, AtomicRmwOp::Max, rw_ord(ord))?;
184 }
185 "umax" => {
186 let ord = get_ord_at(1);
187 assert!(matches!(args[1].layout.ty.kind(), ty::Uint(_)));
190 this.atomic_rmw_op(args, dest, AtomicRmwOp::Max, rw_ord(ord))?;
191 }
192
193 _ => return interp_ok(EmulateItemResult::NotSupported),
194 }
195 interp_ok(EmulateItemResult::NeedsReturn)
196 }
197}
198
199impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
200trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
201 fn atomic_load(
202 &mut self,
203 args: &[OpTy<'tcx>],
204 dest: &MPlaceTy<'tcx>,
205 atomic: AtomicReadOrd,
206 ) -> InterpResult<'tcx> {
207 let this = self.eval_context_mut();
208
209 let [place] = check_intrinsic_arg_count(args)?;
210 let place = this.deref_pointer(place)?;
211
212 let val = this.read_scalar_atomic(&place, atomic)?;
214 this.write_scalar(val, dest)?;
216 interp_ok(())
217 }
218
219 fn atomic_store(&mut self, args: &[OpTy<'tcx>], atomic: AtomicWriteOrd) -> InterpResult<'tcx> {
220 let this = self.eval_context_mut();
221
222 let [place, val] = check_intrinsic_arg_count(args)?;
223 let place = this.deref_pointer(place)?;
224
225 let val = this.read_scalar(val)?;
227 this.write_scalar_atomic(val, &place, atomic)?;
229 interp_ok(())
230 }
231
232 fn compiler_fence_intrinsic(
233 &mut self,
234 args: &[OpTy<'tcx>],
235 atomic: AtomicFenceOrd,
236 ) -> InterpResult<'tcx> {
237 let [] = check_intrinsic_arg_count(args)?;
238 let _ = atomic;
239 interp_ok(())
241 }
242
243 fn atomic_fence_intrinsic(
244 &mut self,
245 args: &[OpTy<'tcx>],
246 atomic: AtomicFenceOrd,
247 ) -> InterpResult<'tcx> {
248 let this = self.eval_context_mut();
249 let [] = check_intrinsic_arg_count(args)?;
250 this.atomic_fence(atomic)?;
251 interp_ok(())
252 }
253
254 fn atomic_rmw_op(
255 &mut self,
256 args: &[OpTy<'tcx>],
257 dest: &MPlaceTy<'tcx>,
258 atomic_op: AtomicRmwOp,
259 ord: AtomicRwOrd,
260 ) -> InterpResult<'tcx> {
261 let this = self.eval_context_mut();
262
263 let [place, rhs] = check_intrinsic_arg_count(args)?;
264 let place = this.deref_pointer(place)?;
265 let rhs = this.read_immediate(rhs)?;
266
267 if !(place.layout.ty.is_integral() || place.layout.ty.is_raw_ptr())
269 || !rhs.layout.ty.is_integral()
270 {
271 span_bug!(
272 this.cur_span(),
273 "atomic arithmetic operations only work on integer and raw pointer types",
274 );
275 }
276
277 let old = this.atomic_rmw_op_immediate(&place, &rhs, atomic_op, ord)?;
278 this.write_immediate(*old, dest)?; interp_ok(())
280 }
281
282 fn atomic_exchange(
283 &mut self,
284 args: &[OpTy<'tcx>],
285 dest: &MPlaceTy<'tcx>,
286 atomic: AtomicRwOrd,
287 ) -> InterpResult<'tcx> {
288 let this = self.eval_context_mut();
289
290 let [place, new] = check_intrinsic_arg_count(args)?;
291 let place = this.deref_pointer(place)?;
292 let new = this.read_scalar(new)?;
293
294 let old = this.atomic_exchange_scalar(&place, new, atomic)?;
295 this.write_scalar(old, dest)?; interp_ok(())
297 }
298
299 fn atomic_compare_exchange_impl(
300 &mut self,
301 args: &[OpTy<'tcx>],
302 dest: &MPlaceTy<'tcx>,
303 success: AtomicRwOrd,
304 fail: AtomicReadOrd,
305 can_fail_spuriously: bool,
306 ) -> InterpResult<'tcx> {
307 let this = self.eval_context_mut();
308
309 let [place, expect_old, new] = check_intrinsic_arg_count(args)?;
310 let place = this.deref_pointer(place)?;
311 let expect_old = this.read_immediate(expect_old)?; let new = this.read_scalar(new)?;
313
314 let old = this.atomic_compare_exchange_scalar(
315 &place,
316 &expect_old,
317 new,
318 success,
319 fail,
320 can_fail_spuriously,
321 )?;
322
323 this.write_immediate(old, dest)?;
325 interp_ok(())
326 }
327
328 fn atomic_compare_exchange(
329 &mut self,
330 args: &[OpTy<'tcx>],
331 dest: &MPlaceTy<'tcx>,
332 success: AtomicRwOrd,
333 fail: AtomicReadOrd,
334 ) -> InterpResult<'tcx> {
335 self.atomic_compare_exchange_impl(args, dest, success, fail, false)
336 }
337
338 fn atomic_compare_exchange_weak(
339 &mut self,
340 args: &[OpTy<'tcx>],
341 dest: &MPlaceTy<'tcx>,
342 success: AtomicRwOrd,
343 fail: AtomicReadOrd,
344 ) -> InterpResult<'tcx> {
345 self.atomic_compare_exchange_impl(args, dest, success, fail, true)
346 }
347}