// rustc_const_eval/interpret/intrinsics/simd.rs
use either::Either;
2use rustc_abi::Endian;
3use rustc_apfloat::{Float, Round};
4use rustc_middle::mir::interpret::{InterpErrorKind, UndefinedBehaviorInfo};
5use rustc_middle::ty::FloatTy;
6use rustc_middle::{bug, err_ub_format, mir, span_bug, throw_unsup_format, ty};
7use rustc_span::{Symbol, sym};
8use tracing::trace;
9
10use super::{
11 ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Provenance, Scalar, Size, interp_ok,
12 throw_ub_format,
13};
14use crate::interpret::Writeable;
15
/// Selects whether a min/max operation (`simd_fmax`/`simd_fmin`,
/// `simd_reduce_max`/`simd_reduce_min`) computes the minimum or the maximum.
#[derive(Copy, Clone)]
pub(crate) enum MinMax {
    Min,
    Max,
}
21
22impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Evaluates a `simd_*` intrinsic.
    ///
    /// Returns `interp_ok(true)` if the intrinsic was recognized here: the result has
    /// been written to `dest` and execution has already been returned to the `ret`
    /// block. Returns `interp_ok(false)` for intrinsics this function does not handle,
    /// so the caller can fall back to other handling.
    pub fn eval_simd_intrinsic(
        &mut self,
        intrinsic_name: Symbol,
        // Only consulted by `simd_shuffle_const_generic`, whose index vector is a
        // const generic argument rather than a runtime operand.
        generic_args: ty::GenericArgsRef<'tcx>,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, bool> {
        // Everything below addresses the destination lane-by-lane in memory.
        let dest = dest.force_mplace(self)?;

        match intrinsic_name {
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let (input, input_len) = self.project_to_simd(&args[0])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;
                assert_eq!(input_len, dest_len, "Return vector length must match input length");
                // The index is a runtime value here; out-of-bounds is UB, not a panic.
                if index >= input_len {
                    throw_ub_format!(
                        "`simd_insert` index {index} is out-of-bounds of vector with length {input_len}"
                    );
                }

                // Copy every lane from the input, substituting `elem` at `index`.
                for i in 0..dest_len {
                    let place = self.project_index(&dest, i)?;
                    let value =
                        if i == index { elem.clone() } else { self.project_index(&input, i)? };
                    self.copy_op(&value, &place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (input, input_len) = self.project_to_simd(&args[0])?;
                if index >= input_len {
                    throw_ub_format!(
                        "`simd_extract` index {index} is out-of-bounds of vector with length {input_len}"
                    );
                }
                self.copy_op(&self.project_index(&input, index)?, &dest)?;
            }
            // Lane-wise unary operations.
            sym::simd_neg
            | sym::simd_fabs
            | sym::simd_ceil
            | sym::simd_floor
            | sym::simd_round
            | sym::simd_round_ties_even
            | sym::simd_trunc
            | sym::simd_ctlz
            | sym::simd_ctpop
            | sym::simd_cttz
            | sym::simd_bswap
            | sym::simd_bitreverse => {
                let (op, op_len) = self.project_to_simd(&args[0])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;

                assert_eq!(dest_len, op_len);

                // How to evaluate a single lane of this intrinsic.
                #[derive(Copy, Clone)]
                enum Op {
                    MirOp(mir::UnOp),
                    Abs,
                    Round(rustc_apfloat::Round),
                    // Delegated to `numeric_intrinsic` (ctlz/ctpop/cttz/bswap/bitreverse).
                    Numeric(Symbol),
                }
                let which = match intrinsic_name {
                    sym::simd_neg => Op::MirOp(mir::UnOp::Neg),
                    sym::simd_fabs => Op::Abs,
                    sym::simd_ceil => Op::Round(rustc_apfloat::Round::TowardPositive),
                    sym::simd_floor => Op::Round(rustc_apfloat::Round::TowardNegative),
                    sym::simd_round => Op::Round(rustc_apfloat::Round::NearestTiesToAway),
                    sym::simd_round_ties_even => Op::Round(rustc_apfloat::Round::NearestTiesToEven),
                    sym::simd_trunc => Op::Round(rustc_apfloat::Round::TowardZero),
                    sym::simd_ctlz => Op::Numeric(sym::ctlz),
                    sym::simd_ctpop => Op::Numeric(sym::ctpop),
                    sym::simd_cttz => Op::Numeric(sym::cttz),
                    sym::simd_bswap => Op::Numeric(sym::bswap),
                    sym::simd_bitreverse => Op::Numeric(sym::bitreverse),
                    _ => unreachable!(),
                };

                for i in 0..dest_len {
                    let op = self.read_immediate(&self.project_index(&op, i)?)?;
                    let dest = self.project_index(&dest, i)?;
                    let val = match which {
                        Op::MirOp(mir_op) => {
                            self.unary_op(mir_op, &op)?.to_scalar()
                        }
                        Op::Abs => {
                            let ty::Float(float_ty) = op.layout.ty.kind() else {
                                span_bug!(
                                    self.cur_span(),
                                    "{} operand is not a float",
                                    intrinsic_name
                                )
                            };
                            let op = op.to_scalar();
                            match float_ty {
                                FloatTy::F16 => unimplemented!("f16_f128"),
                                FloatTy::F32 => Scalar::from_f32(op.to_f32()?.abs()),
                                FloatTy::F64 => Scalar::from_f64(op.to_f64()?.abs()),
                                FloatTy::F128 => unimplemented!("f16_f128"),
                            }
                        }
                        Op::Round(rounding) => {
                            let ty::Float(float_ty) = op.layout.ty.kind() else {
                                span_bug!(
                                    self.cur_span(),
                                    "{} operand is not a float",
                                    intrinsic_name
                                )
                            };
                            match float_ty {
                                FloatTy::F16 => unimplemented!("f16_f128"),
                                FloatTy::F32 => {
                                    let f = op.to_scalar().to_f32()?;
                                    let res = f.round_to_integral(rounding).value;
                                    // NaN payloads are machine-controlled.
                                    let res = self.adjust_nan(res, &[f]);
                                    Scalar::from_f32(res)
                                }
                                FloatTy::F64 => {
                                    let f = op.to_scalar().to_f64()?;
                                    let res = f.round_to_integral(rounding).value;
                                    let res = self.adjust_nan(res, &[f]);
                                    Scalar::from_f64(res)
                                }
                                FloatTy::F128 => unimplemented!("f16_f128"),
                            }
                        }
                        Op::Numeric(name) => {
                            self.numeric_intrinsic(name, op.to_scalar(), op.layout, op.layout)?
                        }
                    };
                    self.write_scalar(val, &dest)?;
                }
            }
            // Lane-wise binary operations (including comparisons, which produce a mask).
            sym::simd_add
            | sym::simd_sub
            | sym::simd_mul
            | sym::simd_div
            | sym::simd_rem
            | sym::simd_shl
            | sym::simd_shr
            | sym::simd_and
            | sym::simd_or
            | sym::simd_xor
            | sym::simd_eq
            | sym::simd_ne
            | sym::simd_lt
            | sym::simd_le
            | sym::simd_gt
            | sym::simd_ge
            | sym::simd_fmax
            | sym::simd_fmin
            | sym::simd_saturating_add
            | sym::simd_saturating_sub
            | sym::simd_arith_offset => {
                use mir::BinOp;

                let (left, left_len) = self.project_to_simd(&args[0])?;
                let (right, right_len) = self.project_to_simd(&args[1])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;

                assert_eq!(dest_len, left_len);
                assert_eq!(dest_len, right_len);

                enum Op {
                    MirOp(BinOp),
                    SaturatingOp(BinOp),
                    FMinMax(MinMax),
                    WrappingOffset,
                }
                let which = match intrinsic_name {
                    sym::simd_add => Op::MirOp(BinOp::Add),
                    sym::simd_sub => Op::MirOp(BinOp::Sub),
                    sym::simd_mul => Op::MirOp(BinOp::Mul),
                    sym::simd_div => Op::MirOp(BinOp::Div),
                    sym::simd_rem => Op::MirOp(BinOp::Rem),
                    // Shifts use the unchecked MIR ops: an overflowing shift amount is UB
                    // and reported with a SIMD-specific message below.
                    sym::simd_shl => Op::MirOp(BinOp::ShlUnchecked),
                    sym::simd_shr => Op::MirOp(BinOp::ShrUnchecked),
                    sym::simd_and => Op::MirOp(BinOp::BitAnd),
                    sym::simd_or => Op::MirOp(BinOp::BitOr),
                    sym::simd_xor => Op::MirOp(BinOp::BitXor),
                    sym::simd_eq => Op::MirOp(BinOp::Eq),
                    sym::simd_ne => Op::MirOp(BinOp::Ne),
                    sym::simd_lt => Op::MirOp(BinOp::Lt),
                    sym::simd_le => Op::MirOp(BinOp::Le),
                    sym::simd_gt => Op::MirOp(BinOp::Gt),
                    sym::simd_ge => Op::MirOp(BinOp::Ge),
                    sym::simd_fmax => Op::FMinMax(MinMax::Max),
                    sym::simd_fmin => Op::FMinMax(MinMax::Min),
                    sym::simd_saturating_add => Op::SaturatingOp(BinOp::Add),
                    sym::simd_saturating_sub => Op::SaturatingOp(BinOp::Sub),
                    sym::simd_arith_offset => Op::WrappingOffset,
                    _ => unreachable!(),
                };

                for i in 0..dest_len {
                    let left = self.read_immediate(&self.project_index(&left, i)?)?;
                    let right = self.read_immediate(&self.project_index(&right, i)?)?;
                    let dest = self.project_index(&dest, i)?;
                    let val = match which {
                        Op::MirOp(mir_op) => {
                            // Rewrite shift-overflow UB errors to mention the intrinsic and lane.
                            let val = self.binary_op(mir_op, &left, &right).map_err_kind(|kind| {
                                match kind {
                                    InterpErrorKind::UndefinedBehavior(UndefinedBehaviorInfo::ShiftOverflow { shift_amount, .. }) => {
                                        let shift_amount = match shift_amount {
                                            Either::Left(v) => v.to_string(),
                                            Either::Right(v) => v.to_string(),
                                        };
                                        err_ub_format!("overflowing shift by {shift_amount} in `{intrinsic_name}` in lane {i}")
                                    }
                                    kind => kind
                                }
                            })?;
                            if matches!(
                                mir_op,
                                BinOp::Eq
                                    | BinOp::Ne
                                    | BinOp::Lt
                                    | BinOp::Le
                                    | BinOp::Gt
                                    | BinOp::Ge
                            ) {
                                // Comparisons produce a `bool`, which we widen to the
                                // all-0s/all-1s SIMD mask representation.
                                assert_eq!(val.layout.ty, self.tcx.types.bool);
                                let val = val.to_scalar().to_bool().unwrap();
                                bool_to_simd_element(val, dest.layout.size)
                            } else {
                                assert_ne!(val.layout.ty, self.tcx.types.bool);
                                assert_eq!(val.layout.ty, dest.layout.ty);
                                val.to_scalar()
                            }
                        }
                        Op::SaturatingOp(mir_op) => self.saturating_arith(mir_op, &left, &right)?,
                        Op::WrappingOffset => {
                            // Pointer + (element count * element size), with wrapping
                            // address arithmetic rather than in-bounds offsetting.
                            let ptr = left.to_scalar().to_pointer(self)?;
                            let offset_count = right.to_scalar().to_target_isize(self)?;
                            let pointee_ty = left.layout.ty.builtin_deref(true).unwrap();

                            let pointee_size =
                                i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                            let offset_bytes = offset_count.wrapping_mul(pointee_size);
                            let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                            Scalar::from_maybe_pointer(offset_ptr, self)
                        }
                        Op::FMinMax(op) => self.fminmax_op(op, &left, &right)?,
                    };
                    self.write_scalar(val, &dest)?;
                }
            }
            // Horizontal (unordered) reductions over all lanes of one vector.
            sym::simd_reduce_and
            | sym::simd_reduce_or
            | sym::simd_reduce_xor
            | sym::simd_reduce_any
            | sym::simd_reduce_all
            | sym::simd_reduce_max
            | sym::simd_reduce_min => {
                use mir::BinOp;

                let (op, op_len) = self.project_to_simd(&args[0])?;

                let imm_from_bool = |b| {
                    ImmTy::from_scalar(
                        Scalar::from_bool(b),
                        self.layout_of(self.tcx.types.bool).unwrap(),
                    )
                };

                enum Op {
                    MirOp(BinOp),
                    // Like `MirOp`, but lanes are masks converted to `bool` first.
                    MirOpBool(BinOp),
                    MinMax(MinMax),
                }
                let which = match intrinsic_name {
                    sym::simd_reduce_and => Op::MirOp(BinOp::BitAnd),
                    sym::simd_reduce_or => Op::MirOp(BinOp::BitOr),
                    sym::simd_reduce_xor => Op::MirOp(BinOp::BitXor),
                    sym::simd_reduce_any => Op::MirOpBool(BinOp::BitOr),
                    sym::simd_reduce_all => Op::MirOpBool(BinOp::BitAnd),
                    sym::simd_reduce_max => Op::MinMax(MinMax::Max),
                    sym::simd_reduce_min => Op::MinMax(MinMax::Min),
                    _ => unreachable!(),
                };

                // Seed the accumulator with lane 0, then fold in lanes 1..len.
                let mut res = self.read_immediate(&self.project_index(&op, 0)?)?;
                if matches!(which, Op::MirOpBool(_)) {
                    res = imm_from_bool(simd_element_to_bool(res)?);
                }
                for i in 1..op_len {
                    let op = self.read_immediate(&self.project_index(&op, i)?)?;
                    res = match which {
                        Op::MirOp(mir_op) => self.binary_op(mir_op, &res, &op)?,
                        Op::MirOpBool(mir_op) => {
                            let op = imm_from_bool(simd_element_to_bool(op)?);
                            self.binary_op(mir_op, &res, &op)?
                        }
                        Op::MinMax(mmop) => {
                            if matches!(res.layout.ty.kind(), ty::Float(_)) {
                                // Floats go through the NaN-aware min/max helper.
                                ImmTy::from_scalar(self.fminmax_op(mmop, &res, &op)?, res.layout)
                            } else {
                                // Integers: keep whichever side wins the comparison.
                                let mirop = match mmop {
                                    MinMax::Min => BinOp::Le,
                                    MinMax::Max => BinOp::Ge,
                                };
                                if self.binary_op(mirop, &res, &op)?.to_scalar().to_bool()? {
                                    res
                                } else {
                                    op
                                }
                            }
                        }
                    };
                }
                self.write_immediate(*res, &dest)?;
            }
            // Ordered reductions: fold lanes left-to-right, starting from `args[1]`.
            sym::simd_reduce_add_ordered | sym::simd_reduce_mul_ordered => {
                use mir::BinOp;

                let (op, op_len) = self.project_to_simd(&args[0])?;
                let init = self.read_immediate(&args[1])?;

                let mir_op = match intrinsic_name {
                    sym::simd_reduce_add_ordered => BinOp::Add,
                    sym::simd_reduce_mul_ordered => BinOp::Mul,
                    _ => unreachable!(),
                };

                let mut res = init;
                for i in 0..op_len {
                    let op = self.read_immediate(&self.project_index(&op, i)?)?;
                    res = self.binary_op(mir_op, &res, &op)?;
                }
                self.write_immediate(*res, &dest)?;
            }
            sym::simd_select => {
                let (mask, mask_len) = self.project_to_simd(&args[0])?;
                let (yes, yes_len) = self.project_to_simd(&args[1])?;
                let (no, no_len) = self.project_to_simd(&args[2])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;

                assert_eq!(dest_len, mask_len);
                assert_eq!(dest_len, yes_len);
                assert_eq!(dest_len, no_len);

                // Per-lane select: mask lane decides between `yes` and `no`.
                for i in 0..dest_len {
                    let mask = self.read_immediate(&self.project_index(&mask, i)?)?;
                    let yes = self.read_immediate(&self.project_index(&yes, i)?)?;
                    let no = self.read_immediate(&self.project_index(&no, i)?)?;
                    let dest = self.project_index(&dest, i)?;

                    let val = if simd_element_to_bool(mask)? { yes } else { no };
                    self.write_immediate(*val, &dest)?;
                }
            }
            sym::simd_select_bitmask => {
                let mask = &args[0];
                let (yes, yes_len) = self.project_to_simd(&args[1])?;
                let (no, no_len) = self.project_to_simd(&args[2])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;
                // The mask is padded up to a whole number of bytes.
                let bitmask_len = dest_len.next_multiple_of(8);
                if bitmask_len > 64 {
                    throw_unsup_format!(
                        "simd_select_bitmask: vectors larger than 64 elements are currently not supported"
                    );
                }

                assert_eq!(dest_len, yes_len);
                assert_eq!(dest_len, no_len);

                // The mask may be an unsigned integer or a `[u8; N]` byte array;
                // normalize it to a `u64` in the target's bit order.
                let mask: u64 = match mask.layout.ty.kind() {
                    ty::Uint(_) => {
                        assert!(mask.layout.size.bits() >= bitmask_len);
                        self.read_scalar(mask)?.to_bits(mask.layout.size)?.try_into().unwrap()
                    }
                    ty::Array(elem, _len) if elem == &self.tcx.types.u8 => {
                        assert_eq!(mask.layout.size.bits(), bitmask_len);
                        let mask = mask.assert_mem_place();
                        let mask_bytes =
                            self.read_bytes_ptr_strip_provenance(mask.ptr(), mask.layout.size)?;
                        let mask_size = mask.layout.size.bytes_usize();
                        let mut mask_arr = [0u8; 8];
                        match self.tcx.data_layout.endian {
                            Endian::Little => {
                                mask_arr[..mask_size].copy_from_slice(mask_bytes);
                                u64::from_le_bytes(mask_arr)
                            }
                            Endian::Big => {
                                // Big-endian: the mask bytes occupy the high end of the u64.
                                let i = mask_arr.len().strict_sub(mask_size);
                                mask_arr[i..].copy_from_slice(mask_bytes);
                                u64::from_be_bytes(mask_arr)
                            }
                        }
                    }
                    _ => bug!("simd_select_bitmask: invalid mask type {}", mask.layout.ty),
                };

                let dest_len = u32::try_from(dest_len).unwrap();
                for i in 0..dest_len {
                    // Bit position of lane `i` depends on the target's endianness.
                    let bit_i = simd_bitmask_index(i, dest_len, self.tcx.data_layout.endian);
                    let mask = mask & 1u64.strict_shl(bit_i);
                    let yes = self.read_immediate(&self.project_index(&yes, i.into())?)?;
                    let no = self.read_immediate(&self.project_index(&no, i.into())?)?;
                    let dest = self.project_index(&dest, i.into())?;

                    let val = if mask != 0 { yes } else { no };
                    self.write_immediate(*val, &dest)?;
                }
            }
            sym::simd_bitmask => {
                let (op, op_len) = self.project_to_simd(&args[0])?;
                let bitmask_len = op_len.next_multiple_of(8);
                if bitmask_len > 64 {
                    throw_unsup_format!(
                        "simd_bitmask: vectors larger than 64 elements are currently not supported"
                    );
                }

                // Collect one bit per lane into a u64, in the target's bit order.
                let op_len = u32::try_from(op_len).unwrap();
                let mut res = 0u64;
                for i in 0..op_len {
                    let op = self.read_immediate(&self.project_index(&op, i.into())?)?;
                    if simd_element_to_bool(op)? {
                        let bit_i = simd_bitmask_index(i, op_len, self.tcx.data_layout.endian);
                        res |= 1u64.strict_shl(bit_i);
                    }
                }
                // The result may be an unsigned integer or a `[u8; N]` byte array.
                match dest.layout.ty.kind() {
                    ty::Uint(_) => {
                        assert!(dest.layout.size.bits() >= bitmask_len);
                        self.write_scalar(Scalar::from_uint(res, dest.layout.size), &dest)?;
                    }
                    ty::Array(elem, _len) if elem == &self.tcx.types.u8 => {
                        assert_eq!(dest.layout.size.bits(), bitmask_len);
                        let res_size = dest.layout.size.bytes_usize();
                        let res_bytes;
                        let res_bytes_slice = match self.tcx.data_layout.endian {
                            Endian::Little => {
                                res_bytes = res.to_le_bytes();
                                // Little-endian: the mask lives in the low bytes.
                                &res_bytes[..res_size]
                            }
                            Endian::Big => {
                                res_bytes = res.to_be_bytes();
                                // Big-endian: the mask lives in the high bytes.
                                &res_bytes[res_bytes.len().strict_sub(res_size)..]
                            }
                        };
                        self.write_bytes_ptr(dest.ptr(), res_bytes_slice.iter().cloned())?;
                    }
                    _ => bug!("simd_bitmask: invalid return type {}", dest.layout.ty),
                }
            }
            // Lane-wise casts between numeric and pointer element types.
            sym::simd_cast
            | sym::simd_as
            | sym::simd_cast_ptr
            | sym::simd_with_exposed_provenance => {
                let (op, op_len) = self.project_to_simd(&args[0])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;

                assert_eq!(dest_len, op_len);

                // `simd_cast` is UB on unrepresentable float-to-int values;
                // `simd_as` saturates instead.
                let unsafe_cast = intrinsic_name == sym::simd_cast;
                let safe_cast = intrinsic_name == sym::simd_as;
                let ptr_cast = intrinsic_name == sym::simd_cast_ptr;
                let from_exposed_cast = intrinsic_name == sym::simd_with_exposed_provenance;

                for i in 0..dest_len {
                    let op = self.read_immediate(&self.project_index(&op, i)?)?;
                    let dest = self.project_index(&dest, i)?;

                    let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) {
                        (ty::Int(_) | ty::Uint(_), ty::Int(_) | ty::Uint(_) | ty::Float(_))
                            if safe_cast || unsafe_cast =>
                            self.int_to_int_or_float(&op, dest.layout)?,
                        (ty::Float(_), ty::Float(_)) if safe_cast || unsafe_cast =>
                            self.float_to_float_or_int(&op, dest.layout)?,
                        (ty::Float(_), ty::Int(_) | ty::Uint(_)) if safe_cast =>
                            self.float_to_float_or_int(&op, dest.layout)?,
                        (ty::Float(_), ty::Int(_) | ty::Uint(_)) if unsafe_cast => {
                            self.float_to_int_checked(&op, dest.layout, Round::TowardZero)?
                                .ok_or_else(|| {
                                    err_ub_format!(
                                        "`simd_cast` intrinsic called on {op} which cannot be represented in target type `{:?}`",
                                        dest.layout.ty
                                    )
                                })?
                        }
                        (ty::RawPtr(..), ty::RawPtr(..)) if ptr_cast =>
                            self.ptr_to_ptr(&op, dest.layout)?,
                        (ty::Int(_) | ty::Uint(_), ty::RawPtr(..)) if from_exposed_cast =>
                            self.pointer_with_exposed_provenance_cast(&op, dest.layout)?,
                        _ =>
                            throw_unsup_format!(
                                "Unsupported SIMD cast from element type {from_ty} to {to_ty}",
                                from_ty = op.layout.ty,
                                to_ty = dest.layout.ty,
                            ),
                    };
                    self.write_immediate(*val, &dest)?;
                }
            }
            sym::simd_shuffle_const_generic => {
                let (left, left_len) = self.project_to_simd(&args[0])?;
                let (right, right_len) = self.project_to_simd(&args[1])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;

                // The index vector is supplied as a const generic (a valtree of u32s).
                let index = generic_args[2].expect_const().to_value().valtree.unwrap_branch();
                let index_len = index.len();

                assert_eq!(left_len, right_len);
                assert_eq!(u64::try_from(index_len).unwrap(), dest_len);

                for i in 0..dest_len {
                    let src_index: u64 =
                        index[usize::try_from(i).unwrap()].unwrap_leaf().to_u32().into();
                    let dest = self.project_index(&dest, i)?;

                    // Indices address the concatenation of `left` and `right`.
                    let val = if src_index < left_len {
                        self.read_immediate(&self.project_index(&left, src_index)?)?
                    } else if src_index < left_len.strict_add(right_len) {
                        let right_idx = src_index.strict_sub(left_len);
                        self.read_immediate(&self.project_index(&right, right_idx)?)?
                    } else {
                        throw_ub_format!(
                            "`simd_shuffle_const_generic` index {src_index} is out-of-bounds for 2 vectors with length {dest_len}"
                        );
                    };
                    self.write_immediate(*val, &dest)?;
                }
            }
            sym::simd_shuffle => {
                let (left, left_len) = self.project_to_simd(&args[0])?;
                let (right, right_len) = self.project_to_simd(&args[1])?;
                // Unlike `simd_shuffle_const_generic`, the index vector is a runtime operand.
                let (index, index_len) = self.project_to_simd(&args[2])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;

                assert_eq!(left_len, right_len);
                assert_eq!(index_len, dest_len);

                for i in 0..dest_len {
                    let src_index: u64 = self
                        .read_immediate(&self.project_index(&index, i)?)?
                        .to_scalar()
                        .to_u32()?
                        .into();
                    let dest = self.project_index(&dest, i)?;

                    let val = if src_index < left_len {
                        self.read_immediate(&self.project_index(&left, src_index)?)?
                    } else if src_index < left_len.strict_add(right_len) {
                        let right_idx = src_index.strict_sub(left_len);
                        self.read_immediate(&self.project_index(&right, right_idx)?)?
                    } else {
                        throw_ub_format!(
                            "`simd_shuffle` index {src_index} is out-of-bounds for 2 vectors with length {dest_len}"
                        );
                    };
                    self.write_immediate(*val, &dest)?;
                }
            }
            sym::simd_gather => {
                let (passthru, passthru_len) = self.project_to_simd(&args[0])?;
                let (ptrs, ptrs_len) = self.project_to_simd(&args[1])?;
                let (mask, mask_len) = self.project_to_simd(&args[2])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;

                assert_eq!(dest_len, passthru_len);
                assert_eq!(dest_len, ptrs_len);
                assert_eq!(dest_len, mask_len);

                for i in 0..dest_len {
                    let passthru = self.read_immediate(&self.project_index(&passthru, i)?)?;
                    let ptr = self.read_immediate(&self.project_index(&ptrs, i)?)?;
                    let mask = self.read_immediate(&self.project_index(&mask, i)?)?;
                    let dest = self.project_index(&dest, i)?;

                    // Masked-out lanes take the passthru value and perform no load.
                    let val = if simd_element_to_bool(mask)? {
                        let place = self.deref_pointer(&ptr)?;
                        self.read_immediate(&place)?
                    } else {
                        passthru
                    };
                    self.write_immediate(*val, &dest)?;
                }
            }
            sym::simd_scatter => {
                let (value, value_len) = self.project_to_simd(&args[0])?;
                let (ptrs, ptrs_len) = self.project_to_simd(&args[1])?;
                let (mask, mask_len) = self.project_to_simd(&args[2])?;

                assert_eq!(ptrs_len, value_len);
                assert_eq!(ptrs_len, mask_len);

                for i in 0..ptrs_len {
                    let value = self.read_immediate(&self.project_index(&value, i)?)?;
                    let ptr = self.read_immediate(&self.project_index(&ptrs, i)?)?;
                    let mask = self.read_immediate(&self.project_index(&mask, i)?)?;

                    // Masked-out lanes perform no store.
                    if simd_element_to_bool(mask)? {
                        let place = self.deref_pointer(&ptr)?;
                        self.write_immediate(*value, &place)?;
                    }
                }
            }
            sym::simd_masked_load => {
                let (mask, mask_len) = self.project_to_simd(&args[0])?;
                let ptr = self.read_pointer(&args[1])?;
                let (default, default_len) = self.project_to_simd(&args[2])?;
                let (dest, dest_len) = self.project_to_simd(&dest)?;

                assert_eq!(dest_len, mask_len);
                assert_eq!(dest_len, default_len);

                for i in 0..dest_len {
                    let mask = self.read_immediate(&self.project_index(&mask, i)?)?;
                    let default = self.read_immediate(&self.project_index(&default, i)?)?;
                    let dest = self.project_index(&dest, i)?;

                    let val = if simd_element_to_bool(mask)? {
                        // Load lane `i` from `ptr + i * lane_size`.
                        let ptr = ptr.wrapping_offset(dest.layout.size * i, self);
                        let place = self.ptr_to_mplace(ptr, dest.layout);
                        self.read_immediate(&place)?
                    } else {
                        default
                    };
                    self.write_immediate(*val, &dest)?;
                }
            }
            sym::simd_masked_store => {
                let (mask, mask_len) = self.project_to_simd(&args[0])?;
                let ptr = self.read_pointer(&args[1])?;
                let (vals, vals_len) = self.project_to_simd(&args[2])?;

                assert_eq!(mask_len, vals_len);

                for i in 0..vals_len {
                    let mask = self.read_immediate(&self.project_index(&mask, i)?)?;
                    let val = self.read_immediate(&self.project_index(&vals, i)?)?;

                    if simd_element_to_bool(mask)? {
                        // Store lane `i` to `ptr + i * lane_size`.
                        let ptr = ptr.wrapping_offset(val.layout.size * i, self);
                        let place = self.ptr_to_mplace(ptr, val.layout);
                        self.write_immediate(*val, &place)?
                    };
                }
            }

            // Not a SIMD intrinsic handled here; let the caller deal with it.
            _ => return interp_ok(false),
        }

        trace!("{:?}", self.dump_place(&dest.clone().into()));
        self.return_to_block(ret)?;
        interp_ok(true)
    }
713
714 fn fminmax_op<Prov: Provenance>(
715 &self,
716 op: MinMax,
717 left: &ImmTy<'tcx, Prov>,
718 right: &ImmTy<'tcx, Prov>,
719 ) -> InterpResult<'tcx, Scalar<Prov>> {
720 assert_eq!(left.layout.ty, right.layout.ty);
721 let ty::Float(float_ty) = left.layout.ty.kind() else {
722 bug!("fmax operand is not a float")
723 };
724 let left = left.to_scalar();
725 let right = right.to_scalar();
726 interp_ok(match float_ty {
727 FloatTy::F16 => unimplemented!("f16_f128"),
728 FloatTy::F32 => {
729 let left = left.to_f32()?;
730 let right = right.to_f32()?;
731 let res = match op {
732 MinMax::Min => left.min(right),
733 MinMax::Max => left.max(right),
734 };
735 let res = self.adjust_nan(res, &[left, right]);
736 Scalar::from_f32(res)
737 }
738 FloatTy::F64 => {
739 let left = left.to_f64()?;
740 let right = right.to_f64()?;
741 let res = match op {
742 MinMax::Min => left.min(right),
743 MinMax::Max => left.max(right),
744 };
745 let res = self.adjust_nan(res, &[left, right]);
746 Scalar::from_f64(res)
747 }
748 FloatTy::F128 => unimplemented!("f16_f128"),
749 })
750 }
751}
752
753fn simd_bitmask_index(idx: u32, vec_len: u32, endianness: Endian) -> u32 {
754 assert!(idx < vec_len);
755 match endianness {
756 Endian::Little => idx,
757 #[expect(clippy::arithmetic_side_effects)] Endian::Big => vec_len - 1 - idx, }
760}
761
762fn bool_to_simd_element<Prov: Provenance>(b: bool, size: Size) -> Scalar<Prov> {
763 let val = if b { -1 } else { 0 };
767 Scalar::from_int(val, size)
768}
769
770fn simd_element_to_bool<Prov: Provenance>(elem: ImmTy<'_, Prov>) -> InterpResult<'_, bool> {
771 assert!(
772 matches!(elem.layout.ty.kind(), ty::Int(_) | ty::Uint(_)),
773 "SIMD mask element type must be an integer, but this is `{}`",
774 elem.layout.ty
775 );
776 let val = elem.to_scalar().to_int(elem.layout.size)?;
777 interp_ok(match val {
778 0 => false,
779 -1 => true,
780 _ => throw_ub_format!("each element of a SIMD mask must be all-0-bits or all-1-bits"),
781 })
782}