mod simd;

use std::assert_matches::assert_matches;

use rustc_abi::{FieldIdx, HasDataLayout, Size, VariantIdx};
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_middle::mir::interpret::{CTFE_ALLOC_SALT, read_target_uint, write_target_uint};
use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{FloatTy, Ty, TyCtxt};
use rustc_middle::{bug, span_bug, ty};
use rustc_span::{Symbol, sym};
use tracing::trace;

use super::memory::MemoryKind;
use super::util::ensure_monomorphic_enough;
use super::{
    AllocId, CheckInAllocMsg, ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Pointer,
    PointerArithmetic, Provenance, Scalar, err_ub_custom, err_unsup_format, interp_ok, throw_inval,
    throw_ub_custom, throw_ub_format,
};
use crate::fluent_generated as fluent;

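/// How a multiply-add rounds; this distinguishes the `fma*` intrinsics from `fmuladd*`.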
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum MulAddType {
    /// Always fuse the multiplication and addition into one operation with a
    /// single final rounding step (the semantics of the `fma*` intrinsics).
    Fused,
    /// Nondeterministically either fuse, or multiply and then add with two
    /// rounding steps (the semantics of the `fmuladd*` intrinsics).
    Nondeterministic,
}

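/// Which IEEE min/max operation to perform; the variants differ in how NaNs and
/// signed zeros are treated.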
#[derive(Copy, Clone)]
pub(crate) enum MinMax {
    /// IEEE `minimum`: treats `-0.0` as smaller than `+0.0` and returns NaN if
    /// either input is NaN.
    Minimum,
    /// IEEE `minimumNumber`: treats `-0.0` as smaller than `+0.0`, but if exactly
    /// one input is NaN, returns the other input.
    MinimumNumber,
    /// IEEE `maximum`: treats `+0.0` as larger than `-0.0` and returns NaN if
    /// either input is NaN.
    Maximum,
    /// IEEE `maximumNumber`: treats `+0.0` as larger than `-0.0`, but if exactly
    /// one input is NaN, returns the other input.
    MaximumNumber,
}

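/// Returns an allocation containing the absolute path representation of the given type,
/// together with its length in bytes (i.e., the slice metadata used by `type_name`).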
pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (AllocId, u64) {
    let path = crate::util::type_name(tcx, ty);
    let bytes = path.into_bytes();
    let len = bytes.len().try_into().unwrap();
    (tcx.allocate_bytes_dedup(bytes, CTFE_ALLOC_SALT), len)
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
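    /// Generates a value of `TypeId` for `ty` in-place.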
    fn write_type_id(
        &mut self,
        ty: Ty<'tcx>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()> {
        let tcx = self.tcx;
        let type_id_hash = tcx.type_id_hash(ty).as_u128();
        let op = self.const_val_to_op(
            ConstValue::Scalar(Scalar::from_u128(type_id_hash)),
            tcx.types.u128,
            None,
        )?;
        self.copy_op_allow_transmute(&op, dest)?;

        let alloc_id = tcx.reserve_and_set_type_id_alloc(ty);
        // Give the pointers inside the `TypeId` provenance that identifies the type.
        // `TypeId` is a newtype around an array of pointers, so project to its only
        // field and then iterate over the array elements.
        let arr = self.project_field(dest, FieldIdx::ZERO)?;
        let mut elem_iter = self.project_array_fields(&arr)?;
        while let Some((_, elem)) = elem_iter.next(self)? {
            // Keep the hash fragment as the pointer offset, and attach the type id provenance.
            let hash_fragment = self.read_scalar(&elem)?.to_target_usize(&tcx)?;
            let ptr = Pointer::new(alloc_id.into(), Size::from_bytes(hash_fragment));
            let ptr = self.global_root_pointer(ptr)?;
            let val = Scalar::from_pointer(ptr, &tcx);
            self.write_scalar(val, &elem)?;
        }
        interp_ok(())
    }

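    /// Reads a `TypeId` value and returns the type it represents.
    /// Raises UB if the value does not carry consistent type id metadata.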
    pub(crate) fn read_type_id(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Ty<'tcx>> {
        let ptr_size = self.pointer_size().bytes_usize();
        // `TypeId` is a newtype around an array of pointers, so project to that array.
        let arr = self.project_field(op, FieldIdx::ZERO)?;

        let mut ty_and_hash = None;
        let mut elem_iter = self.project_array_fields(&arr)?;
        while let Some((idx, elem)) = elem_iter.next(self)? {
            let elem = self.read_pointer(&elem)?;
            let (elem_ty, elem_hash) = self.get_ptr_type_id(elem)?;
            // All elements must carry provenance for the *same* type; the expected hash
            // is computed once, from the first element's type.
            let full_hash = match ty_and_hash {
                None => {
                    let hash = self.tcx.type_id_hash(elem_ty).as_u128();
                    let mut hash_bytes = [0u8; 16];
                    write_target_uint(self.data_layout().endian, &mut hash_bytes, hash).unwrap();
                    ty_and_hash = Some((elem_ty, hash_bytes));
                    hash_bytes
                }
                Some((ty, hash_bytes)) => {
                    if ty != elem_ty {
                        throw_ub_format!(
                            "invalid `TypeId` value: not all bytes carry the same type id metadata"
                        );
                    }
                    hash_bytes
                }
            };
            // Each pointer's offset must equal the corresponding fragment of the hash.
            let hash_frag = &full_hash[(idx as usize) * ptr_size..][..ptr_size];
            if read_target_uint(self.data_layout().endian, hash_frag).unwrap() != elem_hash.into() {
                throw_ub_format!(
                    "invalid `TypeId` value: the hash does not match the type id metadata"
                );
            }
        }

        interp_ok(ty_and_hash.unwrap().0)
    }

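    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all interpreter instances;
    /// individual machines can add their own intrinsic handling.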
    pub fn eval_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, bool> {
        let instance_args = instance.args;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        if intrinsic_name.as_str().starts_with("simd_") {
            return self.eval_simd_intrinsic(intrinsic_name, instance_args, args, dest, ret);
        }

        let tcx = self.tcx.tcx;

        match intrinsic_name {
            sym::type_name => {
                let tp_ty = instance.args.type_at(0);
                ensure_monomorphic_enough(tcx, tp_ty)?;
                let (alloc_id, meta) = alloc_type_name(tcx, tp_ty);
                let val = ConstValue::Slice { alloc_id, meta };
                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }
            sym::needs_drop => {
                let tp_ty = instance.args.type_at(0);
                ensure_monomorphic_enough(tcx, tp_ty)?;
                let val = ConstValue::from_bool(tp_ty.needs_drop(tcx, self.typing_env));
                let val = self.const_val_to_op(val, tcx.types.bool, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }
            sym::type_id => {
                let tp_ty = instance.args.type_at(0);
                ensure_monomorphic_enough(tcx, tp_ty)?;
                self.write_type_id(tp_ty, dest)?;
            }
            sym::type_id_eq => {
                let a_ty = self.read_type_id(&args[0])?;
                let b_ty = self.read_type_id(&args[1])?;
                self.write_scalar(Scalar::from_bool(a_ty == b_ty), dest)?;
            }
            sym::size_of => {
                let tp_ty = instance.args.type_at(0);
                let layout = self.layout_of(tp_ty)?;
                if !layout.is_sized() {
                    span_bug!(self.cur_span(), "unsized type for `size_of`");
                }
                let val = layout.size.bytes();
                self.write_scalar(Scalar::from_target_usize(val, self), dest)?;
            }
            sym::align_of => {
                let tp_ty = instance.args.type_at(0);
                let layout = self.layout_of(tp_ty)?;
                if !layout.is_sized() {
                    span_bug!(self.cur_span(), "unsized type for `align_of`");
                }
                let val = layout.align.bytes();
                self.write_scalar(Scalar::from_target_usize(val, self), dest)?;
            }
            sym::offset_of => {
                let tp_ty = instance.args.type_at(0);

                let variant = self.read_scalar(&args[0])?.to_u32()?;
                let field = self.read_scalar(&args[1])?.to_u32()? as usize;

                let layout = self.layout_of(tp_ty)?;
                let cx = ty::layout::LayoutCx::new(*self.tcx, self.typing_env);

                let layout = layout.for_variant(&cx, VariantIdx::from_u32(variant));
                let offset = layout.fields.offset(field).bytes();

                self.write_scalar(Scalar::from_target_usize(offset, self), dest)?;
            }
            sym::variant_count => {
                let tp_ty = instance.args.type_at(0);
                let ty = match tp_ty.kind() {
                    // Pattern types have the same number of variants as their base type.
                    ty::Pat(base, _) => *base,
                    _ => tp_ty,
                };
                let val = match ty.kind() {
                    ty::Adt(adt, _) => {
                        ConstValue::from_target_usize(adt.variants().len() as u64, &tcx)
                    }
                    ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
                        throw_inval!(TooGeneric)
                    }
                    ty::Pat(..) => unreachable!(),
                    ty::Bound(_, _) => bug!("bound ty during ctfe"),
                    ty::Bool
                    | ty::Char
                    | ty::Int(_)
                    | ty::Uint(_)
                    | ty::Float(_)
                    | ty::Foreign(_)
                    | ty::Str
                    | ty::Array(_, _)
                    | ty::Slice(_)
                    | ty::RawPtr(_, _)
                    | ty::Ref(_, _, _)
                    | ty::FnDef(_, _)
                    | ty::FnPtr(..)
                    | ty::Dynamic(_, _)
                    | ty::Closure(_, _)
                    | ty::CoroutineClosure(_, _)
                    | ty::Coroutine(_, _)
                    | ty::CoroutineWitness(..)
                    | ty::UnsafeBinder(_)
                    | ty::Never
                    | ty::Tuple(_)
                    | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
                };
                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let val = self.tcx.span_as_caller_location(span);
                let val =
                    self.const_val_to_op(val, self.tcx.caller_location_ty(), Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::align_of_val | sym::size_of_val => {
                // Avoid `deref_pointer` -- this is not a deref, so the pointer does not
                // have to be dereferenceable.
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_val(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_target_usize(result, self), dest)?;
            }

            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => {
                let a = self.read_immediate(&args[0])?;
                let b = self.read_immediate(&args[1])?;

                let op = match intrinsic_name {
                    sym::fadd_algebraic => BinOp::Add,
                    sym::fsub_algebraic => BinOp::Sub,
                    sym::fmul_algebraic => BinOp::Mul,
                    sym::fdiv_algebraic => BinOp::Div,
                    sym::frem_algebraic => BinOp::Rem,
                    _ => bug!(),
                };

                let res = self.binary_op(op, &a, &b)?;
                // The "algebraic" operations permit optimizations that can change the result,
                // so let the machine apply its nondeterministic adjustments.
                let res = M::apply_float_nondet(self, res)?;
                self.write_immediate(*res, dest)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = instance_args.type_at(0);
                let layout = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?;

                let out_val = self.numeric_intrinsic(intrinsic_name, val, layout, dest.layout)?;
                self.write_scalar(out_val, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let val = self.saturating_arith(
                    if intrinsic_name == sym::saturating_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_pointer(&args[0])?;
                let variant = self.read_discriminant(&place)?;
                let discr = self.discriminant_for_variant(place.layout.ty, variant)?;
                self.write_immediate(*discr, dest)?;
            }
            sym::exact_div => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                self.exact_div(&l, &r, dest)?;
            }
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], false)?;
            }
            sym::write_bytes => {
                self.write_bytes_intrinsic(&args[0], &args[1], &args[2], "write_bytes")?;
            }
            sym::compare_bytes => {
                let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
                self.write_scalar(result, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_target_isize(&args[1])?;
                let pointee_ty = instance_args.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let a = self.read_pointer(&args[0])?;
                let b = self.read_pointer(&args[1])?;

                let usize_layout = self.layout_of(self.tcx.types.usize)?;
                let isize_layout = self.layout_of(self.tcx.types.isize)?;

                // Get offsets for both pointers that are relative to the same base.
                // With `OFFSET_IS_ADDR` this is trivial; without it we need either two
                // absolute addresses or two pointers into the same allocation.
                let (a_offset, b_offset, is_addr) = if M::Provenance::OFFSET_IS_ADDR {
                    (a.addr().bytes(), b.addr().bytes(), /*is_addr*/ true)
                } else {
                    match (self.ptr_try_get_alloc_id(a, 0), self.ptr_try_get_alloc_id(b, 0)) {
                        (Err(a), Err(b)) => {
                            // Neither pointer points to an allocation, so both are absolute addresses.
                            (a, b, /*is_addr*/ true)
                        }
                        (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _)))
                            if a_alloc_id == b_alloc_id =>
                        {
                            // Found an allocation for both, and it is the same one;
                            // use these offsets for the distance calculation.
                            (a_offset.bytes(), b_offset.bytes(), /*is_addr*/ false)
                        }
                        _ => {
                            // Not into the same allocation -- this is UB.
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_different_allocations,
                                name = intrinsic_name,
                            );
                        }
                    }
                };

                // Compute the distance `a - b`.
                let dist = {
                    // Addresses are unsigned, so this is a `usize` computation; the overflow
                    // check has to be done separately anyway.
                    let (val, overflowed) = {
                        let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                        let b_offset = ImmTy::from_uint(b_offset, usize_layout);
                        self.binary_op(BinOp::SubWithOverflow, &a_offset, &b_offset)?
                            .to_scalar_pair()
                    };
                    if overflowed.to_bool()? {
                        // `a < b`, which the unsigned variant does not allow.
                        if intrinsic_name == sym::ptr_offset_from_unsigned {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_unsigned_overflow,
                                a_offset = a_offset,
                                b_offset = b_offset,
                                is_addr = is_addr,
                            );
                        }
                        // The signed variant allows this. Interpreting the difference as `isize`
                        // yields the proper signed distance; if that comes out non-negative or
                        // equal to `isize::MIN`, the pointers were more than `isize::MAX` apart,
                        // which is UB.
                        let dist = val.to_target_isize(self)?;
                        if dist >= 0 || i128::from(dist) == self.pointer_size().signed_int_min() {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_underflow,
                                name = intrinsic_name,
                            );
                        }
                        dist
                    } else {
                        // `b <= a`.
                        let dist = val.to_target_isize(self)?;
                        // If converting to `isize` produced a *negative* result, the pointers
                        // were more than `isize::MAX` apart, which is UB.
                        if dist < 0 {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_overflow,
                                name = intrinsic_name,
                            );
                        }
                        dist
                    }
                };

                // Check that the memory between the two pointers is dereferenceable, starting
                // from the origin pointer: `dist` is `a - b`, so it is based on `b`.
                self.check_ptr_access_signed(b, dist, CheckInAllocMsg::Dereferenceable)
                    .map_err_kind(|_| {
                        // This could mean they point to different allocations, or they point to
                        // the same allocation but the range between them is not in-bounds.
                        if let Ok((a_alloc_id, ..)) = self.ptr_try_get_alloc_id(a, 0)
                            && let Ok((b_alloc_id, ..)) = self.ptr_try_get_alloc_id(b, 0)
                            && a_alloc_id == b_alloc_id
                        {
                            err_ub_custom!(
                                fluent::const_eval_offset_from_out_of_bounds,
                                name = intrinsic_name,
                            )
                        } else {
                            err_ub_custom!(
                                fluent::const_eval_offset_from_different_allocations,
                                name = intrinsic_name,
                            )
                        }
                    })?;
                // Then check that the range is also dereferenceable from `a`, ensuring that
                // both pointers are derived from the same allocation.
                self.check_ptr_access_signed(
                    a,
                    dist.checked_neg().unwrap(), // `dist` cannot be `i64::MIN`, so negation cannot overflow
                    CheckInAllocMsg::Dereferenceable,
                )
                .map_err_kind(|_| {
                    err_ub_custom!(
                        fluent::const_eval_offset_from_different_allocations,
                        name = intrinsic_name,
                    )
                })?;

                // Perform division by size to compute the return value.
                let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
                    assert!(0 <= dist && dist <= self.target_isize_max());
                    usize_layout
                } else {
                    assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
                    isize_layout
                };
                let pointee_layout = self.layout_of(instance_args.type_at(0))?;
                // If `ret_layout` is unsigned, we checked above that `dist` is non-negative,
                // so `from_int` is fine here.
                let val = ImmTy::from_int(dist, ret_layout);
                let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
                self.exact_div(&val, &size, dest)?;
            }

            sym::black_box => {
                // These just return their argument.
                self.copy_op(&args[0], dest)?;
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }
            sym::typed_swap_nonoverlapping => {
                self.typed_swap_nonoverlapping_intrinsic(&args[0], &args[1])?;
            }

            sym::vtable_size => {
                let ptr = self.read_pointer(&args[0])?;
                // `None` means we do not check which trait this vtable is for.
                let (size, _align) = self.get_vtable_size_and_align(ptr, None)?;
                self.write_scalar(Scalar::from_target_usize(size.bytes(), self), dest)?;
            }
            sym::vtable_align => {
                let ptr = self.read_pointer(&args[0])?;
                let (_size, align) = self.get_vtable_size_and_align(ptr, None)?;
                self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
            }

            sym::minnumf16 => {
                self.float_minmax_intrinsic::<Half>(args, MinMax::MinimumNumber, dest)?
            }
            sym::minnumf32 => {
                self.float_minmax_intrinsic::<Single>(args, MinMax::MinimumNumber, dest)?
            }
            sym::minnumf64 => {
                self.float_minmax_intrinsic::<Double>(args, MinMax::MinimumNumber, dest)?
            }
            sym::minnumf128 => {
                self.float_minmax_intrinsic::<Quad>(args, MinMax::MinimumNumber, dest)?
            }

            sym::minimumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::Minimum, dest)?,
            sym::minimumf32 => {
                self.float_minmax_intrinsic::<Single>(args, MinMax::Minimum, dest)?
            }
            sym::minimumf64 => {
                self.float_minmax_intrinsic::<Double>(args, MinMax::Minimum, dest)?
            }
            sym::minimumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::Minimum, dest)?,

            sym::maxnumf16 => {
                self.float_minmax_intrinsic::<Half>(args, MinMax::MaximumNumber, dest)?
            }
            sym::maxnumf32 => {
                self.float_minmax_intrinsic::<Single>(args, MinMax::MaximumNumber, dest)?
            }
            sym::maxnumf64 => {
                self.float_minmax_intrinsic::<Double>(args, MinMax::MaximumNumber, dest)?
            }
            sym::maxnumf128 => {
                self.float_minmax_intrinsic::<Quad>(args, MinMax::MaximumNumber, dest)?
            }

            sym::maximumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::Maximum, dest)?,
            sym::maximumf32 => {
                self.float_minmax_intrinsic::<Single>(args, MinMax::Maximum, dest)?
            }
            sym::maximumf64 => {
                self.float_minmax_intrinsic::<Double>(args, MinMax::Maximum, dest)?
            }
            sym::maximumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::Maximum, dest)?,

            sym::copysignf16 => self.float_copysign_intrinsic::<Half>(args, dest)?,
            sym::copysignf32 => self.float_copysign_intrinsic::<Single>(args, dest)?,
            sym::copysignf64 => self.float_copysign_intrinsic::<Double>(args, dest)?,
            sym::copysignf128 => self.float_copysign_intrinsic::<Quad>(args, dest)?,

            sym::fabsf16 => self.float_abs_intrinsic::<Half>(args, dest)?,
            sym::fabsf32 => self.float_abs_intrinsic::<Single>(args, dest)?,
            sym::fabsf64 => self.float_abs_intrinsic::<Double>(args, dest)?,
            sym::fabsf128 => self.float_abs_intrinsic::<Quad>(args, dest)?,

            sym::floorf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,

            sym::ceilf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,

            sym::truncf16 => {
                self.float_round_intrinsic::<Half>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf32 => {
                self.float_round_intrinsic::<Single>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf64 => {
                self.float_round_intrinsic::<Double>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf128 => {
                self.float_round_intrinsic::<Quad>(args, dest, rustc_apfloat::Round::TowardZero)?
            }

            sym::roundf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,

            sym::round_ties_even_f16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::fmaf16 => self.float_muladd_intrinsic::<Half>(args, dest, MulAddType::Fused)?,
            sym::fmaf32 => self.float_muladd_intrinsic::<Single>(args, dest, MulAddType::Fused)?,
            sym::fmaf64 => self.float_muladd_intrinsic::<Double>(args, dest, MulAddType::Fused)?,
            sym::fmaf128 => self.float_muladd_intrinsic::<Quad>(args, dest, MulAddType::Fused)?,
            sym::fmuladdf16 => {
                self.float_muladd_intrinsic::<Half>(args, dest, MulAddType::Nondeterministic)?
            }
            sym::fmuladdf32 => {
                self.float_muladd_intrinsic::<Single>(args, dest, MulAddType::Nondeterministic)?
            }
            sym::fmuladdf64 => {
                self.float_muladd_intrinsic::<Double>(args, dest, MulAddType::Nondeterministic)?
            }
            sym::fmuladdf128 => {
                self.float_muladd_intrinsic::<Quad>(args, dest, MulAddType::Nondeterministic)?
            }

            // Unsupported intrinsic: skip the `return_to_block` below.
            _ => return interp_ok(false),
        }

        trace!("{:?}", self.dump_place(&dest.clone().into()));
        self.return_to_block(ret)?;
        interp_ok(true)
    }

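    /// Evaluates the intrinsics that occur as non-diverging MIR statements
    /// (`assume` and `copy_nonoverlapping`).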
    pub(super) fn eval_nondiverging_intrinsic(
        &mut self,
        intrinsic: &NonDivergingIntrinsic<'tcx>,
    ) -> InterpResult<'tcx> {
        match intrinsic {
            NonDivergingIntrinsic::Assume(op) => {
                let op = self.eval_operand(op, None)?;
                let cond = self.read_scalar(&op)?.to_bool()?;
                if !cond {
                    throw_ub_custom!(fluent::const_eval_assume_false);
                }
                interp_ok(())
            }
            NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
                count,
                src,
                dst,
            }) => {
                let src = self.eval_operand(src, None)?;
                let dst = self.eval_operand(dst, None)?;
                let count = self.eval_operand(count, None)?;
                self.copy_intrinsic(&src, &dst, &count, true)
            }
        }
    }

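    /// Implements the integer bit-twiddling intrinsics (`ctpop`, `ctlz`, `cttz`,
    /// `bswap`, `bitreverse`), operating on the raw bits of `val`.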
    pub fn numeric_intrinsic(
        &self,
        name: Symbol,
        val: Scalar<M::Provenance>,
        layout: TyAndLayout<'tcx>,
        ret_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert!(layout.ty.is_integral(), "invalid type for numeric intrinsic: {}", layout.ty);
        let bits = val.to_bits(layout.size)?;
        // The value is held in 128 bits; `extra` is the number of unused high bits.
        let extra = 128 - u128::from(layout.size.bits());
        let bits_out = match name {
            sym::ctpop => u128::from(bits.count_ones()),
            sym::ctlz_nonzero | sym::cttz_nonzero if bits == 0 => {
                throw_ub_custom!(fluent::const_eval_call_nonzero_intrinsic, name = name,);
            }
            sym::ctlz | sym::ctlz_nonzero => u128::from(bits.leading_zeros()) - extra,
            sym::cttz | sym::cttz_nonzero => u128::from((bits << extra).trailing_zeros()) - extra,
            sym::bswap => {
                assert_eq!(layout, ret_layout);
                (bits << extra).swap_bytes()
            }
            sym::bitreverse => {
                assert_eq!(layout, ret_layout);
                (bits << extra).reverse_bits()
            }
            _ => bug!("not a numeric intrinsic: {}", name),
        };
        interp_ok(Scalar::from_uint(bits_out, ret_layout.size))
    }

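    /// Performs an exact division, i.e. computes `a / b` where it is UB for the
    /// division to have a remainder.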
    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::Provenance>,
        b: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        assert_eq!(a.layout.ty, b.layout.ty);
        assert_matches!(a.layout.ty.kind(), ty::Int(..) | ty::Uint(..));

        // Exact division is UB when `a % b != 0`, `b == 0`, or `a == T::MIN && b == -1`.
        // First, check `a % b != 0` (that computation also catches the other two cases).
        let rem = self.binary_op(BinOp::Rem, a, b)?;
        if rem.to_scalar().to_bits(a.layout.size)? != 0 {
            throw_ub_custom!(
                fluent::const_eval_exact_div_has_remainder,
                a = format!("{a}"),
                b = format!("{b}")
            )
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        let res = self.binary_op(BinOp::Div, a, b)?;
        self.write_immediate(*res, dest)
    }

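    /// Computes `l + r` or `l - r`, saturating at the numeric bounds of the type
    /// instead of overflowing.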
    pub fn saturating_arith(
        &self,
        mir_op: BinOp,
        l: &ImmTy<'tcx, M::Provenance>,
        r: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert_eq!(l.layout.ty, r.layout.ty);
        assert_matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..));
        assert_matches!(mir_op, BinOp::Add | BinOp::Sub);

        let (val, overflowed) =
            self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
        interp_ok(if overflowed.to_bool()? {
            let size = l.layout.size;
            if l.layout.backend_repr.is_signed() {
                // For signed ints the saturated value depends on the sign of the first
                // term, since the sign of the second term can be inferred from this and
                // the fact that the operation has overflowed (if either term were 0, no
                // overflow could occur).
                let first_term: i128 = l.to_scalar().to_int(l.layout.size)?;
                if first_term >= 0 {
                    // Negative overflow is not possible, since a non-negative first term
                    // can only push the result upwards; saturate at the maximum.
                    Scalar::from_int(size.signed_int_max(), size)
                } else {
                    // Positive overflow is not possible, for the analogous reason.
                    Scalar::from_int(size.signed_int_min(), size)
                }
            } else {
                // Unsigned.
                if matches!(mir_op, BinOp::Add) {
                    // Overflow upwards: saturate at the maximum.
                    Scalar::from_uint(size.unsigned_int_max(), size)
                } else {
                    // Underflow downwards: saturate at 0.
                    Scalar::from_uint(0u128, size)
                }
            }
        } else {
            val
        })
    }

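    /// Offsets a pointer by `offset_bytes`, returning an error if the resulting
    /// range leaves the pointer's allocation.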
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        offset_bytes: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
        // The offset must be in bounds starting from `ptr`.
        self.check_ptr_access_signed(
            ptr,
            offset_bytes,
            CheckInAllocMsg::InboundsPointerArithmetic,
        )?;
        // This also implies that there is no overflow, so we are done.
        interp_ok(ptr.wrapping_signed_offset(offset_bytes, self))
    }

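    /// Copies `count * size_of::<T>()` bytes from `*src` to `*dst`.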
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_target_usize(count)?;
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap())?;
        let (size, align) = (layout.size, layout.align.abi);

        let size = self.compute_size_in_bytes(size, count).ok_or_else(|| {
            err_ub_custom!(
                fluent::const_eval_size_overflow,
                name = if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(src)?;
        let dst = self.read_pointer(dst)?;

        self.check_ptr_align(src, align)?;
        self.check_ptr_align(dst, align)?;

        self.mem_copy(src, dst, size, nonoverlapping)
    }

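    /// Does a *typed* swap of `*left` and `*right`.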
    fn typed_swap_nonoverlapping_intrinsic(
        &mut self,
        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx> {
        let left = self.deref_pointer(left)?;
        let right = self.deref_pointer(right)?;
        assert_eq!(left.layout, right.layout);
        assert!(left.layout.is_sized());
        let kind = MemoryKind::Stack;
        let temp = self.allocate(left.layout, kind)?;
        self.copy_op(&left, &temp)?; // checks alignment of `left`

        // We want to always enforce non-overlapping, even for scalar types, so we use
        // the underlying `mem_copy` directly instead of `copy_op`.
        self.mem_copy(right.ptr(), left.ptr(), left.layout.size, /*nonoverlapping*/ true)?;
        // That means we have to validate the value now in `left` ourselves; the value
        // that started out in `left` was already validated by the `copy_op` above.
        if M::enforce_validity(self, left.layout) {
            self.validate_operand(
                &left.clone().into(),
                M::enforce_validity_recursively(self, left.layout),
                true,
            )?;
        }

        self.copy_op(&temp, &right)?; // checks alignment of `right`

        self.deallocate_ptr(temp.ptr(), None, kind)?;
        interp_ok(())
    }

    pub fn write_bytes_intrinsic(
        &mut self,
        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        byte: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        name: &'static str,
    ) -> InterpResult<'tcx> {
        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap())?;

        let dst = self.read_pointer(dst)?;
        let byte = self.read_scalar(byte)?.to_u8()?;
        let count = self.read_target_usize(count)?;

        // `compute_size_in_bytes` returns `None` if `count * size` overflows the target's
        // size limits; no actual allocation can be that large, so report a size overflow.
        let len = self
            .compute_size_in_bytes(layout.size, count)
            .ok_or_else(|| err_ub_custom!(fluent::const_eval_size_overflow, name = name))?;

        let bytes = std::iter::repeat_n(byte, len.bytes_usize());
        self.write_bytes_ptr(dst, bytes)
    }

    pub(crate) fn compare_bytes_intrinsic(
        &mut self,
        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        byte_count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let left = self.read_pointer(left)?;
        let right = self.read_pointer(right)?;
        let n = Size::from_bytes(self.read_target_usize(byte_count)?);

        let left_bytes = self.read_bytes_ptr_strip_provenance(left, n)?;
        let right_bytes = self.read_bytes_ptr_strip_provenance(right, n)?;

        // `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
        let result = Ord::cmp(left_bytes, right_bytes) as i32;
        interp_ok(Scalar::from_i32(result))
    }

    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        rhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap())?;
        assert!(layout.is_sized());

        let get_bytes = |this: &InterpCx<'tcx, M>,
                         op: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>|
         -> InterpResult<'tcx, &[u8]> {
            let ptr = this.read_pointer(op)?;
            this.check_ptr_align(ptr, layout.align.abi)?;
            // Use the closure's `this` parameter here; capturing `self` would conflict
            // with passing `self` as the argument below.
            let Some(alloc_ref) = this.get_ptr_alloc(ptr, layout.size)? else {
                // zero-sized access
                return interp_ok(&[]);
            };
            alloc_ref.get_bytes_strip_provenance()
        };

        let lhs_bytes = get_bytes(self, lhs)?;
        let rhs_bytes = get_bytes(self, rhs)?;
        interp_ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }

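    /// Computes the IEEE min/max operation `op` on `a` and `b`.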
    fn float_minmax<F>(
        &self,
        a: Scalar<M::Provenance>,
        b: Scalar<M::Provenance>,
        op: MinMax,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = a.to_float()?;
        let b: F = b.to_float()?;
        let res = if matches!(op, MinMax::MinimumNumber | MinMax::MaximumNumber) && a == b {
            // The inputs are equal but may differ in sign (+0.0 vs -0.0). The "Number"
            // operations do not specify which one to return, so let the machine decide.
            M::equal_float_min_max(self, a, b)
        } else {
            let result = match op {
                MinMax::Minimum => a.minimum(b),
                MinMax::MinimumNumber => a.min(b),
                MinMax::Maximum => a.maximum(b),
                MinMax::MaximumNumber => a.max(b),
            };
            self.adjust_nan(result, &[a, b])
        };

        interp_ok(res.into())
    }

    fn float_minmax_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        op: MinMax,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let res =
            self.float_minmax::<F>(self.read_scalar(&args[0])?, self.read_scalar(&args[1])?, op)?;
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_copysign_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        self.write_scalar(a.copy_sign(b), dest)?; // bitwise, no NaN adjustments
        interp_ok(())
    }

    fn float_abs_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let x: F = self.read_scalar(&args[0])?.to_float()?;
        self.write_scalar(x.abs(), dest)?; // bitwise, no NaN adjustments
        interp_ok(())
    }

    fn float_round<F>(
        &mut self,
        x: Scalar<M::Provenance>,
        mode: rustc_apfloat::Round,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let x: F = x.to_float()?;
        let res = x.round_to_integral(mode).value;
        let res = self.adjust_nan(res, &[x]);
        interp_ok(res.into())
    }

    fn float_round_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        mode: rustc_apfloat::Round,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let res = self.float_round::<F>(self.read_scalar(&args[0])?, mode)?;
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_muladd<F>(
        &self,
        a: Scalar<M::Provenance>,
        b: Scalar<M::Provenance>,
        c: Scalar<M::Provenance>,
        typ: MulAddType,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = a.to_float()?;
        let b: F = b.to_float()?;
        let c: F = c.to_float()?;

        // In the nondeterministic case, the machine decides whether this particular
        // operation gets fused or not.
        let fuse = typ == MulAddType::Fused || M::float_fuse_mul_add(self);

        let res = if fuse { a.mul_add(b, c).value } else { ((a * b).value + c).value };
        let res = self.adjust_nan(res, &[a, b, c]);
        interp_ok(res.into())
    }

    fn float_muladd_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        typ: MulAddType,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a = self.read_scalar(&args[0])?;
        let b = self.read_scalar(&args[1])?;
        let c = self.read_scalar(&args[2])?;

        let res = self.float_muladd::<F>(a, b, c, typ)?;
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

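    /// Converts `src` from floating point to the integer type `cast_to`, rounding
    /// with mode `round`.
    /// Returns `None` if `src` is NaN or out of range for the target type.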
    pub fn float_to_int_checked(
        &self,
        src: &ImmTy<'tcx, M::Provenance>,
        cast_to: TyAndLayout<'tcx>,
        round: rustc_apfloat::Round,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
        fn float_to_int_inner<'tcx, F: rustc_apfloat::Float, M: Machine<'tcx>>(
            ecx: &InterpCx<'tcx, M>,
            src: F,
            cast_to: TyAndLayout<'tcx>,
            round: rustc_apfloat::Round,
        ) -> (Scalar<M::Provenance>, rustc_apfloat::Status) {
            let int_size = cast_to.layout.size;
            match cast_to.ty.kind() {
                // Unsigned
                ty::Uint(_) => {
                    let res = src.to_u128_r(int_size.bits_usize(), round, &mut false);
                    (Scalar::from_uint(res.value, int_size), res.status)
                }
                // Signed
                ty::Int(_) => {
                    let res = src.to_i128_r(int_size.bits_usize(), round, &mut false);
                    (Scalar::from_int(res.value, int_size), res.status)
                }
                // Nothing else
                _ => span_bug!(
                    ecx.cur_span(),
                    "attempted float-to-int conversion with non-int output type {}",
                    cast_to.ty,
                ),
            }
        }

        let ty::Float(fty) = src.layout.ty.kind() else {
            bug!("float_to_int_checked: non-float input type {}", src.layout.ty)
        };

        let (val, status) = match fty {
            FloatTy::F16 => float_to_int_inner(self, src.to_scalar().to_f16()?, cast_to, round),
            FloatTy::F32 => float_to_int_inner(self, src.to_scalar().to_f32()?, cast_to, round),
            FloatTy::F64 => float_to_int_inner(self, src.to_scalar().to_f64()?, cast_to, round),
            FloatTy::F128 => float_to_int_inner(self, src.to_scalar().to_f128()?, cast_to, round),
        };

        if status.intersects(
            rustc_apfloat::Status::INVALID_OP
                | rustc_apfloat::Status::OVERFLOW
                | rustc_apfloat::Status::UNDERFLOW,
        ) {
            // The floating point value is NaN (flagged with INVALID_OP) or outside the
            // range of values of the integer type (flagged with OVERFLOW or UNDERFLOW).
            interp_ok(None)
        } else {
            // The floating point value can be represented by the integer type after
            // rounding. The INEXACT flag is unimportant.
            interp_ok(Some(ImmTy::from_scalar(val, cast_to)))
        }
    }
}