use rustc_abi::{Align, WrappingRange};
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;
use rustc_target::spec::Arch;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

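/// Emits a (possibly volatile) `memcpy`/`memmove` of `count` values of type `ty` from
/// `src` to `dst`; `allow_overlap` selects `memmove` instead of `memcpy`.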
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags, None);
    }
}

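/// Emits a (possibly volatile) `memset` that fills `count` values of type `ty` at `dst`
/// with the byte `val`.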
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
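    /// Lowers a call to the intrinsic `instance` to backend IR, storing any result into
    /// `result`. In the `Err` case the intrinsic was not handled here and the returned
    /// instance should be codegenned as a regular call instead.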
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

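        // `typed_swap_nonoverlapping` is lowered directly to a typed place swap when the
        // pointee is not passed by reference in the backend, when optimizations are
        // disabled, or on SPIR-V; otherwise it falls through to the generic handling below.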
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                || bx.sess().opts.optimize == OptLevel::No
                || bx.sess().target.arch == Arch::SpirV
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };
        let invalid_monomorphization_int_or_ptr_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
                span,
                name,
                ty,
            });
        };

        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.to_branch()[0].to_leaf();
            discr.to_atomic_ordering()
        };

        if args.is_empty() {
            match name {
                sym::abort
                | sym::unreachable
                | sym::cold_path
                | sym::breakpoint
                | sym::amdgpu_dispatch_ptr
                | sym::assert_zero_valid
                | sym::assert_mem_uninitialized_valid
                | sym::assert_inhabited
                | sym::ub_checks
                | sym::contract_checks
                | sym::atomic_fence
                | sym::atomic_singlethreadfence
                | sym::caller_location => {}
                _ => {
                    span_bug!(
                        span,
                        "Nullary intrinsic {name} must be called in a const block. \
                        If you are seeing this message from code outside the standard library, the \
                        unstable implementation details of the relevant intrinsic may have changed. \
                        Consider using stable APIs instead. \
                        If you are adding a new nullary intrinsic that is inherently a runtime \
                        intrinsic, update this check."
                    );
                }
            }
        }

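        // Every arm below either returns early or yields a value that is stored into
        // `result` after the match.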
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    sym::vtable_align => {
                        let align_bound = Align::max_for_target(bx.data_layout()).bytes().into();
                        bx.range_metadata(value, WrappingRange { start: 1, end: align_bound })
                    }
                    _ => {}
                }
                value
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

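            // Atomic intrinsics: the memory ordering is passed as a const generic argument
            // and decoded via `parse_atomic_ordering` above.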
            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
            sym::atomic_max | sym::atomic_min => {
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg => {
                let ty = fn_args.type_at(0);
                let ordering = fn_args.const_at(1).to_value();
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    let atomic_op = AtomicRmwBinOp::AtomicXchg;
                    bx.atomic_rmw(
                        atomic_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        ty.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                let ty_mem = fn_args.type_at(0);
                let ty_op = fn_args.type_at(1);

                let ordering = fn_args.const_at(2).to_value();
                if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
                    || (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
                {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        ty_mem.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty_mem);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    let d = bx.sub(a, b);
                    bx.exactsdiv(d, pointee_size)
                } else {
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
                return Ok(());
            }

            _ => {
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

        if result.layout.ty.is_bool() {
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

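// Returns the width of an integer type, and whether it is signed.
// Returns `None` if the type is not an integer.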
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

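// Returns the width of a float type.
// Returns `None` if the type is not a float.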
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}