use rustc_abi::WrappingRange;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

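/// Emits a memory copy of `count` values of type `ty` from `src` to `dst`:
/// a `memmove` when overlap is allowed, otherwise a `memcpy`.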
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags, None);
    }
}

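/// Emits a `memset` filling `count` values of type `ty` at `dst` with the byte `val`.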
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
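    /// Codegen a call to the intrinsic `instance`, storing its result (if any) into `result`.
    ///
    /// Returns `Err(instance)` when neither this method nor the backend can codegen the
    /// intrinsic directly; the caller then emits an ordinary call to the intrinsic's fallback body.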
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

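        // `typed_swap_nonoverlapping` is special-cased before the main `match`: for types the
        // backend handles as immediates (or when not optimizing, or on SPIR-V) a direct swap is
        // emitted here; otherwise we fall through to the generic handling below, which ends up
        // using the intrinsic's fallback body.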
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                // Without optimizations the fallback body would only add overhead, so swap
                // directly in that case too.
                || bx.sess().opts.optimize == OptLevel::No
                // SPIR-V's logical addressing cannot reinterpret values as raw bytes, which the
                // fallback body relies on.
                || bx.sess().target.arch == "spirv"
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };
        let invalid_monomorphization_int_or_ptr_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
                span,
                name,
                ty,
            });
        };

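        // Atomic intrinsics take their `AtomicOrdering` as a const generic argument; pull the
        // enum discriminant out of the valtree and convert it to the codegen-level ordering.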
        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
            discr.to_atomic_ordering()
        };

        if args.is_empty() {
            match name {
                sym::abort
                | sym::unreachable
                | sym::cold_path
                | sym::breakpoint
                | sym::assert_zero_valid
                | sym::assert_mem_uninitialized_valid
                | sym::assert_inhabited
                | sym::ub_checks
                | sym::contract_checks
                | sym::atomic_fence
                | sym::atomic_singlethreadfence
                | sym::caller_location => {}
                _ => {
                    span_bug!(
                        span,
                        "Nullary intrinsic {name} must be called in a const block. \
                        If you are seeing this message from code outside the standard library, the \
                        unstable implementation details of the relevant intrinsic may have changed. \
                        Consider using stable APIs instead. \
                        If you are adding a new nullary intrinsic that is inherently a runtime \
                        intrinsic, update this check."
                    );
                }
            }
        }

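        // Each arm either stores its result and returns early, or evaluates to a backend value
        // that is written into `result` after the `match`.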
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    // The size of an object never exceeds `isize::MAX`.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
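            // Note the argument order: `copy(src, dst, count)`, so the destination is `args[1]`.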
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

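            // Unlike `copy`, the `volatile_*_memory` intrinsics take the destination first.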
            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

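            // Atomic intrinsics. Each arm first checks that the intrinsic was monomorphized with
            // a supported type (integers, and for some operations raw pointers) and reports an
            // `InvalidMonomorphization` error otherwise.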
            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
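            // `atomic_max`/`atomic_min` are the signed variants and `atomic_umax`/`atomic_umin`
            // the unsigned ones, hence the `ty::Int` vs. `ty::Uint` checks below.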
            sym::atomic_max | sym::atomic_min => {
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg => {
                let ty = fn_args.type_at(0);
                let ordering = fn_args.const_at(1).to_value();
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    let atomic_op = AtomicRmwBinOp::AtomicXchg;
                    bx.atomic_rmw(
                        atomic_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        ty.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                // The type stored in memory and the type of the operand: they must match for the
                // integer forms, while the `AtomicPtr` forms operate on a raw pointer in memory
                // with a `usize` operand.
                let ty_mem = fn_args.type_at(0);
                let ty_op = fn_args.type_at(1);

                let ordering = fn_args.const_at(2).to_value();
                if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
                    || (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
                {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        ty_mem.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty_mem);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

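            // Pointer-difference intrinsics: subtract the two addresses and divide exactly by
            // the pointee size. The callers guarantee the distance is a multiple of that size.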
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // The signed form may yield a negative offset, so the subtraction carries no
                    // overflow flags; only the division is marked exact.
                    let d = bx.sub(a, b);
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The unsigned form requires `a >= b`, so the subtraction cannot wrap and an
                    // unsigned exact division can be used.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
                // No code is generated for `cold_path`; it is only a hint to the optimizer.
                return Ok(());
            }

            _ => {
                // Everything else needs backend-specific handling (or the intrinsic's fallback body).
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

        if result.layout.ty.is_bool() {
            // `bool` has a different immediate and in-memory representation; convert before storing.
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

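// Returns the width of an integer type and whether it is signed, using the target's pointer
// width for `isize`/`usize`. Returns `None` if the type is not an integer.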
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

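// Returns the width in bits of a float type, or `None` if the type is not a float.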
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}