use rustc_abi::WrappingRange;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;
use rustc_target::spec::Arch;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

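/// Emits a typed memory copy of `count` elements of `ty` from `src` to `dst`,
/// using `memmove` when the ranges may overlap and `memcpy` otherwise, with
/// `MemFlags::VOLATILE` set for the volatile variants.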
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

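/// Emits a `memset` that fills `count` elements of `ty` starting at `dst` with
/// the byte `val`, with `MemFlags::VOLATILE` set for the volatile variant.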
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
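    /// Codegens a call to the intrinsic `instance`, storing its result (if any) into
    /// `result`. In the `Err` case the intrinsic could not be handled here and the
    /// returned instance should be codegenned as a regular call instead.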
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

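        // `typed_swap_nonoverlapping` is lowered directly to a typed in-place swap when
        // the pointee is not passed around by reference in the backend (or when not
        // optimizing, or on SPIR-V); otherwise it falls through to the generic handling
        // below.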
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                // Without optimizations, going through the fallback body only makes
                // things worse, so don't bother.
                || bx.sess().opts.optimize == OptLevel::No
                // SPIR-V does not support the untyped, byte-chunked accesses that the
                // fallback body relies on, so always swap in a typed way there.
                || bx.sess().target.arch == Arch::SpirV
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };
        let invalid_monomorphization_int_or_ptr_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
                span,
                name,
                ty,
            });
        };

        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
            discr.to_atomic_ordering()
        };

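        // Any nullary intrinsic that can legitimately reach codegen must be listed here;
        // all other nullary intrinsics are expected to be const-evaluated away.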
        if args.is_empty() {
            match name {
                sym::abort
                | sym::unreachable
                | sym::cold_path
                | sym::breakpoint
                | sym::assert_zero_valid
                | sym::assert_mem_uninitialized_valid
                | sym::assert_inhabited
                | sym::ub_checks
                | sym::contract_checks
                | sym::atomic_fence
                | sym::atomic_singlethreadfence
                | sym::caller_location => {}
                _ => {
                    span_bug!(
                        span,
                        "Nullary intrinsic {name} must be called in a const block. \
                        If you are seeing this message from code outside the standard library, the \
                        unstable implementation details of the relevant intrinsic may have changed. \
                        Consider using stable APIs instead. \
                        If you are adding a new nullary intrinsic that is inherently a runtime \
                        intrinsic, update this check."
                    );
                }
            }
        }

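        // Each arm either produces the value of the intrinsic, which is stored into
        // `result` after the match, or emits its own code and returns early.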
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

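            // The `volatile_*` memory intrinsics reuse the helpers above with
            // `MemFlags::VOLATILE` set on the emitted memcpy/memmove/memset.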
            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

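            // Atomic intrinsics. The memory orderings are passed as const generic
            // arguments and decoded with `parse_atomic_ordering`; operand types are
            // validated before emitting the corresponding atomic operation.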
            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
            sym::atomic_max | sym::atomic_min => {
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg => {
                let ty = fn_args.type_at(0);
                let ordering = fn_args.const_at(1).to_value();
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    let atomic_op = AtomicRmwBinOp::AtomicXchg;
                    bx.atomic_rmw(
                        atomic_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        ty.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                // The type of the in-memory value.
                let ty_mem = fn_args.type_at(0);
                // The type of the second operand, passed by value.
                let ty_op = fn_args.type_at(1);

                let ordering = fn_args.const_at(2).to_value();
                // Both operands must have the same integer type, or the memory operand
                // must be a raw pointer and the value operand a `usize`.
                if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
                    || (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
                {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        ty_mem.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty_mem);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

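            // Pointer difference in units of `ty`. The `_unsigned` variant may assume
            // the first pointer is not below the second, so it can use a non-wrapping
            // subtraction and an exact unsigned division.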
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    let d = bx.sub(a, b);
                    bx.exactsdiv(d, pointee_size)
                } else {
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

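            // `cold_path` is only a hint for the optimizer; no code is emitted for it.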
            sym::cold_path => {
                return Ok(());
            }

            _ => {
                // Everything else needs backend-specific handling.
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

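        // Store the computed value into the destination place. `bool` results must be
        // converted from their immediate to their memory representation first; unit
        // results have nothing to store.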
        if result.layout.ty.is_bool() {
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

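/// Returns the bit width of an integer type and whether it is signed, or `None` if the
/// type is not an integer. `isize` and `usize` report the target's pointer width.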
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

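/// Returns the bit width of a float type, or `None` if the type is not a float.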
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}