use rustc_abi::WrappingRange;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::{Span, sym};
use rustc_target::callconv::{FnAbi, PassMode};

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, errors, meth, size_of_val};

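/// Emits a `memcpy` (or `memmove` when the regions may overlap) copying
/// `count` values of type `ty`, i.e. `count * size_of::<T>()` bytes in total.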
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

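/// Emits a `memset` filling `count * size_of::<T>()` bytes at `dst` with the
/// byte `val`.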
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
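    /// Codegens a call to the given intrinsic. In the `Err` case, returns the
    /// instance that should be called instead (its fallback body).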
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let callee_ty = instance.ty(bx.tcx(), bx.typing_env());

        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(bx.typing_env(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

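        // `typed_swap_nonoverlapping` has a fallback MIR body, but when the
        // values are immediates (not `OperandValue::Ref`) it is better to swap
        // them directly here instead of going through memory.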
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                // If we're not optimizing, using the fallback body just makes
                // things worse, so don't bother.
                || bx.sess().opts.optimize == OptLevel::No
                // SPIR-V's Logical addressing model does not allow reinterpreting
                // values as (chunkable) byte arrays the way the fallback body does.
                || bx.sess().target.arch == "spirv"
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
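            // For possibly-unsized types, size and alignment are computed
            // dynamically from the pointer's metadata.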
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::min_align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
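            // Size and alignment live at fixed slots in the vtable; load the
            // requested one and attach range metadata describing its valid values.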
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable, callee_ty);
                match name {
                    sym::vtable_size => {
                        // Size is always <= isize::MAX.
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    sym::vtable_align => {
                        // Alignment is always nonzero.
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
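            // These intrinsics depend only on their type arguments, so they are
            // evaluated at compile time via `const_eval_instance`.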
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx.tcx().const_eval_instance(bx.typing_env(), instance, span).unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
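            // `arith_offset` is a wrapping pointer offset: a plain `gep` without
            // the `inbounds` requirements of `ptr::offset`.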
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
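            // `copy` takes (src, dst, count), so the first two operands are
            // swapped when forwarding to `copy_intrinsic(.., dst, src, ..)`.
            // Overlap is allowed, hence the `memmove` lowering.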
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
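            // Bitwise OR of operands known to share no set bits, so the backend
            // may exploit the disjointness (e.g. LLVM's `or disjoint`).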
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
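            // `exact_div` promises the division leaves no remainder, so it can
            // use the exact division instructions.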
            sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
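            // The `_fast` float ops carry full fast-math flags: UB if any input
            // or result is NaN or infinite.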
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: arg_tys[0],
                        });
                        return Ok(());
                    }
                }
            }
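            // The `_algebraic` versions permit reassociation and other algebraic
            // rewrites but, unlike `_fast`, stay defined for NaN and infinities.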
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(arg_tys[0]) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: arg_tys[0],
                    });
                    return Ok(());
                }
            },

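            // Converts with no overflow checking: UB if the truncated value does
            // not fit in the target integer type.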
            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: arg_tys[0],
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: ret_ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

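            // Reads an enum's discriminant through a pointer to the value.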
            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

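            // Atomic intrinsics follow the naming pattern
            // `atomic_<operation>[_<ordering>]`; split the name to dispatch.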
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
                };

                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
                };

                let invalid_monomorphization = |ty| {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().dcx().emit_fatal(errors::AtomicCompareExchange);
                        };
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let weak = instruction == "cxchgweak";
                            let dst = args[0].immediate();
                            let cmp = args[1].immediate();
                            let src = args[2].immediate();
                            let (val, success) = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            // The result is a pair (old value, success flag);
                            // write both fields into the return place.
                            let dest = result.project_field(bx, 0);
                            bx.store_to_place(val, dest.val);
                            let dest = result.project_field(bx, 1);
                            bx.store_to_place(success, dest.val);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "load" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let source = args[0].immediate();
                            bx.atomic_load(
                                bx.backend_type(layout),
                                source,
                                parse_ordering(bx, ordering),
                                size,
                            )
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }

                    "store" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let size = bx.layout_of(ty).size;
                            let val = args[1].immediate();
                            let ptr = args[0].immediate();
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return Ok(());
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return Ok(());
                    }

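                    // All remaining atomic_* intrinsics are read-modify-write ops.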
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOperation),
                        };

                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                }
            }

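            // A store with a nontemporal hint: the data is not expected to be
            // read again soon, so the backend may bypass the cache.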
            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

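            // Computes (a - b) / size_of::<T>() as an exact division; the
            // `_unsigned` variant additionally knows that `a >= b`.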
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // The subtraction can be neither `nsw` nor `nuw`: the inputs
                    // are treated as unsigned but the difference is signed.
                    let d = bx.sub(a, b);
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows `a >= b`, so it can use an
                    // unchecked subtraction and an exact unsigned division.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

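            // `cold_path` is just a hint to the optimizer; there is nothing to
            // emit here.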
            sym::cold_path => {
                return Ok(());
            }

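            // Everything else needs backend-specific handling; the backend's
            // `Err` asks the caller to codegen the fallback body instead.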
            _ => {
                return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
            }
        };

        // Store the computed value into the return place, honoring the ABI's
        // chosen return mode.
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                bx.store_to_place(llval, result.val);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
        Ok(())
    }
}

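/// Returns the bit width of an integer type and whether it is signed, using the
/// target's pointer width for `isize`/`usize`; `None` if `ty` is not an integer.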
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

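/// Returns the bit width of a float type, or `None` if `ty` is not a float.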
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}