use itertools::Itertools as _;
use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::FunctionCx;
use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                if matches!(
                    cg_operand.layout.backend_repr,
                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..),
                ) {
                    debug_assert!(!matches!(cg_operand.val, OperandValue::Ref(..)));
                }
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
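                // If the destination is represented as a scalar pair (i.e. it is a
                // wide pointer), the coercion can be computed entirely on operands:
                // build the (data, metadata) pair and store it.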
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
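                        // An immediate or pair source has no backing memory, but
                        // `coerce_unsized_into` works on places, so spill it to a
                        // short-lived scratch alloca first.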
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(
                mir::CastKind::Transmute | mir::CastKind::Subtype,
                ref operand,
                _ty,
            ) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                if dest.layout.is_zst() {
                    return;
                }

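                // If the element is a constant whose bytes are all uninitialized,
                // the whole array can be "initialized" with a single `undef` memset
                // instead of materializing the element at all.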
                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

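                // Try to lower the repeat to a single memset: this works when every
                // byte of the element value is the same, or when the element is
                // already a single byte.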
                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    if let Some(int) = bx.cx().const_to_opt_u128(v, false)
                        && let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()]
                        && let Ok(&byte) = bytes.iter().all_equal_value()
                    {
                        let fill = bx.cx().const_u8(byte);
                        bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                        return true;
                    }

                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                if let OperandValue::Immediate(v) = cg_elem.val
                    && try_init_all_same(bx, v)
                {
                    return;
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

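            // Aggregates other than raw pointers are built in place: each field
            // operand is stored directly into the destination (downcast to the
            // active variant for enums), and the discriminant is set afterwards.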
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

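    /// Transmutes the `src` operand to the type of `dst` by storing its bits into
    /// `dst`'s backing memory. If the sizes differ or either type is uninhabited,
    /// the transmute is undefined behaviour, so `unreachable` is emitted instead.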
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
            bx.unreachable_nonterminator();
        } else {
            src.val.store(bx, dst.val.with_type(src.layout));
        }
    }

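    /// Transmutes an `OperandValue` to the layout given by `cast`.
    ///
    /// This is supported only when the target can be represented as an immediate
    /// (or is zero-sized); transmutes that would need a `Memory`-repr result must
    /// go through `codegen_transmute` with a destination place instead.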
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
            && !cast.is_zst()
        {
            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
        }

        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
            return operand.val;
        }

        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            bx.unreachable_nonterminator();

            return OperandValue::poison(bx, cast);
        }

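        // Only vectors whose elements are plain initialized integers or floats can
        // be transmuted with a single `bitcast`; anything else falls through to
        // the general memory path below.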
        #[inline]
        fn vector_can_bitcast(x: abi::Scalar) -> bool {
            matches!(
                x,
                abi::Scalar::Initialized {
                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
                    ..
                }
            )
        }

        let cx = bx.cx();
        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::SimdVector { element: from_scalar, .. },
                abi::BackendRepr::SimdVector { element: to_scalar, .. },
            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
                let to_backend_ty = bx.cx().immediate_backend_type(cast);
                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
            }
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
                OperandValue::Pair(
                    transmute_scalar(bx, imm_a, in_a, out_a),
                    transmute_scalar(bx, imm_b, in_b, out_b),
                )
            }
            _ => {
                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
                let size = Ord::max(operand.layout.size, cast.size);
                let temp = PlaceValue::alloca(bx, size, align);
                bx.lifetime_start(temp.llval, size);
                operand.val.store(bx, temp.with_type(operand.layout));
                let val = bx.load_operand(temp.with_type(cast)).val;
                bx.lifetime_end(temp.llval, size);
                val
            }
        }
    }

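    /// Casts a scalar immediate `imm` from `from_scalar` to `to_scalar`.
    ///
    /// Returns `None` if the combination of primitives is not a supported
    /// numeric or pointer cast; the caller treats that as a bug.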
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

        assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

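    /// Codegens `rvalue` into a new `OperandRef`, rather than storing it into a
    /// pre-existing destination place as `codegen_rvalue` does.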
    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
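                        // A safe fn pointer becoming an unsafe fn pointer is purely
                        // a type-level change; the value is reused as-is.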
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
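                        // All of these casts operate on a single scalar immediate
                        // and are lowered through `cast_immediate`.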
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr
                        else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(
                            bx,
                            imm,
                            from_scalar,
                            from_backend_ty,
                            to_scalar,
                            to_backend_ty,
                        )
                        .map(OperandValue::Immediate)
                        .unwrap_or_else(|| {
                            bug!("Unsupported cast of {operand:?} to {cast:?}");
                        })
                    }
                    mir::CastKind::Transmute | mir::CastKind::Subtype => {
                        self.codegen_transmute_operand(bx, operand, cast)
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_instance_attrs(instance.def))
                    } else {
                        None
                    };
                    bx.call(
                        fn_ty,
                        fn_attrs.as_deref(),
                        Some(fn_abi),
                        fn_ptr,
                        &[],
                        None,
                        Some(instance),
                    )
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(ref elem, len_const) => {
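                // Only zero-sized repeats are ever evaluated as operands; repeats
                // with actual storage are handled by `codegen_rvalue`, which writes
                // them directly into a destination place.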
                let operand = self.codegen_operand(bx, elem);
                let array_ty = Ty::new_array_with_const_len(bx.tcx(), operand.layout.ty, len_const);
                let array_ty = self.monomorphize(array_ty);
                let array_layout = bx.layout_of(array_ty);
                assert!(array_layout.is_zst());
                OperandRef { val: OperandValue::ZeroSized, layout: array_layout }
            }
            mir::Rvalue::Aggregate(ref kind, ref fields) => {
                let (variant_index, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        (variant_index, active_field_index)
                    }
                    _ => (FIRST_VARIANT, None),
                };

                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let mut builder = OperandRefBuilder::new(layout);
                for (field_idx, field) in fields.iter_enumerated() {
                    let op = self.codegen_operand(bx, field);
                    let fi = active_field_index.unwrap_or(field_idx);
                    builder.insert_field(bx, variant_index, fi, op);
                }

                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
                match tag_result {
                    Err(super::place::UninhabitedVariantError) => {
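                        // The requested variant is uninhabited, so this code is
                        // unreachable: trap and hand back poison of the right layout.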
                        bx.abort();
                        let val = OperandValue::poison(bx, layout);
                        OperandRef { val, layout }
                    }
                    Ok(maybe_tag_value) => {
                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
                            builder.insert_imm(tag_field, tag_imm);
                        }
                        builder.build(bx.cx())
                    }
                }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
            mir::Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in codegen"),
        }
    }

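    /// Codegens a `Ref` or `RawPtr` rvalue: computes the address of `place` and
    /// wraps it in an operand whose type is built by `mk_ptr_ty` (a reference or
    /// raw pointer to the place's type).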
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

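    /// Lowers a MIR `BinOp` on scalar immediates to the corresponding backend
    /// operation, choosing the float, signed, or unsigned flavour as appropriate.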
    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
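                    // `offset` on a pointer to a ZST has a stride of zero bytes, so
                    // the address is unchanged.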
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                assert!(!is_float);
                bx.three_way_compare(lhs_ty, lhs, rhs)
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

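    /// Compares two wide (fat) pointers component-wise: `*_addr` are the data
    /// pointers and `*_extra` the metadata values. Only equality and ordering
    /// comparisons are supported here.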
    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
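                // Lexicographic comparison on (addr, extra): the result is
                // `lhs_addr <strict_op> rhs_addr
                //  || (lhs_addr == rhs_addr && lhs_extra <op> rhs_extra)`,
                // where `strict_op` drops the "or equal" part for the address check.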
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

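    /// Lowers an overflow-checked `Add`, `Sub`, or `Mul` to the backend's checked
    /// operation, producing a `(value, overflow-flag)` pair.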
    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

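/// Transmutes a single scalar immediate `imm` from `from_scalar` to `to_scalar`,
/// which must have the same size. The value is reinterpreted bit-for-bit; only
/// the backend-level representation (int, float, pointer) changes.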
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    to_scalar: abi::Scalar,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
    let imm_ty = bx.cx().val_ty(imm);
    assert_ne!(
        bx.cx().type_kind(imm_ty),
        TypeKind::Vector,
        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
    );

    if from_scalar == to_scalar {
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);

    assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));

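    // The two scalars have the same size, so each conversion below preserves the
    // bits: conversions involving pointers use ptrtoint/inttoptr (or pointercast),
    // everything else a plain bitcast.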
    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
        (Int(..), Pointer(..)) => bx.inttoptr(imm, to_backend_ty),
        (Pointer(..), Int(..)) => bx.ptrtoint(imm, to_backend_ty),
        (Float(_), Pointer(..)) => {
            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
            bx.inttoptr(int_imm, to_backend_ty)
        }
        (Pointer(..), Float(_)) => {
            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
            bx.bitcast(int_imm, to_backend_ty)
        }
    };

    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);

    assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));

    imm = bx.to_immediate_scalar(imm, to_scalar);
    imm
}

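/// Emits an `assume` telling the backend which values `imm` may hold, based on
/// `scalar`'s valid range. `known` is a scalar whose range is already known to
/// hold for the value (e.g. the scalar it was just transmuted from); if that
/// range is a subset of `scalar`'s, the assume is redundant and skipped. Nothing
/// is emitted at `OptLevel::No`.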
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
    known: Option<&abi::Scalar>,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
        return;
    }

    match (scalar, known) {
        (abi::Scalar::Union { .. }, _) => return,
        (_, None) => {
            if scalar.is_always_valid(bx.cx()) {
                return;
            }
        }
        (abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
            let known_range = known.valid_range(bx.cx());
            if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
                return;
            }
        }
    }

    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}