use itertools::Itertools as _;
use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::FunctionCx;
use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
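                // Scalar and scalar-pair operands are always materialized as
                // immediates, so they should never show up here as an
                // indirect `OperandValue::Ref`.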
                if matches!(
                    cg_operand.layout.backend_repr,
                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
                ) {
                    debug_assert!(!matches!(cg_operand.val, OperandValue::Ref(..)));
                }
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
                if bx.cx().is_backend_scalar_pair(dest.layout) {
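                    // The destination is a scalar pair (e.g. a wide pointer), so
                    // the coerced value can be built as an operand and stored
                    // directly, without going through memory.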
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
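                // Unsizing of a non-pointer value (e.g. a struct whose last
                // field is coerced): delegate to `coerce_unsized_into`, which
                // works on places, so the operand may first need to be spilled
                // to memory.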
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        debug!("codegen_rvalue: creating ugly alloca");
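                        // The source is an immediate, but `coerce_unsized_into`
                        // needs a place, so spill it to a short-lived scratch
                        // alloca first.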
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                if dest.layout.is_zst() {
                    return;
                }

                if let mir::Operand::Constant(const_op) = elem {
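                    // If the element is a constant whose bytes are all
                    // uninitialized, the whole array can be "initialized"
                    // with a single undef memset.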
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    if let Some(int) = bx.cx().const_to_opt_u128(v, false)
                        && let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()]
                        && let Ok(&byte) = bytes.iter().all_equal_value()
                    {
                        let fill = bx.cx().const_u8(byte);
                        bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                        return true;
                    }

                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                if let OperandValue::Immediate(v) = cg_elem.val
                    && try_init_all_same(bx, v)
                {
                    return;
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

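            // Aggregates are built by storing each non-ZST field into its
            // projected place; raw-pointer aggregates are instead handled by
            // the operand path in the `_` arm below.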
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
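                    // ZST fields have nothing to store; for everything else,
                    // project the destination field and write the operand into it.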
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

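    /// Transmutes the `src` operand and stores the result into the `dst` place.
    ///
    /// Size-changing transmutes, or ones involving an uninhabited type on either
    /// side, are undefined behaviour, so they just mark the location unreachable.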
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
            bx.unreachable_nonterminator();
        } else {
            src.val.store(bx, dst.val.with_type(src.layout));
        }
    }

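    /// Transmutes an `OperandValue` to the layout given by `cast`.
    ///
    /// This only supports `cast` layouts that are *not* `BackendRepr::Memory`
    /// (other than ZSTs); anything that has to live in memory must go through
    /// `codegen_transmute` instead.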
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
            && !cast.is_zst()
        {
            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
        }

        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
            return operand.val;
        }

        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            bx.unreachable_nonterminator();

            return OperandValue::poison(bx, cast);
        }

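        // A SIMD vector immediate can only be `bitcast` directly if its element
        // scalar is a plain initialized integer or float; pointer elements (and
        // union scalars) have to go through memory instead.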
        #[inline]
        fn vector_can_bitcast(x: abi::Scalar) -> bool {
            matches!(
                x,
                abi::Scalar::Initialized {
                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
                    ..
                }
            )
        }

        let cx = bx.cx();
        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::SimdVector { element: from_scalar, .. },
                abi::BackendRepr::SimdVector { element: to_scalar, .. },
            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
                let to_backend_ty = bx.cx().immediate_backend_type(cast);
                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
            }
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
                OperandValue::Pair(
                    transmute_scalar(bx, imm_a, in_a, out_a),
                    transmute_scalar(bx, imm_b, in_b, out_b),
                )
            }
            _ => {
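                // Everything else goes through memory: spill the operand to a
                // temporary that is large and aligned enough for both layouts,
                // then load it back with the destination layout.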
                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
                let size = Ord::max(operand.layout.size, cast.size);
                let temp = PlaceValue::alloca(bx, size, align);
                bx.lifetime_start(temp.llval, size);
                operand.val.store(bx, temp.with_type(operand.layout));
                let val = bx.load_operand(temp.with_type(cast)).val;
                bx.lifetime_end(temp.llval, size);
                val
            }
        }
    }

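    /// Casts one of the immediates from `mir::CastKind::IntToInt`,
    /// `mir::CastKind::FloatToFloat`, and friends into the destination's
    /// immediate backend type, returning `None` for scalar combinations that
    /// are not supported here.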
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

        assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
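                        // A function pointer keeps the same representation when
                        // the `unsafe` qualifier is added, so this coercion is a
                        // no-op on the value.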
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
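                                // Wide-to-thin pointer cast: keep just the data
                                // pointer and drop the metadata.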
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast)
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_instance_attrs(instance.def))
                    } else {
                        None
                    };
                    bx.call(
                        fn_ty,
                        fn_attrs.as_deref(),
                        Some(fn_abi),
                        fn_ptr,
                        &[],
                        None,
                        Some(instance),
                    )
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(ref elem, len_const) => {
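                // Arrays always have a `Memory` backend representation, so any
                // non-ZST `Repeat` is handled by `codegen_rvalue` storing into a
                // place; only zero-sized arrays can reach this operand path, as
                // the assert below checks.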
                let operand = self.codegen_operand(bx, elem);
                let array_ty =
                    Ty::new_array_with_const_len(bx.tcx(), operand.layout.ty, len_const);
                let array_ty = self.monomorphize(array_ty);
                let array_layout = bx.layout_of(array_ty);
                assert!(array_layout.is_zst());
                OperandRef { val: OperandValue::ZeroSized, layout: array_layout }
            }
            mir::Rvalue::Aggregate(ref kind, ref fields) => {
                let (variant_index, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        (variant_index, active_field_index)
                    }
                    _ => (FIRST_VARIANT, None),
                };

                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let mut builder = OperandRefBuilder::new(layout);
                for (field_idx, field) in fields.iter_enumerated() {
                    let op = self.codegen_operand(bx, field);
                    let fi = active_field_index.unwrap_or(field_idx);
                    builder.insert_field(bx, variant_index, fi, op);
                }

                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
                match tag_result {
                    Err(super::place::UninhabitedVariantError) => {
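                        // There is no valid tag to produce for an uninhabited
                        // variant, so abort; the poison value only gives the
                        // unreachable continuation something of the right layout.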
                        bx.abort();
                        let val = OperandValue::poison(bx, layout);
                        OperandRef { val, layout }
                    }
                    Ok(maybe_tag_value) => {
                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
                            builder.insert_imm(tag_field, tag_imm);
                        }
                        builder.build(bx.cx())
                    }
                }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

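    /// Shared implementation of `Rvalue::Ref` and `Rvalue::RawPtr`: takes the
    /// address of `place` and wraps it in an operand whose pointer type is
    /// produced by `mk_ptr_ty`.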
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
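                    // `Offset` counts in units of the pointee size, so offsetting
                    // a pointer to a ZST is a no-op.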
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                assert!(!is_float);
                bx.three_way_compare(lhs_ty, lhs, rhs)
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
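                // Wide pointers compare lexicographically:
                //     a OP b  ~  (a.addr STRICT_OP b.addr)
                //                 || (a.addr == b.addr && a.extra OP b.extra)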
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

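/// Transmutes the immediate `imm` from the `from_scalar` representation to the
/// `to_scalar` representation; the two scalars must have the same size.
///
/// The value is taken and returned in *immediate* form (see `from_immediate`
/// and `to_immediate_scalar`); vector immediates are rejected by an assertion
/// and must be transmuted through memory instead.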
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    to_scalar: abi::Scalar,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
    let imm_ty = bx.cx().val_ty(imm);
    assert_ne!(
        bx.cx().type_kind(imm_ty),
        TypeKind::Vector,
        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
    );

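    // Identical scalars need no conversion at all; return the immediate unchanged.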
    if from_scalar == to_scalar {
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);

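    // Let the backend assume the source scalar's valid range on the value
    // before converting it, unless that range is already implied by what is
    // known from `to_scalar`.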
    assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));

    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
        (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
        (Pointer(..), Int(..)) => {
            bx.ptrtoint(imm, to_backend_ty)
        }
        (Float(_), Pointer(..)) => {
            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
            bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
        }
        (Pointer(..), Float(_)) => {
            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
            bx.bitcast(int_imm, to_backend_ty)
        }
    };

    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);

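    // Also assert the destination scalar's valid range on the converted value,
    // unless it is already implied by the source's range; this is how the
    // backend learns constraints (e.g. non-zero niches) that the transmute
    // introduces.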
    assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));

    imm = bx.to_immediate_scalar(imm, to_scalar);
    imm
}

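/// Emits an `assume` (only when optimizations are enabled) that `imm` lies in
/// the valid range of `scalar`.
///
/// If `known` is provided, the `assume` is skipped whenever `scalar`'s valid
/// range already contains the known scalar's range, since it would add no new
/// information.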
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
    known: Option<&abi::Scalar>,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
        return;
    }

    match (scalar, known) {
        (abi::Scalar::Union { .. }, _) => return,
        (_, None) => {
            if scalar.is_always_valid(bx.cx()) {
                return;
            }
        }
        (abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
            let known_range = known.valid_range(bx.cx());
            if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
                return;
            }
        }
    }

    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}