use itertools::Itertools as _;
use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
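    /// Codegen an `Rvalue` by storing its result directly into the `dest`
    /// place, rather than building it up as an `OperandRef`.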
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
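                // If the destination is a scalar pair, the coercion result is
                // a wide pointer (or a newtype of one), so the operand path
                // can build it directly and we just store it.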
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

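                // Otherwise the destination lives in memory, so unsize the
                // source into it field by field.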
                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        debug!("codegen_rvalue: creating ugly alloca");
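                        // The source is an SSA value, but `coerce_unsized_into`
                        // operates on places, so spill it to a scratch alloca
                        // first.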
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                if dest.layout.is_zst() {
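                    // A repeat of a ZST writes no bytes at all.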
                    return;
                }

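                // If the element is a constant whose bytes are all uninit,
                // one `undef` memset over the whole array suffices.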
                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

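                    // A constant element whose bytes are all the same value
                    // can be lowered to a single memset of that byte.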
                    if let Some(int) = bx.cx().const_to_opt_u128(v, false)
                        && let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()]
                        && let Ok(&byte) = bytes.iter().all_equal_value()
                    {
                        let fill = bx.cx().const_u8(byte);
                        bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                        return true;
                    }

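                    // A byte-sized element can be memset with its runtime
                    // value, even when it isn't a constant.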
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                if let OperandValue::Immediate(v) = cg_elem.val
                    && try_init_all_same(bx, v)
                {
                    return;
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

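            // `RawPtr` aggregates produce an immediate or a scalar pair, so
            // they are handled by the operand path in the fallback arm below.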
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    if !op.layout.is_zst() {
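                        // Only non-ZST fields have bytes to store.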
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

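    /// Transmutes the `src` value to the destination type by storing it into
    /// `dst`.
    ///
    /// See also [`Self::codegen_transmute_operand`] for the cases that can be
    /// handled without a pre-allocated destination place.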
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
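            // These transmutes are UB to actually reach, so don't emit real
            // code for them; an unreachable marker is enough.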
            bx.unreachable_nonterminator();
        } else {
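            // The sizes match, so reinterpret the destination's memory with
            // the source's layout and store the value into it.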
            src.val.store(bx, dst.val.with_type(src.layout));
        }
    }

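    /// Transmutes the `operand` to the `cast` layout, producing the result as
    /// a new `OperandValue`; a temporary stack slot is used only when no
    /// direct scalar, vector, or pair conversion applies.
    ///
    /// Transmuting *to* a non-ZST `BackendRepr::Memory` type is not supported
    /// here; such cases must go through [`Self::codegen_transmute`].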
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
            && !cast.is_zst()
        {
            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
        }

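        // With identical layouts, the value can be reused unchanged.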
        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
            return operand.val;
        }

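        // Check for transmutes that are always UB.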
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            bx.unreachable_nonterminator();

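            // Because this transmute is UB anyway, return something cheap to
            // generate; all later uses are equally unreachable.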
            return OperandValue::poison(bx, cast);
        }

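        /// Whether a vector element scalar can be converted with a plain
        /// `bitcast`: initialized integers and floats qualify, pointers and
        /// unions do not.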
        #[inline]
        fn vector_can_bitcast(x: abi::Scalar) -> bool {
            matches!(
                x,
                abi::Scalar::Initialized {
                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
                    ..
                }
            )
        }

        let cx = bx.cx();
        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
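                // The existing alignment is part of `source_place_val`, so
                // that alignment is used for the load, not `cast`'s.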
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::SimdVector { element: from_scalar, .. },
                abi::BackendRepr::SimdVector { element: to_scalar, .. },
            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
                let to_backend_ty = bx.cx().immediate_backend_type(cast);
                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
            }
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
                OperandValue::Pair(
                    transmute_scalar(bx, imm_a, in_a, out_a),
                    transmute_scalar(bx, imm_b, in_b, out_b),
                )
            }
            _ => {
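                // No direct conversion applies, so spill to a stack slot big
                // and aligned enough for both layouts, then reload with the
                // target layout.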
                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
                let size = Ord::max(operand.layout.size, cast.size);
                let temp = PlaceValue::alloca(bx, size, align);
                bx.lifetime_start(temp.llval, size);
                operand.val.store(bx, temp.with_type(operand.layout));
                let val = bx.load_operand(temp.with_type(cast)).val;
                bx.lifetime_end(temp.llval, size);
                val
            }
        }
    }

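    /// Casts one scalar immediate to another for a MIR numeric or pointer
    /// cast (not a transmute).
    ///
    /// Returns `None` when the primitive-to-primitive combination is
    /// unsupported, in which case the caller reports a bug.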
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

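        // Immediates carry no range metadata, so remind the backend of the
        // source scalar's valid range before converting; e.g. a `char` is
        // passed as a plain `i32` even though it never exceeds 0x10FFFF.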
        assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

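    /// Codegen an `Rvalue` whose result can be produced as an `OperandRef`,
    /// without requiring a destination place.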
    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
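                        // Dropping the `unsafe` from a fn pointer is a no-op
                        // at the backend level, so the value passes through.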
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
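                                // A wide-to-thin pointer cast just drops the
                                // metadata half of the pair.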
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
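                        // All of these cast kinds consume and produce a single
                        // scalar immediate.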
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr
                        else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast)
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_instance_attrs(instance.def))
                    } else {
                        None
                    };
                    bx.call(
                        fn_ty,
                        fn_attrs.as_deref(),
                        Some(fn_abi),
                        fn_ptr,
                        &[],
                        None,
                        Some(instance),
                    )
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(ref elem, len_const) => {
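                // Non-ZST arrays are `BackendRepr::Memory` and get lowered
                // through `codegen_rvalue` into a place, so only ZST repeats
                // can be produced on the operand path.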
                let operand = self.codegen_operand(bx, elem);
                let array_ty = Ty::new_array_with_const_len(bx.tcx(), operand.layout.ty, len_const);
                let array_ty = self.monomorphize(array_ty);
                let array_layout = bx.layout_of(array_ty);
                assert!(array_layout.is_zst());
                OperandRef { val: OperandValue::ZeroSized, layout: array_layout }
            }
            mir::Rvalue::Aggregate(ref kind, ref fields) => {
                let (variant_index, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        (variant_index, active_field_index)
                    }
                    _ => (FIRST_VARIANT, None),
                };

                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let mut builder = OperandRefBuilder::new(layout);
                for (field_idx, field) in fields.iter_enumerated() {
                    let op = self.codegen_operand(bx, field);
                    let fi = active_field_index.unwrap_or(field_idx);
                    builder.insert_field(bx, variant_index, fi, op);
                }

                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
                match tag_result {
                    Err(super::place::UninhabitedVariantError) => {
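                        // An uninhabited variant can never be constructed, so
                        // abort and return a poison placeholder value.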
                        bx.abort();
                        let val = OperandValue::poison(bx, layout);
                        OperandRef { val, layout }
                    }
                    Ok(maybe_tag_value) => {
                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
                            builder.insert_imm(tag_field, tag_imm);
                        }
                        builder.build(bx.cx())
                    }
                }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
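        // A local kept as an operand has no place to compute a length from,
        // so read the length straight from the array type in that case.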
        if let Some(index) = place.as_local()
            && let LocalRef::Operand(op) = self.locals[index]
            && let ty::Array(_, n) = op.layout.ty.kind()
        {
            let n = n.try_to_target_usize(bx.tcx()).expect("expected monomorphic const in codegen");
            return bx.cx().const_usize(n);
        }
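        // Otherwise compute the length from the place itself.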
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

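    /// Codegen an `Rvalue::Ref` or `Rvalue::RawPtr`: take the address of a
    /// place and wrap it in the pointer type built by `mk_ptr_ty`.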
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
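                    // `Offset` works in units of the pointee size, so
                    // offsetting a pointer to a ZST never changes its address.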
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                use std::cmp::Ordering;
                assert!(!is_float);
                if let Some(value) = bx.three_way_compare(lhs_ty, lhs, rhs) {
                    return value;
                }
                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
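                    // Unoptimized: compute `zext(gt) - zext(lt)` directly, a
                    // classic branchless formulation of three-way compare that
                    // is tolerable even when nothing cleans it up afterwards.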
                    let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
                    let gtext = bx.zext(is_gt, bx.type_i8());
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let ltext = bx.zext(is_lt, bx.type_i8());
                    bx.unchecked_ssub(gtext, ltext)
                } else {
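                    // Optimized: build the result from two selects over `lt`
                    // and `ne`, a shape backends recognize as a three-way
                    // compare.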
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
                    let ge = bx.select(
                        is_ne,
                        bx.cx().const_i8(Ordering::Greater as i8),
                        bx.cx().const_i8(Ordering::Equal as i8),
                    );
                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
                }
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
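                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)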
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

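/// Transmutes a single scalar immediate from `from_scalar` to `to_scalar`,
/// asserting that both scalars have the same size.
///
/// For illustration: a `transmute::<f32, u32>` lowers to a single `bitcast`
/// here, while an integer-to-pointer transmute is emitted as a `ptradd` off
/// the null pointer rather than an `inttoptr`.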
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    to_scalar: abi::Scalar,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
    let imm_ty = bx.cx().val_ty(imm);
    assert_ne!(
        bx.cx().type_kind(imm_ty),
        TypeKind::Vector,
        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
    );

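    // Identical scalars (including identical valid ranges) need no conversion
    // at all.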
    if from_scalar == to_scalar {
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);

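    // Immediates carry no range metadata in the backend, so assume the source
    // scalar's valid range before converting; e.g. a `char` is passed as a
    // plain `i32` even though it never exceeds 0x10FFFF. Passing the other
    // scalar as `known` lets `assume_scalar_range` skip assumes that add no
    // information.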
    assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));

    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
        (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
        (Pointer(..), Int(..)) => bx.ptrtoint(imm, to_backend_ty),
        (Float(_), Pointer(..)) => {
            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
            bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
        }
        (Pointer(..), Float(_)) => {
            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
            bx.bitcast(int_imm, to_backend_ty)
        }
    };

    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);

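    // Also assume the range of the result scalar: this transmute may be the
    // only point where the backend could learn it, e.g. that a byte
    // reinterpreted as `bool` must be 0 or 1.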
    assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));

    imm = bx.to_immediate_scalar(imm, to_scalar);
    imm
}

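/// Emits an `assume` that `imm` lies within `scalar`'s valid range, unless
/// that range is already always valid, or `known` (the scalar on the other
/// side of a transmute) already implies a range at least as tight.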
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
    known: Option<&abi::Scalar>,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
        return;
    }

    match (scalar, known) {
        (abi::Scalar::Union { .. }, _) => return,
        (_, None) => {
            if scalar.is_always_valid(bx.cx()) {
                return;
            }
        }
        (abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
            let known_range = known.valid_range(bx.cx());
            if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
                return;
            }
        }
    }

    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}