use itertools::Itertools as _;
use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::FunctionCx;
use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
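    /// Codegen `rvalue` and store the result into the place `dest`.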
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
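                // A constant whose bytes are all uninitialized never needs a
                // store; the destination can simply be left untouched.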
                if let mir::Operand::Constant(const_op) = operand {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        return;
                    }
                }
                let cg_operand = self.codegen_operand(bx, operand);
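                // Operands with `Scalar`/`ScalarPair` layouts are always
                // by-value, never spilled behind a reference.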
                if matches!(
                    cg_operand.layout.backend_repr,
                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
                ) {
                    debug_assert!(!matches!(cg_operand.val, OperandValue::Ref(..)));
                }
                cg_operand.store_with_annotation(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
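                // A scalar-pair destination (e.g. a wide pointer) can take the
                // unsized value directly as an operand; no memory is needed.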
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.store_with_annotation(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        debug!("codegen_rvalue: creating ugly alloca");
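                        // Unsizing a by-value operand has to go through
                        // memory: spill it to a temporary alloca and perform
                        // the coercion from there into `dest`.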
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.store_with_annotation(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(
                mir::CastKind::Transmute | mir::CastKind::Subtype,
                ref operand,
                _ty,
            ) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                if dest.layout.is_zst() {
                    return;
                }

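                // Repeating an all-uninit constant is just one big undef
                // memset, regardless of the element value's shape.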
                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

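                // When every byte of an immediate element is the same, the
                // whole repeat collapses into a single memset of that byte.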
                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    if let Some(int) = bx.cx().const_to_opt_u128(v, false)
                        && let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()]
                        && let Ok(&byte) = bytes.iter().all_equal_value()
                    {
                        let fill = bx.cx().const_u8(byte);
                        bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                        return true;
                    }

                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                if let OperandValue::Immediate(v) = cg_elem.val
                    && try_init_all_same(bx, v)
                {
                    return;
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");
                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

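            // Aggregates (except raw pointers, which take the generic operand
            // path below) are built by storing each field directly into the
            // destination and then setting the discriminant.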
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.store_with_annotation(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.store_with_annotation(bx, dest);
            }
        }
    }

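    /// Transmute `src` into the in-memory destination `dst`, whose layout may
    /// differ from `src`'s as long as the sizes match. Transmutes that are
    /// always UB (mismatched sizes or an uninhabited type on either side)
    /// lower to `unreachable` without storing anything.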
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
            bx.unreachable_nonterminator();
        } else {
            src.store_with_annotation(bx, dst.val.with_type(src.layout));
        }
    }

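    /// Transmute `operand` to the `cast` layout, producing an `OperandValue`.
    /// Must not be called with a `cast` whose representation is `Memory`
    /// (other than ZSTs); such transmutes need a place, so callers should use
    /// `codegen_transmute` instead.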
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
            && !cast.is_zst()
        {
            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
        }

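        // Identical layouts need no conversion at all; reuse the value.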
        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
            return operand.val;
        }

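        // A size mismatch or an uninhabited type on either side makes the
        // transmute unconditionally UB, so emit `unreachable` and hand back
        // poison of the right shape.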
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            bx.unreachable_nonterminator();

            return OperandValue::poison(bx, cast);
        }

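        // Only int/float vector elements can be `bitcast` directly; pointer
        // elements would need pointer casts instead.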
        #[inline]
        fn vector_can_bitcast(x: abi::Scalar) -> bool {
            matches!(
                x,
                abi::Scalar::Initialized {
                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
                    ..
                }
            )
        }

        let cx = bx.cx();
        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::SimdVector { element: from_scalar, .. },
                abi::BackendRepr::SimdVector { element: to_scalar, .. },
            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
                let to_backend_ty = bx.cx().immediate_backend_type(cast);
                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
            }
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
                OperandValue::Pair(
                    transmute_scalar(bx, imm_a, in_a, out_a),
                    transmute_scalar(bx, imm_b, in_b, out_b),
                )
            }
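            // Everything else round-trips through a stack temporary that is
            // large and aligned enough for both layouts.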
            _ => {
                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
                let size = Ord::max(operand.layout.size, cast.size);
                let temp = PlaceValue::alloca(bx, size, align);
                bx.lifetime_start(temp.llval, size);
                operand.store_with_annotation(bx, temp.with_type(operand.layout));
                let val = bx.load_operand(temp.with_type(cast)).val;
                bx.lifetime_end(temp.llval, size);
                val
            }
        }
    }

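    /// Cast an immediate from `from_scalar` to `to_scalar`, returning `None`
    /// for combinations of primitives this helper does not support.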
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

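        // Tell the backend the source's valid range before converting, since
        // the cast itself would otherwise lose that information.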
        assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

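    /// Codegen `rvalue` into a new `OperandRef`, for rvalues that can be
    /// produced as an operand rather than written into a destination place.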
    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
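                                // A wide-to-thin pointer cast simply drops the
                                // metadata half of the pair.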
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast, move_annotation: None };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute | mir::CastKind::Subtype => {
                        self.codegen_transmute_operand(bx, operand, cast)
                    }
                };
                OperandRef { val, layout: cast, move_annotation: None }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef {
                    val: result,
                    layout: bx.cx().layout_of(operand_ty),
                    move_annotation: None,
                }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                    move_annotation: None,
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout, move_annotation: None }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                    move_annotation: None,
                }
            }

            mir::Rvalue::NullaryOp(ref null_op) => {
                let val = match null_op {
                    mir::NullOp::RuntimeChecks(kind) => {
                        let val = kind.value(bx.tcx().sess);
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                    move_annotation: None,
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_instance_attrs(instance.def))
                    } else {
                        None
                    };
                    bx.call(
                        fn_ty,
                        fn_attrs.as_deref(),
                        Some(fn_abi),
                        fn_ptr,
                        &[],
                        None,
                        Some(instance),
                    )
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout, move_annotation: None }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
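            // Arrays are `Memory`-repr, so the only way a `Repeat` can show up
            // as an operand here is when the whole array is a ZST; anything
            // bigger goes through `codegen_rvalue` with a real destination.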
            mir::Rvalue::Repeat(ref elem, len_const) => {
                let operand = self.codegen_operand(bx, elem);
                let array_ty =
                    Ty::new_array_with_const_len(bx.tcx(), operand.layout.ty, len_const);
                let array_ty = self.monomorphize(array_ty);
                let array_layout = bx.layout_of(array_ty);
                assert!(array_layout.is_zst());
                OperandRef {
                    val: OperandValue::ZeroSized,
                    layout: array_layout,
                    move_annotation: None,
                }
            }
            mir::Rvalue::Aggregate(ref kind, ref fields) => {
                let (variant_index, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        (variant_index, active_field_index)
                    }
                    _ => (FIRST_VARIANT, None),
                };

                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let mut builder = OperandRefBuilder::new(layout);
                for (field_idx, field) in fields.iter_enumerated() {
                    let op = self.codegen_operand(bx, field);
                    let fi = active_field_index.unwrap_or(field_idx);
                    builder.insert_field(bx, variant_index, fi, op);
                }

                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
                match tag_result {
                    Err(super::place::UninhabitedVariantError) => {
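                        // An uninhabited variant can never be constructed, so
                        // abort soundly and hand back poison for the operand.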
                        bx.abort();
                        let val = OperandValue::poison(bx, layout);
                        OperandRef { val, layout, move_annotation: None }
                    }
                    Ok(maybe_tag_value) => {
                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
                            builder.insert_imm(tag_field, tag_imm);
                        }
                        builder.build(bx.cx())
                    }
                }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout, move_annotation: None }
            }
            mir::Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in codegen"),
            mir::Rvalue::ShallowInitBox(..) => bug!("`ShallowInitBox` in codegen"),
        }
    }

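    /// Codegen a `&place` or `&raw place` rvalue: takes the address of the
    /// place and wraps it in the pointer type produced by `mk_ptr_ty`.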
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef {
            val,
            layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)),
            move_annotation: None,
        }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
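                    // `Offset` on a ZST pointee never moves the pointer.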
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                assert!(!is_float);
                bx.three_way_compare(lhs_ty, lhs, rhs)
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
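                // Wide pointers compare lexicographically: strictly by
                // address, falling back to the metadata when addresses tie.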
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

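/// Transmute a scalar immediate `imm` from `from_scalar` to the equally sized
/// `to_scalar`, `assume`ing both valid ranges so the backend can optimize in
/// either direction. Vector immediates are rejected.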
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    to_scalar: abi::Scalar,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
    let imm_ty = bx.cx().val_ty(imm);
    assert_ne!(
        bx.cx().type_kind(imm_ty),
        TypeKind::Vector,
        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
    );

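    // Identical scalars need no work at all.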
    if from_scalar == to_scalar {
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);

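    // Assume the source scalar's valid range, unless the destination's range
    // already implies everything this assume would add.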
    assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));
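
    // Do the conversion: same-size int/float pairs `bitcast`, pointer/int
    // use `inttoptr`/`ptrtoint`, and float/pointer hop through an integer.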
997
998 imm = match (from_scalar.primitive(), to_scalar.primitive()) {
999 (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
1000 (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
1001 (Int(..), Pointer(..)) => bx.inttoptr(imm, to_backend_ty),
1002 (Pointer(..), Int(..)) => {
1003 bx.ptrtoint(imm, to_backend_ty)
1005 }
1006 (Float(_), Pointer(..)) => {
1007 let int_imm = bx.bitcast(imm, bx.cx().type_isize());
1008 bx.inttoptr(int_imm, to_backend_ty)
1009 }
1010 (Pointer(..), Float(_)) => {
1011 let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
1013 bx.bitcast(int_imm, to_backend_ty)
1014 }
1015 };
1016
1017 debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);
1018
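    // Now that the value is in its destination type, assume that type's
    // range too, again skipping whatever the source range already implied.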
    assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));

    bx.to_immediate_scalar(imm, to_scalar)
}

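/// Emits an `assume` that `imm` lies in `scalar`'s valid range, unless
/// optimizations are off, the range covers every value, or `known` (the
/// scalar on the other side of a transmute) already implies it.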
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
    known: Option<&abi::Scalar>,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
        return;
    }

    match (scalar, known) {
        (abi::Scalar::Union { .. }, _) => return,
        (_, None) => {
            if scalar.is_always_valid(bx.cx()) {
                return;
            }
        }
        (abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
            let known_range = known.valid_range(bx.cx());
            if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
                return;
            }
        }
    }

    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}
1072}