use std::assert_matches::assert_matches;

use arrayvec::ArrayVec;
use rustc_abi::{self as abi, FIRST_VARIANT, FieldIdx};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
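    /// Codegen an `Rvalue` and store its result into the place `dest`.
    /// Rvalues that can be built as a bare operand are handled by falling
    /// through to `codegen_rvalue_operand` in the final match arm.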
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                if dest.layout.is_zst() {
                    return;
                }

                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    if let Some(int) = bx.cx().const_to_opt_u128(v, false) {
                        let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()];
                        let first = bytes[0];
                        if bytes[1..].iter().all(|&b| b == first) {
                            let fill = bx.cx().const_u8(first);
                            bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                            return true;
                        }
                    }

                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                match cg_elem.val {
                    OperandValue::Immediate(v) => {
                        if try_init_all_same(bx, v) {
                            return;
                        }
                    }
                    _ => (),
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

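    /// Stores the result of transmuting `src` into the place `dst`: it first
    /// tries the operand path via `codegen_transmute_operand`, and otherwise
    /// stores the immediate(s) directly into the destination.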
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
            val.store(bx, dst);
            return;
        }

        match src.val {
            OperandValue::Ref(..) | OperandValue::ZeroSized => {
                span_bug!(
                    self.mir.span,
                    "Operand path should have handled transmute \
                     from {src:?} to place {dst:?}"
                );
            }
            OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                src.val.store(bx, dst.val.with_type(src.layout));
            }
        }
    }

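    /// Attempts to transmute an `OperandValue` to a value of layout `cast`
    /// without going through memory, returning `None` when the combination of
    /// value kinds cannot be handled as an operand. Size mismatches and
    /// uninhabited types produce a poison value (plus an `abort` when the
    /// source is actually inhabited).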
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> Option<OperandValue<Bx::Value>> {
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            if !operand.layout.is_uninhabited() {
                bx.abort();
            }

            return Some(OperandValue::poison(bx, cast));
        }

        let operand_kind = self.value_kind(operand.layout);
        let cast_kind = self.value_kind(cast);

        match operand.val {
            OperandValue::Ref(source_place_val) => {
                assert_eq!(source_place_val.llextra, None);
                assert_matches!(operand_kind, OperandValueKind::Ref);
                Some(bx.load_operand(source_place_val.with_type(cast)).val)
            }
            OperandValue::ZeroSized => {
                let OperandValueKind::ZeroSized = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::ZeroSized = cast_kind {
                    Some(OperandValue::ZeroSized)
                } else {
                    None
                }
            }
            OperandValue::Immediate(imm) => {
                let OperandValueKind::Immediate(from_scalar) = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::Immediate(to_scalar) = cast_kind
                    && from_scalar.size(self.cx) == to_scalar.size(self.cx)
                {
                    let from_backend_ty = bx.backend_type(operand.layout);
                    let to_backend_ty = bx.backend_type(cast);
                    Some(OperandValue::Immediate(self.transmute_immediate(
                        bx,
                        imm,
                        from_scalar,
                        from_backend_ty,
                        to_scalar,
                        to_backend_ty,
                    )))
                } else {
                    None
                }
            }
            OperandValue::Pair(imm_a, imm_b) => {
                let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::Pair(out_a, out_b) = cast_kind
                    && in_a.size(self.cx) == out_a.size(self.cx)
                    && in_b.size(self.cx) == out_b.size(self.cx)
                {
                    let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false);
                    let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false);
                    let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
                    let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
                    Some(OperandValue::Pair(
                        self.transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
                        self.transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
                    ))
                } else {
                    None
                }
            }
        }
    }

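    /// Lowers a numeric cast (`IntToInt`, `FloatToFloat`, int/float/pointer
    /// conversions, ...) on a single immediate, returning `None` for primitive
    /// combinations this helper does not support.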
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

        self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

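    /// Reinterprets an immediate as a same-sized scalar of a different type,
    /// e.g. the bits of a `char` as a `u32`, adding range assumptions for both
    /// the input and the output scalar.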
    fn transmute_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Bx::Value {
        assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));

        if from_scalar == to_scalar && from_backend_ty == to_backend_ty {
            return imm;
        }

        use abi::Primitive::*;
        imm = bx.from_immediate(imm);

        self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
            (Pointer(..), Int(..)) => bx.ptrtoint(imm, to_backend_ty),
            (Float(_), Pointer(..)) => {
                let int_imm = bx.bitcast(imm, bx.cx().type_isize());
                bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
            }
            (Pointer(..), Float(_)) => {
                let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
                bx.bitcast(int_imm, to_backend_ty)
            }
        };

        self.assume_scalar_range(bx, imm, to_scalar, to_backend_ty);

        imm = bx.to_immediate_scalar(imm, to_scalar);
        imm
    }

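    /// Emits `assume`s narrowing `imm` to the scalar's valid range; skipped at
    /// `-Copt-level=0` and for scalars that are always valid.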
    fn assume_scalar_range(
        &self,
        bx: &mut Bx,
        imm: Bx::Value,
        scalar: abi::Scalar,
        backend_ty: Bx::Type,
    ) {
        if matches!(self.cx.sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(self.cx) {
            return;
        }

        match scalar.primitive() {
            abi::Primitive::Int(..) => {
                let range = scalar.valid_range(self.cx);
                bx.assume_integer_range(imm, backend_ty, range);
            }
            abi::Primitive::Pointer(abi::AddressSpace::DATA)
                if !scalar.valid_range(self.cx).contains(0) =>
            {
                bx.assume_nonnull(imm);
            }
            abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
        }
    }

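    /// Codegen for assignments to an unsized place; only `Rvalue::Use` can
    /// reach this, anything else is a bug.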
    pub(crate) fn codegen_rvalue_unsized(
        &mut self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.val.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store_unsized(bx, indirect_dest);
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

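    /// Evaluates an rvalue directly to an `OperandRef`, without going through
    /// a destination place. Callers must check `rvalue_creates_operand` first.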
    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {rvalue:?} to operand",
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::DynStar, _) => {
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    | mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let operand_kind = self.value_kind(operand.layout);
                        let OperandValueKind::Immediate(from_scalar) = operand_kind else {
                            bug!("Found {operand_kind:?} for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let cast_kind = self.value_kind(cast);
                        let OperandValueKind::Immediate(to_scalar) = cast_kind else {
                            bug!("Found {cast_kind:?} for operand {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
                            bug!("Unsupported transmute-as-operand of {operand:?} to {cast:?}");
                        })
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref(),);
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_fn_attrs(instance.def_id()))
                    } else {
                        None
                    };
                    bx.call(fn_ty, fn_attrs, Some(fn_abi), fn_ptr, &[], None, Some(instance))
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
            mir::Rvalue::Aggregate(_, ref fields) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let mut inputs = ArrayVec::<Bx::Value, 2>::new();
                let mut input_scalars = ArrayVec::<abi::Scalar, 2>::new();
                for field_idx in layout.fields.index_by_increasing_offset() {
                    let field_idx = FieldIdx::from_usize(field_idx);
                    let op = self.codegen_operand(bx, &fields[field_idx]);
                    let values = op.val.immediates_or_place().left_or_else(|p| {
                        bug!("Field {field_idx:?} is {p:?} making {layout:?}");
                    });
                    let scalars = self.value_kind(op.layout).scalars().unwrap();
                    assert_eq!(values.len(), scalars.len());
                    inputs.extend(values);
                    input_scalars.extend(scalars);
                }

                let output_scalars = self.value_kind(layout).scalars().unwrap();
                itertools::izip!(&mut inputs, input_scalars, output_scalars).for_each(
                    |(v, in_s, out_s)| {
                        if in_s != out_s {
                            *v = bx.from_immediate(*v);
                            *v = bx.to_immediate_scalar(*v, out_s);
                        }
                    },
                );

                let val = OperandValue::from_immediates(inputs);
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

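    /// Returns the length of the array in `place`, reading it from the type
    /// when the local is kept as an operand and from the place otherwise.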
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        if let Some(index) = place.as_local()
            && let LocalRef::Operand(op) = self.locals[index]
            && let ty::Array(_, n) = op.layout.ty.kind()
        {
            let n = n.try_to_target_usize(bx.tcx()).expect("expected monomorphic const in codegen");
            return bx.cx().const_usize(n);
        }
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

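    /// Common path for `Rvalue::Ref` and `Rvalue::RawPtr`: takes the address
    /// of a place and wraps it in the pointer type built by `mk_ptr_ty`.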
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

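    /// Emits the backend instruction for a binary operation on scalar
    /// immediates, dispatching on the float-ness and signedness of the type.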
    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                use std::cmp::Ordering;
                assert!(!is_float);
                if let Some(value) = bx.three_way_compare(lhs_ty, lhs, rhs) {
                    return value;
                }
                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
                    let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
                    let gtext = bx.zext(is_gt, bx.type_i8());
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let ltext = bx.zext(is_lt, bx.type_i8());
                    bx.unchecked_ssub(gtext, ltext)
                } else {
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
                    let ge = bx.select(
                        is_ne,
                        bx.cx().const_i8(Ordering::Greater as i8),
                        bx.cx().const_i8(Ordering::Equal as i8),
                    );
                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
                }
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

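    /// Comparisons of wide pointers: equality is checked componentwise, and the
    /// ordering operators compare the data pointers first with the metadata
    /// breaking ties.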
    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

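    /// Overflow-checked `Add`/`Sub`/`Mul`, producing a `(value, overflowed)` pair.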
    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }

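    /// Returns `true` if the rvalue can be evaluated straight to an operand by
    /// `codegen_rvalue_operand`, and `false` if it needs a destination place
    /// (for example `Repeat` and array aggregates).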
    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                let operand_ty = operand.ty(self.mir, self.cx.tcx());
                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));

                match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
                    (OperandValueKind::Ref, _) => true,

                    (OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true,

                    (OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false,

                    (OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false,

                    (OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) =>
                        a.size(self.cx) == b.size(self.cx),
                    (OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) =>
                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),

                    (OperandValueKind::Immediate(..), OperandValueKind::Pair(..)) |
                    (OperandValueKind::Pair(..), OperandValueKind::Immediate(..)) => false,
                }
            }
            mir::Rvalue::Ref(..) |
            mir::Rvalue::CopyForDeref(..) |
            mir::Rvalue::RawPtr(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | mir::Rvalue::ShallowInitBox(..) | mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) |
            mir::Rvalue::WrapUnsafeBinder(..) => true,
            mir::Rvalue::Repeat(..) => false,
            mir::Rvalue::Aggregate(ref kind, _) => {
                let allowed_kind = match **kind {
                    mir::AggregateKind::RawPtr(..) => true,
                    mir::AggregateKind::Array(..) => false,
                    mir::AggregateKind::Tuple => true,
                    mir::AggregateKind::Adt(def_id, ..) => {
                        let adt_def = self.cx.tcx().adt_def(def_id);
                        adt_def.is_struct() && !adt_def.repr().simd()
                    }
                    mir::AggregateKind::Closure(..) => true,
                    mir::AggregateKind::Coroutine(..) | mir::AggregateKind::CoroutineClosure(..) => false,
                };
                allowed_kind && {
                    let ty = rvalue.ty(self.mir, self.cx.tcx());
                    let ty = self.monomorphize(ty);
                    let layout = self.cx.spanned_layout_of(ty, span);
                    !self.cx.is_backend_ref(layout)
                }
            }
        }
    }

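    /// Classifies a layout by which `OperandValue` variant values of that type
    /// use in this backend.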
    fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
        if layout.is_zst() {
            OperandValueKind::ZeroSized
        } else if self.cx.is_backend_immediate(layout) {
            assert!(!self.cx.is_backend_scalar_pair(layout));
            OperandValueKind::Immediate(match layout.backend_repr {
                abi::BackendRepr::Scalar(s) => s,
                abi::BackendRepr::SimdVector { element, .. } => element,
                x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
            })
        } else if self.cx.is_backend_scalar_pair(layout) {
            let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
                span_bug!(
                    self.mir.span,
                    "Couldn't translate {:?} as backend scalar pair",
                    layout.backend_repr,
                );
            };
            OperandValueKind::Pair(s1, s2)
        } else {
            OperandValueKind::Ref
        }
    }
}

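/// Mirror of the `OperandValue` variants, carrying the scalar layouts needed
/// when rebuilding or transmuting immediates.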
#[derive(Debug, Copy, Clone)]
enum OperandValueKind {
    Ref,
    Immediate(abi::Scalar),
    Pair(abi::Scalar, abi::Scalar),
    ZeroSized,
}

impl OperandValueKind {
    fn scalars(self) -> Option<ArrayVec<abi::Scalar, 2>> {
        Some(match self {
            OperandValueKind::ZeroSized => ArrayVec::new(),
            OperandValueKind::Immediate(a) => ArrayVec::from_iter([a]),
            OperandValueKind::Pair(a, b) => [a, b].into(),
            OperandValueKind::Ref => return None,
        })
    }
}