use std::assert_matches::assert_matches;

use arrayvec::ArrayVec;
use rustc_abi::{self as abi, FIRST_VARIANT, FieldIdx};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
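    /// Codegens `rvalue` and stores the result into the destination place `dest`,
    /// either by building an operand and storing it, or by writing in place.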
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(bx, elem);

                if dest.layout.is_zst() {
                    return;
                }

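                // If every byte of the element value is the same (or the element
                // is itself a single byte), the whole repeat expression can be
                // lowered to a single `memset`.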
                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    if let Some(int) = bx.cx().const_to_opt_u128(v, false) {
                        let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()];
                        let first = bytes[0];
                        if bytes[1..].iter().all(|&b| b == first) {
                            let fill = bx.cx().const_u8(first);
                            bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                            return true;
                        }
                    }

                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                if let OperandValue::Immediate(v) = cg_elem.val {
                    if try_init_all_same(bx, v) {
                        return;
                    }
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

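            // Build the aggregate directly in the destination place, field by
            // field, then set the discriminant. (`RawPtr` aggregates are instead
            // built as operands in the default arm below.)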
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

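    /// Transmutes the `src` operand into the destination place `dst`.
    ///
    /// Prefers the operand path via [`Self::codegen_transmute_operand`]; when
    /// that is not possible, the immediate(s) are stored through `dst`
    /// reinterpreted with the source's layout.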
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
            val.store(bx, dst);
            return;
        }

        match src.val {
            OperandValue::Ref(..) | OperandValue::ZeroSized => {
                span_bug!(
                    self.mir.span,
                    "Operand path should have handled transmute \
                     from {src:?} to place {dst:?}"
                );
            }
            OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                // Store through the destination place, using the source layout
                // but the destination's own alignment.
                src.val.store(bx, dst.val.with_type(src.layout));
            }
        }
    }

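    /// Attempts to transmute `operand` to the `cast` layout without going
    /// through memory, returning `None` when that is not possible (the caller
    /// must then lower the transmute via a place instead).
    ///
    /// A size mismatch or an uninhabited source/target type is statically-known
    /// UB: an abort is emitted (when the source is inhabited) and a poison
    /// value of the target layout is returned.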
    fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> Option<OperandValue<Bx::Value>> {
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            if !operand.layout.is_uninhabited() {
                bx.abort();
            }

            return Some(OperandValue::poison(bx, cast));
        }

        let operand_kind = self.value_kind(operand.layout);
        let cast_kind = self.value_kind(cast);

        match operand.val {
            OperandValue::Ref(source_place_val) => {
                assert_eq!(source_place_val.llextra, None);
                assert_matches!(operand_kind, OperandValueKind::Ref);
                Some(bx.load_operand(source_place_val.with_type(cast)).val)
            }
            OperandValue::ZeroSized => {
                let OperandValueKind::ZeroSized = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::ZeroSized = cast_kind {
                    Some(OperandValue::ZeroSized)
                } else {
                    None
                }
            }
            OperandValue::Immediate(imm) => {
                let OperandValueKind::Immediate(from_scalar) = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::Immediate(to_scalar) = cast_kind
                    && from_scalar.size(self.cx) == to_scalar.size(self.cx)
                {
                    let from_backend_ty = bx.backend_type(operand.layout);
                    let to_backend_ty = bx.backend_type(cast);
                    Some(OperandValue::Immediate(self.transmute_immediate(
                        bx,
                        imm,
                        from_scalar,
                        from_backend_ty,
                        to_scalar,
                        to_backend_ty,
                    )))
                } else {
                    None
                }
            }
            OperandValue::Pair(imm_a, imm_b) => {
                let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::Pair(out_a, out_b) = cast_kind
                    && in_a.size(self.cx) == out_a.size(self.cx)
                    && in_b.size(self.cx) == out_b.size(self.cx)
                {
                    let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false);
                    let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false);
                    let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
                    let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
                    Some(OperandValue::Pair(
                        self.transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
                        self.transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
                    ))
                } else {
                    None
                }
            }
        }
    }

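    /// Performs a *value* cast between two scalar immediates (int↔int,
    /// float↔float, int↔float, int→pointer, pointer→pointer), `assume`ing the
    /// source scalar's valid range first.
    ///
    /// Returns `None` for primitive combinations this helper does not handle.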
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

        self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

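    /// Transmutes one scalar immediate to another of the same size, using
    /// bitcasts and pointer↔integer conversions rather than value casts.
    ///
    /// Emits `assume`s for both the source's and the destination's valid ranges.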
    fn transmute_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Bx::Value {
        assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));

        use abi::Primitive::*;
        imm = bx.from_immediate(imm);

        self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
            (Pointer(..), Int(..)) => bx.ptrtoint(imm, to_backend_ty),
            (Float(_), Pointer(..)) => {
                let int_imm = bx.bitcast(imm, bx.cx().type_isize());
                bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
            }
            (Pointer(..), Float(_)) => {
                let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
                bx.bitcast(int_imm, to_backend_ty)
            }
        };

        self.assume_scalar_range(bx, imm, to_scalar, to_backend_ty);

        imm = bx.to_immediate_scalar(imm, to_scalar);
        imm
    }

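    /// Emits an `assume` that `imm` lies within `scalar`'s valid range so the
    /// backend can optimize on it. Skipped entirely at `OptLevel::No`.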
    fn assume_scalar_range(
        &self,
        bx: &mut Bx,
        imm: Bx::Value,
        scalar: abi::Scalar,
        backend_ty: Bx::Type,
    ) {
        if matches!(self.cx.sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(self.cx) {
            return;
        }

        match scalar.primitive() {
            abi::Primitive::Int(..) => {
                let range = scalar.valid_range(self.cx);
                bx.assume_integer_range(imm, backend_ty, range);
            }
            abi::Primitive::Pointer(abi::AddressSpace::DATA)
                if !scalar.valid_range(self.cx).contains(0) =>
            {
                bx.assume_nonnull(imm);
            }
            abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
        }
    }

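    /// Codegens an rvalue into an unsized (indirect) destination. Only
    /// `Rvalue::Use` can be assigned to an unsized place.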
    pub(crate) fn codegen_rvalue_unsized(
        &mut self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.val.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store_unsized(bx, indirect_dest);
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

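    /// Codegens `rvalue` into a new [`OperandRef`] rather than into a place.
    /// Callers are expected to have checked [`Self::rvalue_creates_operand`];
    /// this asserts it.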
    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {rvalue:?} to operand",
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
                        // No-op: the representation of the fn pointer does not change.
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
                                // Casting a wide pointer to a thin pointer drops the metadata.
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::DynStar, _) => {
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let operand_kind = self.value_kind(operand.layout);
                        let OperandValueKind::Immediate(from_scalar) = operand_kind else {
                            bug!("Found {operand_kind:?} for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let cast_kind = self.value_kind(cast);
                        let OperandValueKind::Immediate(to_scalar) = cast_kind else {
                            bug!("Found {cast_kind:?} for operand {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
                            bug!("Unsupported transmute-as-operand of {operand:?} to {cast:?}");
                        })
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self.codegen_place(bx, place.as_ref()).codegen_get_discr(bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    // `UbChecks` and `ContractChecks` produce a `bool`, not a `usize`,
                    // so the result layout must come from the op's own result type.
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_fn_attrs(instance.def_id()))
                    } else {
                        None
                    };
                    bx.call(fn_ty, fn_attrs, Some(fn_abi), fn_ptr, &[], None, Some(instance))
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
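            // `rvalue_creates_operand` only lets through aggregates that fit in
            // at most two immediates, so build the operand directly from the
            // fields' immediates, converting scalar representations as needed.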
            mir::Rvalue::Aggregate(_, ref fields) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let mut inputs = ArrayVec::<Bx::Value, 2>::new();
                let mut input_scalars = ArrayVec::<abi::Scalar, 2>::new();
                for field_idx in layout.fields.index_by_increasing_offset() {
                    let field_idx = FieldIdx::from_usize(field_idx);
                    let op = self.codegen_operand(bx, &fields[field_idx]);
                    let values = op.val.immediates_or_place().left_or_else(|p| {
                        bug!("Field {field_idx:?} is {p:?} making {layout:?}");
                    });
                    let scalars = self.value_kind(op.layout).scalars().unwrap();
                    assert_eq!(values.len(), scalars.len());
                    inputs.extend(values);
                    input_scalars.extend(scalars);
                }

                let output_scalars = self.value_kind(layout).scalars().unwrap();
                itertools::izip!(&mut inputs, input_scalars, output_scalars).for_each(
                    |(v, in_s, out_s)| {
                        if in_s != out_s {
                            *v = bx.from_immediate(*v);
                            *v = bx.to_immediate_scalar(*v, out_s);
                        }
                    },
                );

                let val = OperandValue::from_immediates(inputs);
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

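    /// Returns the length of the array at `place`. When the local is an
    /// operand with a known array type, the length is emitted as a constant
    /// without materializing the place.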
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(op) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n
                        .try_to_target_usize(bx.tcx())
                        .expect("expected monomorphic const in codegen");
                    return bx.cx().const_usize(n);
                }
            }
        }
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

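    /// Codegens `Rvalue::Ref` and `Rvalue::RawPtr`: takes the address of
    /// `place` and wraps it in the pointer type produced by `mk_ptr_ty`.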
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().type_has_metadata(ty) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    bx.inbounds_gep(llty, lhs, &[rhs])
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                use std::cmp::Ordering;
                assert!(!is_float);
                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
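                    // Without optimizations, emit the straightforward lowering:
                    // (lhs > rhs) as i8 - (lhs < rhs) as i8, giving -1, 0, or +1,
                    // which matches the discriminants of `Ordering`.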
                    let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
                    let gtext = bx.zext(is_gt, bx.type_i8());
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let ltext = bx.zext(is_lt, bx.type_i8());
                    bx.unchecked_ssub(gtext, ltext)
                } else {
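                    // Otherwise, chain two selects:
                    // is_lt ? Less : (is_ne ? Greater : Equal).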
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
                    let ge = bx.select(
                        is_ne,
                        bx.cx().const_i8(Ordering::Greater as i8),
                        bx.cx().const_i8(Ordering::Equal as i8),
                    );
                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
                }
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

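    /// Compares two wide (fat) pointers: the data pointers are compared first,
    /// and the metadata breaks ties for the ordering operators.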
    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
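                // Lexicographic comparison: strictly compare the data pointers,
                // and only fall back to the metadata when they are equal, e.g.
                // a <= b  <=>  a.0 < b.0 || (a.0 == b.0 && a.1 <= b.1).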
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }

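    /// Returns `true` if `rvalue` can be codegenned as an `OperandRef` via
    /// [`Self::codegen_rvalue_operand`], without needing a backing place.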
    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                let operand_ty = operand.ty(self.mir, self.cx.tcx());
                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));

                match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
                    // A by-ref source can always be loaded as the target layout.
                    (OperandValueKind::Ref, _) => true,

                    // ZST-to-ZST is the only ZST transmute that works as an operand.
                    (OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true,
                    (OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false,

                    // A by-ref destination needs a place, not an operand.
                    (
                        OperandValueKind::Immediate(..) | OperandValueKind::Pair(..),
                        OperandValueKind::Ref,
                    ) => false,

                    // Scalar-to-scalar and pair-to-pair only work when the
                    // corresponding component sizes match.
                    (OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) => {
                        a.size(self.cx) == b.size(self.cx)
                    }
                    (OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) => {
                        a0.size(self.cx) == b0.size(self.cx)
                            && a1.size(self.cx) == b1.size(self.cx)
                    }

                    (OperandValueKind::Immediate(..), OperandValueKind::Pair(..))
                    | (OperandValueKind::Pair(..), OperandValueKind::Immediate(..)) => false,
                }
            }
            mir::Rvalue::Ref(..)
            | mir::Rvalue::CopyForDeref(..)
            | mir::Rvalue::RawPtr(..)
            | mir::Rvalue::Len(..)
            | mir::Rvalue::Cast(..)
            | mir::Rvalue::ShallowInitBox(..)
            | mir::Rvalue::BinaryOp(..)
            | mir::Rvalue::UnaryOp(..)
            | mir::Rvalue::Discriminant(..)
            | mir::Rvalue::NullaryOp(..)
            | mir::Rvalue::ThreadLocalRef(_)
            | mir::Rvalue::Use(..)
            | mir::Rvalue::WrapUnsafeBinder(..) => true,
            mir::Rvalue::Repeat(..) => false,
            mir::Rvalue::Aggregate(ref kind, _) => {
                let allowed_kind = match **kind {
                    mir::AggregateKind::RawPtr(..) => true,
                    mir::AggregateKind::Array(..) => false,
                    mir::AggregateKind::Tuple => true,
                    mir::AggregateKind::Adt(def_id, ..) => {
                        let adt_def = self.cx.tcx().adt_def(def_id);
                        adt_def.is_struct() && !adt_def.repr().simd()
                    }
                    mir::AggregateKind::Closure(..) => true,
                    mir::AggregateKind::Coroutine(..)
                    | mir::AggregateKind::CoroutineClosure(..) => false,
                };
                allowed_kind && {
                    let ty = rvalue.ty(self.mir, self.cx.tcx());
                    let ty = self.monomorphize(ty);
                    let layout = self.cx.spanned_layout_of(ty, span);
                    !self.cx.is_backend_ref(layout)
                }
            }
        }
    }

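    /// Classifies the given layout by how its values are represented in
    /// codegen: by-ref, a single immediate scalar, a scalar pair, or zero-sized.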
    fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
        if layout.is_zst() {
            OperandValueKind::ZeroSized
        } else if self.cx.is_backend_immediate(layout) {
            assert!(!self.cx.is_backend_scalar_pair(layout));
            OperandValueKind::Immediate(match layout.backend_repr {
                abi::BackendRepr::Scalar(s) => s,
                abi::BackendRepr::Vector { element, .. } => element,
                x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
            })
        } else if self.cx.is_backend_scalar_pair(layout) {
            let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
                span_bug!(
                    self.mir.span,
                    "Couldn't translate {:?} as backend scalar pair",
                    layout.backend_repr,
                );
            };
            OperandValueKind::Pair(s1, s2)
        } else {
            OperandValueKind::Ref
        }
    }
}

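/// Mirrors the shape of [`OperandValue`], but carries only the scalar layout
/// information, so a layout can be classified before any value exists.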
#[derive(Debug, Copy, Clone)]
enum OperandValueKind {
    Ref,
    Immediate(abi::Scalar),
    Pair(abi::Scalar, abi::Scalar),
    ZeroSized,
}

impl OperandValueKind {
    fn scalars(self) -> Option<ArrayVec<abi::Scalar, 2>> {
        Some(match self {
            OperandValueKind::ZeroSized => ArrayVec::new(),
            OperandValueKind::Immediate(a) => ArrayVec::from_iter([a]),
            OperandValueKind::Pair(a, b) => [a, b].into(),
            OperandValueKind::Ref => return None,
        })
    }
}