// rustc_middle/mir/syntax.rs
//! This defines the syntax of MIR, i.e., the set of available MIR operations, and other definitions
//! closely related to MIR semantics.
//! This is in a dedicated file so that changes to this file can be reviewed more carefully.
//! The intention is that this file only contains datatype declarations, no code.

use rustc_abi::{FieldIdx, VariantIdx};
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece, Mutability};
use rustc_data_structures::packed::Pu128;
use rustc_hir::CoroutineKind;
use rustc_hir::def_id::DefId;
use rustc_index::IndexVec;
use rustc_macros::{HashStable, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable};
use rustc_span::def_id::LocalDefId;
use rustc_span::source_map::Spanned;
use rustc_span::{Span, Symbol};
use rustc_target::asm::InlineAsmRegOrRegClass;
use smallvec::SmallVec;

use super::{BasicBlock, Const, Local, UserTypeProjection};
use crate::mir::coverage::CoverageKind;
use crate::ty::adjustment::PointerCoercion;
use crate::ty::{self, GenericArgsRef, List, Region, Ty, UserTypeAnnotationIndex};

/// Represents the "flavors" of MIR.
///
/// The MIR pipeline is structured into a few major dialects, with one or more phases within each
/// dialect. A MIR flavor is identified by a dialect-phase pair. A single `MirPhase` value
/// specifies such a pair. All flavors of MIR use the same data structure to represent the program.
///
/// Different MIR dialects have different semantics. (The differences between dialects are small,
/// but they do exist.) The progression from one MIR dialect to the next is technically a lowering
/// from one IR to another. In other words, a single well-formed [`Body`](crate::mir::Body) might
/// have different semantic meaning and different behavior at runtime in the different dialects.
/// The specific differences between dialects are described on the variants below.
///
/// Phases exist only to place restrictions on what language constructs are permitted in
/// well-formed MIR, and subsequent phases mostly increase those restrictions. I.e. to convert MIR
/// from one phase to the next might require removing/replacing certain MIR constructs.
///
/// When adding dialects or phases, remember to update [`MirPhase::index`].
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)]
pub enum MirPhase {
    /// The "built MIR" dialect, as generated by MIR building.
    ///
    /// The only things that operate on this dialect are unsafeck, the various MIR lints, and const
    /// qualifs.
    ///
    /// This dialect has just the one (implicit) phase, which places few restrictions on what MIR
    /// constructs are allowed.
    Built,

    /// The "analysis MIR" dialect, used for borrowck and friends.
    ///
    /// The only semantic difference between built MIR and analysis MIR relates to constant
    /// promotion. In built MIR, sequences of statements that would generally be subject to
    /// constant promotion are semantically constants, while in analysis MIR all constants are
    /// explicit.
    ///
    /// The result of const promotion is available from the `mir_promoted` and `promoted_mir`
    /// queries.
    ///
    /// The phases of this dialect are described in `AnalysisPhase`.
    Analysis(AnalysisPhase),

    /// The "runtime MIR" dialect, used for CTFE, optimizations, and codegen.
    ///
    /// The semantic differences between analysis MIR and runtime MIR are as follows.
    ///
    /// - Drops: In analysis MIR, `Drop` terminators represent *conditional* drops; roughly
    ///   speaking, if dataflow analysis determines that the place being dropped is uninitialized,
    ///   the drop will not be executed. The exact semantics of this aren't written down anywhere,
    ///   which means they are essentially "what drop elaboration does." In runtime MIR, the drops
    ///   are unconditional; when a `Drop` terminator is reached, if the type has drop glue that
    ///   drop glue is always executed. This may be UB if the underlying place is not initialized.
    /// - Packed drops: Places might in general be misaligned - in most cases this is UB, the
    ///   exception is fields of packed structs. In analysis MIR, `Drop(P)` for a `P` that might be
    ///   misaligned for this reason implicitly moves `P` to a temporary before dropping. Runtime
    ///   MIR has no such rules, and dropping a misaligned place is simply UB.
    /// - Async drops: after drop elaboration some drops may become async (`drop`, `async_fut` fields).
    ///   StateTransform pass will expand those async drops or reset to sync.
    /// - Unwinding: in analysis MIR, unwinding from a function which may not unwind aborts. In
    ///   runtime MIR, this is UB.
    /// - Retags: If `-Zmir-emit-retag` is enabled, analysis MIR has "implicit" retags in the same
    ///   way that Rust itself has them. Where exactly these are is generally subject to change,
    ///   and so we don't document this here. Runtime MIR has most retags explicit (though implicit
    ///   retags can still occur at `Rvalue::{Ref,AddrOf}`).
    /// - Coroutine bodies: In analysis MIR, locals may actually be behind a pointer that user code
    ///   has access to. This occurs in coroutine bodies. Such locals do not behave like other
    ///   locals, because they e.g. may be aliased in surprising ways. Runtime MIR has no such
    ///   special locals. All coroutine bodies are lowered and so all places that look like locals
    ///   really are locals.
    ///
    /// Also note that the lint pass which reports eg `200_u8 + 200_u8` as an error is run as a part
    /// of analysis to runtime MIR lowering. To ensure lints are reported reliably, this means that
    /// transformations that can suppress such errors should not run on analysis MIR.
    ///
    /// The phases of this dialect are described in `RuntimePhase`.
    Runtime(RuntimePhase),
}

/// See [`MirPhase::Analysis`].
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)]
pub enum AnalysisPhase {
    /// The first phase of the analysis dialect; no restrictions beyond those of built MIR apply.
    Initial = 0,
    /// Beginning in this phase, the following variants are disallowed:
    /// * [`TerminatorKind::FalseUnwind`]
    /// * [`TerminatorKind::FalseEdge`]
    /// * [`StatementKind::FakeRead`]
    /// * [`StatementKind::AscribeUserType`]
    /// * [`StatementKind::Coverage`] with [`CoverageKind::BlockMarker`] or
    ///   [`CoverageKind::SpanMarker`]
    /// * [`Rvalue::Ref`] with `BorrowKind::Fake`
    /// * [`CastKind::PointerCoercion`] with any of the following:
    ///   * [`PointerCoercion::ArrayToPointer`]
    ///   * [`PointerCoercion::MutToConstPointer`]
    ///
    /// Furthermore, `Deref` projections must be the first projection within any place (if they
    /// appear at all)
    PostCleanup = 1,
}

/// See [`MirPhase::Runtime`].
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)]
pub enum RuntimePhase {
    /// In addition to the semantic changes, beginning with this phase, the following variants are
    /// disallowed:
    /// * [`TerminatorKind::Yield`]
    /// * [`TerminatorKind::CoroutineDrop`]
    /// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
    /// * [`PlaceElem::OpaqueCast`]
    ///
    /// And the following variants are allowed:
    /// * [`StatementKind::Retag`]
    /// * [`StatementKind::SetDiscriminant`]
    /// * [`StatementKind::Deinit`]
    ///
    /// Furthermore, `Copy` operands are allowed for non-`Copy` types.
    Initial = 0,
    /// Beginning with this phase, the following variant is disallowed:
    /// * [`ProjectionElem::Deref`] of `Box`
    PostCleanup = 1,
    /// The final phase: fully optimized runtime MIR, ready for codegen.
    Optimized = 2,
}

///////////////////////////////////////////////////////////////////////////
// Borrow kinds

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
#[derive(Hash, HashStable)]
pub enum BorrowKind {
    /// Data must be immutable and is aliasable.
    Shared,

    /// An immutable, aliasable borrow that is discarded after borrow-checking. Can behave either
    /// like a normal shared borrow or like a special shallow borrow (see [`FakeBorrowKind`]).
    ///
    /// This is used when lowering index expressions and matches. This is used to prevent code like
    /// the following from compiling:
    /// ```compile_fail,E0510
    /// let mut x: &[_] = &[[0, 1]];
    /// let y: &[_] = &[];
    /// let _ = x[0][{x = y; 1}];
    /// ```
    /// ```compile_fail,E0510
    /// let mut x = &Some(0);
    /// match *x {
    ///     None => (),
    ///     Some(_) if { x = &None; false } => (),
    ///     Some(_) => (),
    /// }
    /// ```
    /// We can also report errors with this kind of borrow differently.
    Fake(FakeBorrowKind),

    /// Data is mutable and not aliasable.
    Mut { kind: MutBorrowKind },
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
#[derive(Hash, HashStable)]
pub enum RawPtrKind {
    /// Creates a mutable raw pointer (`*mut`) to the place.
    Mut,
    /// Creates a const raw pointer (`*const`) to the place.
    Const,
    /// Creates a raw pointer to a place that will only be used to access its metadata,
    /// not the data behind the pointer. Note that this limitation is *not* enforced
    /// by the validator.
    ///
    /// The borrow checker allows overlap of these raw pointers with references to the
    /// data. This is sound even if the pointer is "misused" since any such use is anyway
    /// unsafe. In terms of the operational semantics (i.e., Miri), this is equivalent
    /// to `RawPtrKind::Mut`, but will never incur a retag.
    FakeForPtrMetadata,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
#[derive(Hash, HashStable)]
pub enum MutBorrowKind {
    /// An ordinary `&mut` borrow.
    Default,
    /// This borrow arose from method-call auto-ref. (i.e., `adjustment::Adjust::Borrow`)
    TwoPhaseBorrow,
    /// Data must be immutable but not aliasable. This kind of borrow
    /// cannot currently be expressed by the user and is used only in
    /// implicit closure bindings. It is needed when the closure is
    /// borrowing or mutating a mutable referent, e.g.:
    /// ```
    /// let mut z = 3;
    /// let x: &mut isize = &mut z;
    /// let y = || *x += 5;
    /// ```
    /// If we were to try to translate this closure into a more explicit
    /// form, we'd encounter an error with the code as written:
    /// ```compile_fail,E0594
    /// struct Env<'a> { x: &'a &'a mut isize }
    /// let mut z = 3;
    /// let x: &mut isize = &mut z;
    /// let y = (&mut Env { x: &x }, fn_ptr); // Closure is pair of env and fn
    /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
    /// ```
    /// This is then illegal because you cannot mutate an `&mut` found
    /// in an aliasable location. To solve, you'd have to translate with
    /// an `&mut` borrow:
    /// ```compile_fail,E0596
    /// struct Env<'a> { x: &'a mut &'a mut isize }
    /// let mut z = 3;
    /// let x: &mut isize = &mut z;
    /// let y = (&mut Env { x: &mut x }, fn_ptr); // changed from &x to &mut x
    /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
    /// ```
    /// Now the assignment to `**env.x` is legal, but creating a
    /// mutable pointer to `x` is not because `x` is not mutable. We
    /// could fix this by declaring `x` as `let mut x`. This is ok in
    /// user code, if awkward, but extra weird for closures, since the
    /// borrow is hidden.
    ///
    /// So we introduce a `ClosureCapture` borrow -- user will not have to mark the variable
    /// containing the mutable reference as `mut`, as they didn't ever
    /// intend to mutate the mutable reference itself. We still mutable capture it in order to
    /// mutate the pointed value through it (but not mutating the reference itself).
    ///
    /// This solves the problem. For simplicity, we don't give users the way to express this
    /// borrow, it's just used when translating closures.
    ClosureCapture,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
#[derive(Hash, HashStable)]
pub enum FakeBorrowKind {
    /// A shared shallow borrow. The immediately borrowed place must be immutable, but projections
    /// from it don't need to be. For example, a shallow borrow of `a.b` doesn't conflict with a
    /// mutable borrow of `a.b.c`.
    ///
    /// This is used when lowering matches: when matching on a place we want to ensure that place
    /// have the same value from the start of the match until an arm is selected. This prevents this
    /// code from compiling:
    /// ```compile_fail,E0510
    /// let mut x = &Some(0);
    /// match *x {
    ///     None => (),
    ///     Some(_) if { x = &None; false } => (),
    ///     Some(_) => (),
    /// }
    /// ```
    /// This can't be a shared borrow because mutably borrowing `(*x as Some).0` should not checking
    /// the discriminant or accessing other variants, because the mutating `(*x as Some).0` can't
    /// affect the discriminant of `x`. E.g. the following is allowed:
    /// ```rust
    /// let mut x = Some(0);
    /// match x {
    ///     Some(_)
    ///         if {
    ///             if let Some(ref mut y) = x {
    ///                 *y += 1;
    ///             };
    ///             true
    ///         } => {}
    ///     _ => {}
    /// }
    /// ```
    Shallow,
    /// A shared (deep) borrow. Data must be immutable and is aliasable.
    ///
    /// This is used when lowering deref patterns, where shallow borrows wouldn't prevent something
    /// like:
    /// ```compile_fail
    /// let mut b = Box::new(false);
    /// match b {
    ///     deref!(true) => {} // not reached because `*b == false`
    ///     _ if { *b = true; false } => {} // not reached because the guard is `false`
    ///     deref!(false) => {} // not reached because the guard changed it
    ///     // UB because we reached the unreachable.
    /// }
    /// ```
    Deep,
}

///////////////////////////////////////////////////////////////////////////
// Statements

/// The various kinds of statements that can appear in MIR.
///
/// Not all of these are allowed at every [`MirPhase`]. Check the documentation there to see which
/// ones you do not have to worry about. The MIR validator will generally enforce such restrictions,
/// causing an ICE if they are violated.
#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub enum StatementKind<'tcx> {
    /// Assign statements roughly correspond to an assignment in Rust proper (`x = ...`) except
    /// without the possibility of dropping the previous value (that must be done separately, if at
    /// all). The *exact* way this works is undecided. It probably does something like evaluating
    /// the LHS to a place and the RHS to a value, and then storing the value to the place. Various
    /// parts of this may do type specific things that are more complicated than simply copying
    /// bytes.
    ///
    /// **Needs clarification**: The implication of the above idea would be that assignment implies
    /// that the resulting value is initialized. I believe we could commit to this separately from
    /// committing to whatever part of the memory model we would need to decide on to make the above
    /// paragraph precise. Do we want to?
    ///
    /// Assignments in which the types of the place and rvalue differ are not well-formed.
    ///
    /// **Needs clarification**: Do we ever want to worry about non-free (in the body) lifetimes for
    /// the typing requirement in post drop-elaboration MIR? I think probably not - I'm not sure we
    /// could meaningfully require this anyway. How about free lifetimes? Is ignoring this
    /// interesting for optimizations? Do we want to allow such optimizations?
    ///
    /// **Needs clarification**: We currently require that the LHS place not overlap with any place
    /// read as part of computation of the RHS for some rvalues (generally those not producing
    /// primitives). This requirement is under discussion in [#68364]. As a part of this discussion,
    /// it is also unclear in what order the components are evaluated.
    ///
    /// [#68364]: https://github.com/rust-lang/rust/issues/68364
    ///
    /// See [`Rvalue`] documentation for details on each of those.
    Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>),

    /// When executed at runtime, this is a nop.
    ///
    /// During static analysis, a fake read:
    /// - requires that the value being read is initialized (or, in the case
    ///   of closures, that it was fully initialized at some point in the past)
    /// - constitutes a use of a value for the purposes of NLL (i.e. if the
    ///   value being fake-read is a reference, the lifetime of that reference
    ///   will be extended to cover the `FakeRead`)
    /// - but, unlike an actual read, does *not* invalidate any exclusive
    ///   borrows.
    ///
    /// See [`FakeReadCause`] for more details on the situations in which a
    /// `FakeRead` is emitted.
    ///
    /// Disallowed after drop elaboration.
    FakeRead(Box<(FakeReadCause, Place<'tcx>)>),

    /// Write the discriminant for a variant to the enum Place.
    ///
    /// This is permitted for both coroutines and ADTs. This does not necessarily write to the
    /// entire place; instead, it writes to the minimum set of bytes as required by the layout for
    /// the type.
    SetDiscriminant { place: Box<Place<'tcx>>, variant_index: VariantIdx },

    /// Deinitializes the place.
    ///
    /// This writes `uninit` bytes to the entire place.
    Deinit(Box<Place<'tcx>>),

    /// `StorageLive` and `StorageDead` statements mark the live range of a local.
    ///
    /// At any point during the execution of a function, each local is either allocated or
    /// unallocated. Except as noted below, all locals except function parameters are initially
    /// unallocated. `StorageLive` statements cause memory to be allocated for the local while
    /// `StorageDead` statements cause the memory to be freed. In other words,
    /// `StorageLive`/`StorageDead` act like the heap operations `allocate`/`deallocate`, but for
    /// stack-allocated local variables. Using a local in any way (not only reading/writing from it)
    /// while it is unallocated is UB.
    ///
    /// Some locals have no `StorageLive` or `StorageDead` statements within the entire MIR body.
    /// These locals are implicitly allocated for the full duration of the function. There is a
    /// convenience method at `rustc_mir_dataflow::storage::always_storage_live_locals` for
    /// computing these locals.
    ///
    /// If the local is already allocated, calling `StorageLive` again will implicitly free the
    /// local and then allocate fresh uninitialized memory. If a local is already deallocated,
    /// calling `StorageDead` again is a NOP.
    StorageLive(Local),

    /// See `StorageLive` above.
    StorageDead(Local),

    /// Retag references in the given place, ensuring they got fresh tags.
    ///
    /// This is part of the Stacked Borrows model. These statements are currently only interpreted
    /// by miri and only generated when `-Z mir-emit-retag` is passed. See
    /// <https://internals.rust-lang.org/t/stacked-borrows-an-aliasing-model-for-rust/8153/> for
    /// more details.
    ///
    /// For code that is not specific to stacked borrows, you should consider retags to read and
    /// modify the place in an opaque way.
    ///
    /// Only `RetagKind::Default` and `RetagKind::FnEntry` are permitted.
    Retag(RetagKind, Box<Place<'tcx>>),

    /// This statement exists to preserve a trace of a scrutinee matched against a wildcard binding.
    /// This is especially useful for `let _ = PLACE;` bindings that desugar to a single
    /// `PlaceMention(PLACE)`.
    ///
    /// When executed at runtime, this computes the given place, but then discards
    /// it without doing a load. `let _ = *ptr;` is fine even if the pointer is dangling.
    PlaceMention(Box<Place<'tcx>>),

    /// Encodes a user's type ascription. These need to be preserved
    /// intact so that NLL can respect them. For example:
    /// ```ignore (illustrative)
    /// let a: T = y;
    /// ```
    /// The effect of this annotation is to relate the type `T_y` of the place `y`
    /// to the user-given type `T`. The effect depends on the specified variance:
    ///
    /// - `Covariant` -- requires that `T_y <: T`
    /// - `Contravariant` -- requires that `T_y :> T`
    /// - `Invariant` -- requires that `T_y == T`
    /// - `Bivariant` -- no effect
    ///
    /// When executed at runtime this is a nop.
    ///
    /// Disallowed after drop elaboration.
    AscribeUserType(Box<(Place<'tcx>, UserTypeProjection)>, ty::Variance),

    /// Carries control-flow-sensitive information injected by `-Cinstrument-coverage`,
    /// such as where to generate physical coverage-counter-increments during codegen.
    ///
    /// Coverage statements are used in conjunction with the coverage mappings and other
    /// information stored in the function's
    /// [`mir::Body::function_coverage_info`](crate::mir::Body::function_coverage_info).
    /// (For inlined MIR, take care to look up the *original function's* coverage info.)
    ///
    /// Interpreters and codegen backends that don't support coverage instrumentation
    /// can usually treat this as a no-op.
    Coverage(
        // Coverage statements are unlikely to ever contain type information in
        // the foreseeable future, so excluding them from TypeFoldable/TypeVisitable
        // avoids some unhelpful derive boilerplate.
        #[type_foldable(identity)]
        #[type_visitable(ignore)]
        CoverageKind,
    ),

    /// Denotes a call to an intrinsic that does not require an unwind path and always returns.
    /// This avoids adding a new block and a terminator for simple intrinsics.
    Intrinsic(Box<NonDivergingIntrinsic<'tcx>>),

    /// Instructs the const eval interpreter to increment a counter; this counter is used to track
    /// how many steps the interpreter has taken. It is used to prevent the user from writing const
    /// code that runs for too long or infinitely. Other than in the const eval interpreter, this
    /// is a no-op.
    ConstEvalCounter,

    /// No-op. Useful for deleting instructions without affecting statement indices.
    Nop,

    /// Marker statement indicating where `place` would be dropped.
    /// This is semantically equivalent to `Nop`, so codegen and MIRI should interpret this
    /// statement as such.
    /// The only use case of this statement is for linting in MIR to detect temporary lifetime
    /// changes.
    BackwardIncompatibleDropHint {
        /// Place to drop
        place: Box<Place<'tcx>>,
        /// Reason for backward incompatibility
        reason: BackwardIncompatibleDropReason,
    },
}

#[derive(
    Clone,
    TyEncodable,
    TyDecodable,
    Debug,
    PartialEq,
    Hash,
    HashStable,
    TypeFoldable,
    TypeVisitable
)]
pub enum NonDivergingIntrinsic<'tcx> {
    /// Denotes a call to the intrinsic function `assume`.
    ///
    /// The operand must be a boolean. Optimizers may use the value of the boolean to backtrack its
    /// computation to infer information about other variables. So if the boolean came from a
    /// `x < y` operation, subsequent operations on `x` and `y` could elide various bound checks.
    /// If the argument is `false`, this operation is equivalent to `TerminatorKind::Unreachable`.
    Assume(Operand<'tcx>),

    /// Denotes a call to the intrinsic function `copy_nonoverlapping`.
    ///
    /// First, all three operands are evaluated. `src` and `dest` must each be a reference, pointer,
    /// or `Box` pointing to the same type `T`. `count` must evaluate to a `usize`. Then, `src` and
    /// `dest` are dereferenced, and `count * size_of::<T>()` bytes beginning with the first byte of
    /// the `src` place are copied to the contiguous range of bytes beginning with the first byte
    /// of `dest`.
    ///
    /// **Needs clarification**: In what order are operands computed and dereferenced? It should
    /// probably match the order for assignment, but that is also undecided.
    ///
    /// **Needs clarification**: Is this typed or not, ie is there a typed load and store involved?
    /// I vaguely remember Ralf saying somewhere that he thought it should not be.
    CopyNonOverlapping(CopyNonOverlapping<'tcx>),
}

/// Describes what kind of retag is to be performed.
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, Hash, HashStable)]
#[rustc_pass_by_value]
pub enum RetagKind {
    /// The initial retag of arguments when entering a function.
    FnEntry,
    /// Retag preparing for a two-phase borrow.
    TwoPhase,
    /// Retagging raw pointers.
    Raw,
    /// A "normal" retag.
    Default,
}

/// The `FakeReadCause` describes the type of pattern why a FakeRead statement exists.
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, Hash, HashStable, PartialEq)]
pub enum FakeReadCause {
    /// A fake read injected into a match guard to ensure that the discriminants
    /// that are being matched on aren't modified while the match guard is being
    /// evaluated.
    ///
    /// At the beginning of each match guard, a [fake borrow][FakeBorrowKind] is
    /// inserted for each discriminant accessed in the entire `match` statement.
    ///
    /// Then, at the end of the match guard, a `FakeRead(ForMatchGuard)` is
    /// inserted to keep the fake borrows alive until that point.
    ///
    /// This should ensure that you cannot change the variant for an enum while
    /// you are in the midst of matching on it.
    ForMatchGuard,

    /// Fake read of the scrutinee of a `match` or destructuring `let`
    /// (i.e. `let` with non-trivial pattern).
    ///
    /// In `match x { ... }`, we generate a `FakeRead(ForMatchedPlace, x)`
    /// and insert it into the `otherwise_block` (which is supposed to be
    /// unreachable for irrefutable pattern-matches like `match` or `let`).
    ///
    /// This is necessary because `let x: !; match x {}` doesn't generate any
    /// actual read of x, so we need to generate a `FakeRead` to check that it
    /// is initialized.
    ///
    /// If the `FakeRead(ForMatchedPlace)` is being performed with a closure
    /// that doesn't capture the required upvars, the `FakeRead` within the
    /// closure is omitted entirely.
    ///
    /// To make sure that this is still sound, if a closure matches against
    /// a Place starting with an Upvar, we hoist the `FakeRead` to the
    /// definition point of the closure.
    ///
    /// If the `FakeRead` comes from being hoisted out of a closure like this,
    /// we record the `LocalDefId` of the closure. Otherwise, the `Option` will be `None`.
    //
    // We can use LocalDefId here since fake read statements are removed
    // before codegen in the `CleanupNonCodegenStatements` pass.
    ForMatchedPlace(Option<LocalDefId>),

    /// A fake read injected into a match guard to ensure that the places
    /// bound by the pattern are immutable for the duration of the match guard.
    ///
    /// Within a match guard, references are created for each place that the
    /// pattern creates a binding for — this is known as the `RefWithinGuard`
    /// version of the variables. To make sure that the references stay
    /// alive until the end of the match guard, and properly prevent the
    /// places in question from being modified, a `FakeRead(ForGuardBinding)`
    /// is inserted at the end of the match guard.
    ///
    /// For details on how these references are created, see the extensive
    /// documentation on `bind_matched_candidate_for_guard` in
    /// `rustc_mir_build`.
    ForGuardBinding,

    /// Officially, the semantics of
    ///
    /// `let pattern = <expr>;`
    ///
    /// is that `<expr>` is evaluated into a temporary and then this temporary is
    /// into the pattern.
    ///
    /// However, if we see the simple pattern `let var = <expr>`, we optimize this to
    /// evaluate `<expr>` directly into the variable `var`. This is mostly unobservable,
    /// but in some cases it can affect the borrow checker, as in #53695.
    ///
    /// Therefore, we insert a `FakeRead(ForLet)` immediately after each `let`
    /// with a trivial pattern.
    ///
    /// FIXME: `ExprUseVisitor` has an entirely different opinion on what `FakeRead(ForLet)`
    /// is supposed to mean. If it was accurate to what MIR lowering does,
    /// would it even make sense to hoist these out of closures like
    /// `ForMatchedPlace`?
    ForLet(Option<LocalDefId>),

    /// Currently, index expressions overloaded through the `Index` trait
    /// get lowered differently than index expressions with builtin semantics
    /// for arrays and slices — the latter will emit code to perform
    /// bound checks, and then return a MIR place that will only perform the
    /// indexing "for real" when it gets incorporated into an instruction.
    ///
    /// This is observable in the fact that the following compiles:
    ///
    /// ```
    /// fn f(x: &mut [&mut [u32]], i: usize) {
    ///     x[i][x[i].len() - 1] += 1;
    /// }
    /// ```
    ///
    /// However, we need to be careful to not let the user invalidate the
    /// bound check with an expression like
    ///
    /// `(*x)[1][{ x = y; 4}]`
    ///
    /// Here, the first bounds check would be invalidated when we evaluate the
    /// second index expression. To make sure that this doesn't happen, we
    /// create a fake borrow of `x` and hold it while we evaluate the second
    /// index.
    ///
    /// This borrow is kept alive by a `FakeRead(ForIndex)` at the end of its
    /// scope.
    ForIndex,
}

/// Operands for [`NonDivergingIntrinsic::CopyNonOverlapping`]; see the documentation there for
/// the full evaluation semantics.
#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub struct CopyNonOverlapping<'tcx> {
    /// Pointer-like operand (reference, pointer, or `Box`) to copy from.
    pub src: Operand<'tcx>,
    /// Pointer-like operand (reference, pointer, or `Box`) to copy into; must not overlap `src`.
    pub dst: Operand<'tcx>,
    /// Number of elements to copy from src to dest, not bytes.
    pub count: Operand<'tcx>,
}

/// Represents how a [`TerminatorKind::Call`] was constructed.
/// Used only for diagnostics.
#[derive(Clone, Copy, TyEncodable, TyDecodable, Debug, PartialEq, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub enum CallSource {
    /// This came from something such as `a > b` or `a + b`. In THIR, if `from_hir_call`
    /// is false then this is the desugaring.
    OverloadedOperator,
    /// This was from comparison generated by a match, used by const-eval for better errors
    /// when the comparison cannot be done in compile time.
    ///
    /// (see <https://github.com/rust-lang/rust/issues/90237>)
    MatchCmp,
    /// Other types of desugaring that did not come from the HIR, but we don't care about
    /// for diagnostics (yet).
    Misc,
    /// Use of value, generating a clone function call
    Use,
    /// Normal function call, no special source
    Normal,
}

#[derive(Clone, Copy, Debug, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
#[derive(TypeFoldable, TypeVisitable)]
/// The macro that an inline assembly block was created by
pub enum InlineAsmMacro {
    /// The `asm!` macro
    Asm,
    /// The `naked_asm!` macro
    NakedAsm,
}

///////////////////////////////////////////////////////////////////////////
// Terminators

/// The various kinds of terminators, representing ways of exiting from a basic block.
///
/// A note on unwinding: Panics may occur during the execution of some terminators. Depending on the
/// `-C panic` flag, this may either cause the program to abort or the call stack to unwind. Such
/// terminators have a `unwind: UnwindAction` field on them. If stack unwinding occurs, then
/// once the current function is reached, an action will be taken based on the `unwind` field.
/// If the action is `Cleanup`, then the execution continues at the given basic block. If the
/// action is `Continue` then no cleanup is performed, and the stack continues unwinding.
///
/// The basic block pointed to by a `Cleanup` unwind action must have its `cleanup` flag set.
/// `cleanup` basic blocks have a couple restrictions:
/// 1. All `unwind` fields in them must be `UnwindAction::Terminate` or `UnwindAction::Unreachable`.
/// 2. `Return` terminators are not allowed in them. `Terminate` and `Resume` terminators are.
/// 3. All other basic blocks (in the current body) that are reachable from `cleanup` basic blocks
///    must also be `cleanup`. This is a part of the type system and checked statically, so it is
///    still an error to have such an edge in the CFG even if it's known that it won't be taken at
///    runtime.
/// 4. The control flow between cleanup blocks must look like an upside down tree. Roughly
///    speaking, this means that control flow that looks like a V is allowed, while control flow
///    that looks like a W is not. This is necessary to ensure that landing pad information can be
///    correctly codegened on MSVC. More precisely:
///
///    Begin with the standard control flow graph `G`. Modify `G` as follows: for any two cleanup
///    vertices `u` and `v` such that `u` dominates `v`, contract `u` and `v` into a single vertex,
///    deleting self edges and duplicate edges in the process. Now remove all vertices from `G`
///    that are not cleanup vertices or are not reachable. The resulting graph must be an inverted
///    tree, that is each vertex may have at most one successor and there may be no cycles.
703#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
704pub enum TerminatorKind<'tcx> {
705 /// Block has one successor; we continue execution there.
706 Goto { target: BasicBlock },
707
708 /// Switches based on the computed value.
709 ///
710 /// First, evaluates the `discr` operand. The type of the operand must be a signed or unsigned
711 /// integer, char, or bool, and must match the given type. Then, if the list of switch targets
712 /// contains the computed value, continues execution at the associated basic block. Otherwise,
713 /// continues execution at the "otherwise" basic block.
714 ///
715 /// Target values may not appear more than once.
716 SwitchInt {
717 /// The discriminant value being tested.
718 discr: Operand<'tcx>,
719 targets: SwitchTargets,
720 },
721
722 /// Indicates that the landing pad is finished and that the process should continue unwinding.
723 ///
724 /// Like a return, this marks the end of this invocation of the function.
725 ///
726 /// Only permitted in cleanup blocks. `Resume` is not permitted with `-C unwind=abort` after
727 /// deaggregation runs.
728 UnwindResume,
729
730 /// Indicates that the landing pad is finished and that the process should terminate.
731 ///
732 /// Used to prevent unwinding for foreign items or with `-C unwind=abort`. Only permitted in
733 /// cleanup blocks.
734 UnwindTerminate(UnwindTerminateReason),
735
736 /// Returns from the function.
737 ///
738 /// Like function calls, the exact semantics of returns in Rust are unclear. Returning very
739 /// likely at least assigns the value currently in the return place (`_0`) to the place
740 /// specified in the associated `Call` terminator in the calling function, as if assigned via
741 /// `dest = move _0`. It might additionally do other things, like have side-effects in the
742 /// aliasing model.
743 ///
744 /// If the body is a coroutine body, this has slightly different semantics; it instead causes a
745 /// `CoroutineState::Returned(_0)` to be created (as if by an `Aggregate` rvalue) and assigned
746 /// to the return place.
747 Return,
748
749 /// Indicates a terminator that can never be reached.
750 ///
751 /// Executing this terminator is UB.
752 Unreachable,
753
754 /// The behavior of this statement differs significantly before and after drop elaboration.
755 ///
756 /// After drop elaboration: `Drop` terminators are a complete nop for types that have no drop
757 /// glue. For other types, `Drop` terminators behave exactly like a call to
758 /// `core::mem::drop_in_place` with a pointer to the given place.
759 ///
760 /// `Drop` before drop elaboration is a *conditional* execution of the drop glue. Specifically,
761 /// the `Drop` will be executed if...
762 ///
763 /// **Needs clarification**: End of that sentence. This in effect should document the exact
764 /// behavior of drop elaboration. The following sounds vaguely right, but I'm not quite sure:
765 ///
766 /// > The drop glue is executed if, among all statements executed within this `Body`, an assignment to
767 /// > the place or one of its "parents" occurred more recently than a move out of it. This does not
768 /// > consider indirect assignments.
769 ///
770 /// The `replace` flag indicates whether this terminator was created as part of an assignment.
771 /// This should only be used for diagnostic purposes, and does not have any operational
772 /// meaning.
773 ///
774 /// Async drop processing:
775 /// In compiler/rustc_mir_build/src/build/scope.rs we detect possible async drop:
776 /// drop of object with `needs_async_drop`.
777 /// Async drop later, in StateTransform pass, may be expanded into additional yield-point
778 /// for poll-loop of async drop future.
779 /// So we need prepared 'drop' target block in the similar way as for `Yield` terminator
780 /// (see `drops.build_mir::<CoroutineDrop>` in scopes.rs).
781 /// In compiler/rustc_mir_transform/src/elaborate_drops.rs for object implementing `AsyncDrop` trait
782 /// we need to prepare async drop feature - resolve `AsyncDrop::drop` and codegen call.
783 /// `async_fut` is set to the corresponding local.
784 /// For coroutine drop we don't need this logic because coroutine drop works with the same
785 /// layout object as coroutine itself. So `async_fut` will be `None` for coroutine drop.
786 /// Both `drop` and `async_fut` fields are only used in compiler/rustc_mir_transform/src/coroutine.rs,
787 /// StateTransform pass. In `expand_async_drops` async drops are expanded
788 /// into one or two yield points with poll ready/pending switch.
789 /// When a coroutine has any internal async drop, the coroutine drop function will be async
790 /// (generated by `create_coroutine_drop_shim_async`, not `create_coroutine_drop_shim`).
791 Drop {
792 place: Place<'tcx>,
793 target: BasicBlock,
794 unwind: UnwindAction,
795 replace: bool,
796 /// Cleanup to be done if the coroutine is dropped at this suspend point (for async drop).
797 drop: Option<BasicBlock>,
798 /// Prepared async future local (for async drop)
799 async_fut: Option<Local>,
800 },
801
802 /// Roughly speaking, evaluates the `func` operand and the arguments, and starts execution of
803 /// the referred to function. The operand types must match the argument types of the function.
804 /// The return place type must match the return type. The type of the `func` operand must be
805 /// callable, meaning either a function pointer, a function type, or a closure type.
806 ///
807 /// **Needs clarification**: The exact semantics of this. Current backends rely on `move`
808 /// operands not aliasing the return place. It is unclear how this is justified in MIR, see
809 /// [#71117].
810 ///
811 /// [#71117]: https://github.com/rust-lang/rust/issues/71117
812 Call {
813 /// The function that’s being called.
814 func: Operand<'tcx>,
815 /// Arguments the function is called with.
816 /// These are owned by the callee, which is free to modify them.
817 /// This allows the memory occupied by "by-value" arguments to be
818 /// reused across function calls without duplicating the contents.
819 /// The span for each arg is also included
820 /// (e.g. `a` and `b` in `x.foo(a, b)`).
821 args: Box<[Spanned<Operand<'tcx>>]>,
822 /// Where the returned value will be written
823 destination: Place<'tcx>,
824 /// Where to go after this call returns. If none, the call necessarily diverges.
825 target: Option<BasicBlock>,
826 /// Action to be taken if the call unwinds.
827 unwind: UnwindAction,
828 /// Where this call came from in HIR/THIR.
829 call_source: CallSource,
830 /// This `Span` is the span of the function, without the dot and receiver
831 /// e.g. `foo(a, b)` in `x.foo(a, b)`
832 fn_span: Span,
833 },
834
835 /// Tail call.
836 ///
837 /// Roughly speaking this is a chimera of [`Call`] and [`Return`], with some caveats.
838 /// Semantically tail calls consists of two actions:
839 /// - pop of the current stack frame
840 /// - a call to the `func`, with the return address of the **current** caller
841 /// - so that a `return` inside `func` returns to the caller of the caller
842 /// of the function that is currently being executed
843 ///
844 /// Note that in difference with [`Call`] this is missing
845 /// - `destination` (because it's always the return place)
846 /// - `target` (because it's always taken from the current stack frame)
847 /// - `unwind` (because it's always taken from the current stack frame)
848 ///
849 /// [`Call`]: TerminatorKind::Call
850 /// [`Return`]: TerminatorKind::Return
851 TailCall {
852 /// The function that’s being called.
853 func: Operand<'tcx>,
854 /// Arguments the function is called with.
855 /// These are owned by the callee, which is free to modify them.
856 /// This allows the memory occupied by "by-value" arguments to be
857 /// reused across function calls without duplicating the contents.
858 args: Box<[Spanned<Operand<'tcx>>]>,
859 // FIXME(explicit_tail_calls): should we have the span for `become`? is this span accurate? do we need it?
860 /// This `Span` is the span of the function, without the dot and receiver
861 /// (e.g. `foo(a, b)` in `x.foo(a, b)`
862 fn_span: Span,
863 },
864
865 /// Evaluates the operand, which must have type `bool`. If it is not equal to `expected`,
866 /// initiates a panic. Initiating a panic corresponds to a `Call` terminator with some
867 /// unspecified constant as the function to call, all the operands stored in the `AssertMessage`
868 /// as parameters, and `None` for the destination. Keep in mind that the `cleanup` path is not
869 /// necessarily executed even in the case of a panic, for example in `-C panic=abort`. If the
870 /// assertion does not fail, execution continues at the specified basic block.
871 ///
872 /// When overflow checking is disabled and this is run-time MIR (as opposed to compile-time MIR
873 /// that is used for CTFE), the following variants of this terminator behave as `goto target`:
874 /// - `OverflowNeg(..)`,
875 /// - `Overflow(op, ..)` if op is add, sub, mul, shl, shr, but NOT div or rem.
876 Assert {
877 cond: Operand<'tcx>,
878 expected: bool,
879 msg: Box<AssertMessage<'tcx>>,
880 target: BasicBlock,
881 unwind: UnwindAction,
882 },
883
884 /// Marks a suspend point.
885 ///
886 /// Like `Return` terminators in coroutine bodies, this computes `value` and then a
887 /// `CoroutineState::Yielded(value)` as if by `Aggregate` rvalue. That value is then assigned to
888 /// the return place of the function calling this one, and execution continues in the calling
889 /// function. When next invoked with the same first argument, execution of this function
890 /// continues at the `resume` basic block, with the second argument written to the `resume_arg`
891 /// place. If the coroutine is dropped before then, the `drop` basic block is invoked.
892 ///
893 /// Note that coroutines can be (unstably) cloned under certain conditions, which means that
894 /// this terminator can **return multiple times**! MIR optimizations that reorder code into
895 /// different basic blocks needs to be aware of that.
896 /// See <https://github.com/rust-lang/rust/issues/95360>.
897 ///
898 /// Not permitted in bodies that are not coroutine bodies, or after coroutine lowering.
899 ///
900 /// **Needs clarification**: What about the evaluation order of the `resume_arg` and `value`?
901 Yield {
902 /// The value to return.
903 value: Operand<'tcx>,
904 /// Where to resume to.
905 resume: BasicBlock,
906 /// The place to store the resume argument in.
907 resume_arg: Place<'tcx>,
908 /// Cleanup to be done if the coroutine is dropped at this suspend point.
909 drop: Option<BasicBlock>,
910 },
911
912 /// Indicates the end of dropping a coroutine.
913 ///
914 /// Semantically just a `return` (from the coroutines drop glue). Only permitted in the same situations
915 /// as `yield`.
916 ///
917 /// **Needs clarification**: Is that even correct? The coroutine drop code is always confusing
918 /// to me, because it's not even really in the current body.
919 ///
920 /// **Needs clarification**: Are there type system constraints on these terminators? Should
921 /// there be a "block type" like `cleanup` blocks for them?
922 CoroutineDrop,
923
924 /// A block where control flow only ever takes one real path, but borrowck needs to be more
925 /// conservative.
926 ///
927 /// At runtime this is semantically just a goto.
928 ///
929 /// Disallowed after drop elaboration.
930 FalseEdge {
931 /// The target normal control flow will take.
932 real_target: BasicBlock,
933 /// A block control flow could conceptually jump to, but won't in
934 /// practice.
935 imaginary_target: BasicBlock,
936 },
937
938 /// A terminator for blocks that only take one path in reality, but where we reserve the right
939 /// to unwind in borrowck, even if it won't happen in practice. This can arise in infinite loops
940 /// with no function calls for example.
941 ///
942 /// At runtime this is semantically just a goto.
943 ///
944 /// Disallowed after drop elaboration.
945 FalseUnwind {
946 /// The target normal control flow will take.
947 real_target: BasicBlock,
948 /// The imaginary cleanup block link. This particular path will never be taken
949 /// in practice, but in order to avoid fragility we want to always
950 /// consider it in borrowck. We don't want to accept programs which
951 /// pass borrowck only when `panic=abort` or some assertions are disabled
952 /// due to release vs. debug mode builds.
953 unwind: UnwindAction,
954 },
955
956 /// Block ends with an inline assembly block. This is a terminator since
957 /// inline assembly is allowed to diverge.
958 InlineAsm {
959 /// Macro used to create this inline asm: one of `asm!` or `naked_asm!`
960 asm_macro: InlineAsmMacro,
961
962 /// The template for the inline assembly, with placeholders.
963 #[type_foldable(identity)]
964 #[type_visitable(ignore)]
965 template: &'tcx [InlineAsmTemplatePiece],
966
967 /// The operands for the inline assembly, as `Operand`s or `Place`s.
968 operands: Box<[InlineAsmOperand<'tcx>]>,
969
970 /// Miscellaneous options for the inline assembly.
971 options: InlineAsmOptions,
972
973 /// Source spans for each line of the inline assembly code. These are
974 /// used to map assembler errors back to the line in the source code.
975 #[type_foldable(identity)]
976 #[type_visitable(ignore)]
977 line_spans: &'tcx [Span],
978
979 /// Valid targets for the inline assembly.
980 /// The first element is the fallthrough destination, unless
981 /// asm_macro == InlineAsmMacro::NakedAsm or InlineAsmOptions::NORETURN is set.
982 targets: Box<[BasicBlock]>,
983
984 /// Action to be taken if the inline assembly unwinds. This is present
985 /// if and only if InlineAsmOptions::MAY_UNWIND is set.
986 unwind: UnwindAction,
987 },
988}
989
/// The reason a drop is reported by a backward-incompatible-drop diagnostic.
#[derive(
    Clone,
    Copy,
    Debug,
    TyEncodable,
    TyDecodable,
    Hash,
    HashStable,
    PartialEq,
    Eq,
    TypeFoldable,
    TypeVisitable
)]
pub enum BackwardIncompatibleDropReason {
    /// NOTE(review): presumably refers to the temporary-scope changes in Edition 2024 — confirm.
    Edition2024,
    /// Used by the `macro_extended_temporary_scopes` lint.
    MacroExtendedScope,
}
1008
/// The branch targets of a [`TerminatorKind::SwitchInt`] terminator.
#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
pub struct SwitchTargets {
    /// Possible values. For each value, the location to branch to is found in
    /// the corresponding element in the `targets` vector.
    pub(super) values: SmallVec<[Pu128; 1]>,

    /// Possible branch targets. The last element of this vector is used for
    /// the "otherwise" branch, so `targets.len() == values.len() + 1` always
    /// holds.
    //
    // Note: This invariant is non-obvious and easy to violate. This would be a
    // more rigorous representation:
    //
    //   normal: SmallVec<[(Pu128, BasicBlock); 1]>,
    //   otherwise: BasicBlock,
    //
    // But it's important to have the targets in a sliceable type, because
    // target slices show up elsewhere. E.g. `TerminatorKind::InlineAsm` has a
    // boxed slice, and `TerminatorKind::FalseEdge` has a single target that
    // can be converted to a slice with `slice::from_ref`.
    //
    // Why does this matter? In functions like `TerminatorKind::successors` we
    // return `impl Iterator` and a non-slice-of-targets representation here
    // causes problems because multiple different concrete iterator types would
    // be involved and we would need a boxed trait object, which requires an
    // allocation, which is expensive if done frequently.
    pub(super) targets: SmallVec<[BasicBlock; 2]>,
}
1037
/// Action to be taken when a stack unwind happens.
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub enum UnwindAction {
    /// No action is to be taken. Continue unwinding.
    ///
    /// This is similar to `Cleanup(bb)` where `bb` does nothing but `Resume`, but they are not
    /// equivalent, as presence of `Cleanup(_)` will make a frame non-POF.
    Continue,
    /// Triggers undefined behavior if unwinding happens.
    Unreachable,
    /// Terminates the execution if unwinding happens.
    ///
    /// Depending on the platform and situation this may cause a non-unwindable panic or abort.
    Terminate(UnwindTerminateReason),
    /// Cleanups to be done in the given (cleanup) basic block.
    Cleanup(BasicBlock),
}
1056
/// The reason we are terminating the process during unwinding.
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub enum UnwindTerminateReason {
    /// Unwinding is just not possible given the ABI of this function.
    Abi,
    /// We were already cleaning up for an ongoing unwind, and a *second*, *nested* unwind was
    /// triggered by the drop glue.
    InCleanup,
}
1067
/// Information about an assertion failure.
///
/// The `O` parameter is the operand type; see [`AssertMessage`].
#[derive(Clone, Hash, HashStable, PartialEq, Debug)]
#[derive(TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
pub enum AssertKind<O> {
    /// An index was out of bounds: `index` was not less than `len`.
    BoundsCheck { len: O, index: O },
    /// The binary operation applied to the two operands overflowed.
    Overflow(BinOp, O, O),
    /// Negation of the operand overflowed.
    OverflowNeg(O),
    /// The divisor operand was zero.
    DivisionByZero(O),
    /// The divisor operand of a remainder operation was zero.
    RemainderByZero(O),
    /// A coroutine was resumed after it had already returned.
    ResumedAfterReturn(CoroutineKind),
    /// A coroutine was resumed after it had panicked.
    ResumedAfterPanic(CoroutineKind),
    /// A coroutine was resumed after it had been dropped.
    ResumedAfterDrop(CoroutineKind),
    /// A pointer dereference required alignment `required` but the pointer had
    /// alignment `found`.
    MisalignedPointerDereference { required: O, found: O },
    /// A null pointer was dereferenced.
    NullPointerDereference,
    /// An enum was constructed with an invalid value.
    /// NOTE(review): the operand is presumably the invalid discriminant — confirm.
    InvalidEnumConstruction(O),
}
1084
/// An operand of an [`TerminatorKind::InlineAsm`] terminator, mirroring the
/// operand kinds of the `asm!` macro.
#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub enum InlineAsmOperand<'tcx> {
    /// An input operand: `value` is read into the given register (class).
    In {
        reg: InlineAsmRegOrRegClass,
        value: Operand<'tcx>,
    },
    /// An output operand: the register's final value is written to `place`,
    /// if one is given. `late` presumably corresponds to `lateout` — confirm
    /// against the `asm!` operand documentation.
    Out {
        reg: InlineAsmRegOrRegClass,
        late: bool,
        place: Option<Place<'tcx>>,
    },
    /// An operand that is both read (`in_value`) and written (`out_place`, if given)
    /// via the same register.
    InOut {
        reg: InlineAsmRegOrRegClass,
        late: bool,
        in_value: Operand<'tcx>,
        out_place: Option<Place<'tcx>>,
    },
    /// A constant substituted into the assembly template.
    Const {
        value: Box<ConstOperand<'tcx>>,
    },
    /// A function symbol referenced by the assembly.
    SymFn {
        value: Box<ConstOperand<'tcx>>,
    },
    /// A static symbol referenced by the assembly.
    SymStatic {
        def_id: DefId,
    },
    /// A jump target the assembly may branch to.
    Label {
        /// This represents the index into the `targets` array in `TerminatorKind::InlineAsm`.
        target_index: usize,
    },
}
1117
/// Type for MIR `Assert` terminator error messages: an [`AssertKind`] whose
/// `O` parameter is instantiated with a MIR [`Operand`].
pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
1120
1121///////////////////////////////////////////////////////////////////////////
1122// Places
1123
/// Places roughly correspond to a "location in memory." Places in MIR are the same mathematical
/// object as places in Rust. This of course means that what exactly they are is undecided and part
/// of the Rust memory model. However, they will likely contain at least the following pieces of
/// information in some form:
///
/// 1. The address in memory that the place refers to.
/// 2. The provenance with which the place is being accessed.
/// 3. The type of the place and an optional variant index. See [`PlaceTy`][super::PlaceTy].
/// 4. Optionally, some metadata. This exists if and only if the type of the place is not `Sized`.
///
/// We'll give a description below of how all pieces of the place except for the provenance are
/// calculated. We cannot give a description of the provenance, because that is part of the
/// undecided aliasing model - we only include it here at all to acknowledge its existence.
///
/// Each local naturally corresponds to the place `Place { local, projection: [] }`. This place has
/// the address of the local's allocation and the type of the local.
///
/// For places that are not locals, i.e., they have a non-empty list of projections, we define the
/// values as a function of the parent place, that is the place with its last [`ProjectionElem`]
/// stripped. The way this is computed of course depends on the kind of that last projection
/// element:
///
/// - [`Downcast`](ProjectionElem::Downcast): This projection sets the place's variant index to the
///   given one, and makes no other changes. A `Downcast` projection must always be followed
///   immediately by a `Field` projection.
/// - [`Field`](ProjectionElem::Field): `Field` projections take their parent place and create a
///   place referring to one of the fields of the type. The resulting address is the parent
///   address, plus the offset of the field. The type becomes the type of the field. If the parent
///   was unsized and so had metadata associated with it, then the metadata is retained if the
///   field is unsized and thrown out if it is sized.
///
///   These projections are only legal for tuples, ADTs, closures, and coroutines. If the ADT or
///   coroutine has more than one variant, the parent place's variant index must be set, indicating
///   which variant is being used. If it has just one variant, the variant index may or may not be
///   included - the single possible variant is inferred if it is not included.
/// - [`OpaqueCast`](ProjectionElem::OpaqueCast): This projection changes the place's type to the
///   given one, and makes no other changes. A `OpaqueCast` projection on any type other than an
///   opaque type from the current crate is not well-formed.
/// - [`ConstantIndex`](ProjectionElem::ConstantIndex): Computes an offset in units of `T` into the
///   place as described in the documentation for the `ProjectionElem`. The resulting address is
///   the parent's address plus that offset, and the type is `T`. This is only legal if the parent
///   place has type `[T; N]` or `[T]` (*not* `&[T]`). Since such a `T` is always sized, any
///   resulting metadata is thrown out.
/// - [`Subslice`](ProjectionElem::Subslice): This projection calculates an offset and a new
///   address in a similar manner as `ConstantIndex`. It is also only legal on `[T; N]` and `[T]`.
///   However, this yields a `Place` of type `[T]`, and additionally sets the metadata to be the
///   length of the subslice.
/// - [`Index`](ProjectionElem::Index): Like `ConstantIndex`, only legal on `[T; N]` or `[T]`.
///   However, `Index` additionally takes a local from which the value of the index is computed at
///   runtime. Computing the value of the index involves interpreting the `Local` as a
///   `Place { local, projection: [] }`, and then computing its value as if done via
///   [`Operand::Copy`]. The array/slice is then indexed with the resulting value. The local must
///   have type `usize`.
/// - [`Deref`](ProjectionElem::Deref): Derefs are the last type of projection, and the most
///   complicated. They are only legal on parent places that are references, pointers, or `Box`. A
///   `Deref` projection begins by loading a value from the parent place, as if by
///   [`Operand::Copy`]. It then dereferences the resulting pointer, creating a place of the
///   pointee's type. The resulting address is the address that was stored in the pointer. If the
///   pointee type is unsized, the pointer additionally stores the value of the metadata.
///
/// The "validity invariant" of places is the same as that of raw pointers, meaning that e.g.
/// `*ptr` on a dangling or unaligned pointer is never UB. (Later doing a load/store on that place
/// or turning it into a reference can be UB though!) The only ways a place computation can
/// cause UB are:
/// - On a `Deref` projection, we do an actual load of the inner place, with all the usual
///   consequences (the inner place must be based on an aligned pointer, it must point to allocated
///   memory, the aliasing model must allow reads, this must not be a data race).
/// - For the projections that perform pointer arithmetic, the offset must in-bounds of an
///   allocation (i.e., the preconditions of `ptr::offset` must be met).
#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Place<'tcx> {
    /// The base local the place starts from.
    pub local: Local,

    /// projection out of a place (access a field, deref a pointer, etc)
    pub projection: &'tcx List<PlaceElem<'tcx>>,
}
1200
/// One element of a place's projection list. See [`Place`] for the semantics of
/// each variant.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub enum ProjectionElem<V, T> {
    /// Dereference the parent place.
    Deref,

    /// A field (e.g., `f` in `_1.f`) is one variant of [`ProjectionElem`]. Conceptually,
    /// rustc can identify that a field projection refers to either two different regions of memory
    /// or the same one between the base and the 'projection element'.
    /// Read more about projections in the [rustc-dev-guide][mir-datatypes]
    ///
    /// [mir-datatypes]: https://rustc-dev-guide.rust-lang.org/mir/index.html#mir-data-types
    Field(FieldIdx, T),

    /// Index into a slice/array.
    ///
    /// Note that this does not also dereference, and so it does not exactly correspond to slice
    /// indexing in Rust. In other words, in the below Rust code:
    ///
    /// ```rust
    /// let x = &[1, 2, 3, 4];
    /// let i = 2;
    /// x[i];
    /// ```
    ///
    /// The `x[i]` is turned into a `Deref` followed by an `Index`, not just an `Index`. The same
    /// thing is true of the `ConstantIndex` and `Subslice` projections below.
    Index(V),

    /// These indices are generated by slice patterns. Easiest to explain
    /// by example:
    ///
    /// ```ignore (illustrative)
    /// [X, _, .._, _, _] => { offset: 0, min_length: 4, from_end: false },
    /// [_, X, .._, _, _] => { offset: 1, min_length: 4, from_end: false },
    /// [_, _, .._, X, _] => { offset: 2, min_length: 4, from_end: true },
    /// [_, _, .._, _, X] => { offset: 1, min_length: 4, from_end: true },
    /// ```
    ConstantIndex {
        /// index or -index (in Python terms), depending on from_end
        offset: u64,
        /// The thing being indexed must be at least this long -- otherwise, the
        /// projection is UB.
        ///
        /// For arrays this is always the exact length.
        min_length: u64,
        /// Counting backwards from end? This is always false when indexing an
        /// array.
        from_end: bool,
    },

    /// These indices are generated by slice patterns.
    ///
    /// If `from_end` is true `slice[from..slice.len() - to]`.
    /// Otherwise `array[from..to]`.
    Subslice {
        from: u64,
        to: u64,
        /// Whether `to` counts from the start or end of the array/slice.
        /// For `PlaceElem`s this is `true` if and only if the base is a slice.
        /// For `ProjectionKind`, this can also be `true` for arrays.
        from_end: bool,
    },

    /// "Downcast" to a variant of an enum or a coroutine.
    ///
    /// The included Symbol is the name of the variant, used for printing MIR.
    ///
    /// This operation itself is never UB, all it does is change the type of the place.
    Downcast(Option<Symbol>, VariantIdx),

    /// Like an explicit cast from an opaque type to a concrete type, but without
    /// requiring an intermediate variable.
    ///
    /// This is unused with `-Znext-solver`.
    OpaqueCast(T),

    /// A transmute from an unsafe binder to the type that it wraps. This is a projection
    /// of a place, so it doesn't necessarily constitute a move out of the binder.
    UnwrapUnsafeBinder(T),

    /// A `Subtype(T)` projection is applied to any `StatementKind::Assign` where
    /// the type of the lvalue doesn't match the type of the rvalue, the primary goal is making
    /// subtyping explicit during optimizations and codegen.
    ///
    /// This projection doesn't impact the runtime behavior of the program except for potentially changing
    /// some type metadata of the interpreter or codegen backend.
    ///
    /// This goal is achieved with mir_transform pass `Subtyper`, which runs right after
    /// the borrow checker, as we only care about subtyping that can affect trait selection and
    /// `TypeId`.
    Subtype(T),
}
1293
/// Alias for projections as they appear in places, where the base is a place
/// and the index is a local. See [`ProjectionElem`] for the meaning of each
/// variant.
pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
1297
1298///////////////////////////////////////////////////////////////////////////
1299// Operands
1300
/// An operand in MIR represents a "value" in Rust, the definition of which is undecided and part of
/// the memory model. One proposal for a definition of values can be found [on UCG][value-def].
///
/// [value-def]: https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/value-domain.md
///
/// The most common way to create values is via loading a place. Loading a place is an operation
/// which reads the memory of the place and converts it to a value. This is a fundamentally *typed*
/// operation. The nature of the value produced depends on the type of the conversion. Furthermore,
/// there may be other effects: if the type has a validity constraint loading the place might be UB
/// if the validity constraint is not met.
///
/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri
/// currently implements it, but it seems like this may be something to check against in the
/// validator.
#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)]
pub enum Operand<'tcx> {
    /// Creates a value by loading the given place.
    ///
    /// Before drop elaboration, the type of the place must be `Copy`. After drop elaboration there
    /// is no such requirement.
    Copy(Place<'tcx>),

    /// Creates a value by loading the given place, just like the `Copy` operand.
    ///
    /// This *may* additionally overwrite the place with `uninit` bytes, depending on how we decide
    /// in [UCG#188]. You should not emit MIR that may attempt a subsequent second load of this
    /// place without first re-initializing it.
    ///
    /// **Needs clarification:** The operational impact of `Move` is unclear. Currently (both in
    /// Miri and codegen) it has no effect at all unless it appears in an argument to `Call`; for
    /// `Call` it allows the argument to be passed to the callee "in-place", i.e. the callee might
    /// just get a reference to this place instead of a full copy. Miri implements this with a
    /// combination of aliasing model "protectors" and putting `uninit` into the place. Ralf
    /// proposes that we don't want these semantics for `Move` in regular assignments, because
    /// loading a place should not have side-effects, and the aliasing model "protectors" are
    /// inherently tied to a function call. Are these the semantics we want for MIR? Is this
    /// something we can even decide without knowing more about Rust's memory model?
    ///
    /// [UCG#188]: https://github.com/rust-lang/unsafe-code-guidelines/issues/188
    Move(Place<'tcx>),

    /// Constants are already semantically values, and remain unchanged.
    Constant(Box<ConstOperand<'tcx>>),
}
1345
/// A constant operand: the payload of [`Operand::Constant`].
#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub struct ConstOperand<'tcx> {
    /// The span of the source this constant came from.
    pub span: Span,

    /// Optional user-given type: for something like
    /// `collect::<Vec<_>>`, this would be present and would
    /// indicate that `Vec<_>` was explicitly specified.
    ///
    /// Needed for NLL to impose user-given type constraints.
    pub user_ty: Option<UserTypeAnnotationIndex>,

    /// The constant value itself.
    pub const_: Const<'tcx>,
}
1360
1361///////////////////////////////////////////////////////////////////////////
1362// Rvalues
1363
/// The various kinds of rvalues that can appear in MIR.
///
/// Not all of these are allowed at every [`MirPhase`] - when this is the case, it's stated below.
///
/// Computing any rvalue begins by evaluating the places and operands in some order (**Needs
/// clarification**: Which order?). These are then used to produce a "value" - the same kind of
/// value that an [`Operand`] produces.
#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
pub enum Rvalue<'tcx> {
    /// Yields the operand unchanged
    Use(Operand<'tcx>),

    /// Creates an array where each element is the value of the operand.
    ///
    /// Corresponds to source code like `[x; 32]`.
    Repeat(Operand<'tcx>, ty::Const<'tcx>),

    /// Creates a reference of the indicated kind to the place.
    ///
    /// There is not much to document here, because besides the obvious parts the semantics of this
    /// are essentially entirely a part of the aliasing model. There are many UCG issues discussing
    /// exactly what the behavior of this operation should be.
    ///
    /// `Shallow` borrows are disallowed after drop lowering.
    Ref(Region<'tcx>, BorrowKind, Place<'tcx>),

    /// Creates a pointer/reference to the given thread local.
    ///
    /// The yielded type is a `*mut T` if the static is mutable, otherwise if the static is extern a
    /// `*const T`, and if neither of those apply a `&T`.
    ///
    /// **Note:** This is a runtime operation that actually executes code and is in this sense more
    /// like a function call. Also, eliminating dead stores of this rvalue causes `fn main() {}` to
    /// SIGILL for some reason that I (JakobDegen) never got a chance to look into.
    ///
    /// **Needs clarification**: Are there weird additional semantics here related to the runtime
    /// nature of this operation?
    ThreadLocalRef(DefId),

    /// Creates a raw pointer with the indicated mutability to the place.
    ///
    /// This is generated by pointer casts like `&v as *const _` or raw borrow expressions like
    /// `&raw const v`.
    ///
    /// Like with references, the semantics of this operation are heavily dependent on the aliasing
    /// model.
    RawPtr(RawPtrKind, Place<'tcx>),

    /// Yields the length of the place, as a `usize`.
    ///
    /// If the type of the place is an array, this is the array length. For slices (`[T]`, not
    /// `&[T]`) this accesses the place's metadata to determine the length. This rvalue is
    /// ill-formed for places of other types.
    ///
    /// This cannot be a `UnOp(PtrMetadata, _)` because that expects a value, and we only
    /// have a place, and `UnOp(PtrMetadata, RawPtr(place))` is not a thing.
    Len(Place<'tcx>),

    /// Performs essentially all of the casts that can be performed via `as`.
    ///
    /// This allows for casts from/to a variety of types.
    ///
    /// **FIXME**: Document exactly which `CastKind`s allow which types of casts.
    Cast(CastKind, Operand<'tcx>, Ty<'tcx>),

    /// * `Offset` has the same semantics as [`offset`](pointer::offset), except that the second
    ///   parameter may be a `usize` as well.
    /// * The comparison operations accept `bool`s, `char`s, signed or unsigned integers, floats,
    ///   raw pointers, or function pointers and return a `bool`. The types of the operands must be
    ///   matching, up to the usual caveat of the lifetimes in function pointers.
    /// * Left and right shift operations accept signed or unsigned integers not necessarily of the
    ///   same type and return a value of the same type as their LHS. Like in Rust, the RHS is
    ///   truncated as needed.
    /// * The `Bit*` operations accept signed integers, unsigned integers, or bools with matching
    ///   types and return a value of that type.
    /// * The `FooWithOverflow` are like the `Foo`, but returning `(T, bool)` instead of just `T`,
    ///   where the `bool` is true if the result is not equal to the infinite-precision result.
    /// * The remaining operations accept signed integers, unsigned integers, or floats with
    ///   matching types and return a value of that type.
    BinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),

    /// Computes a value as described by the operation.
    NullaryOp(NullOp<'tcx>, Ty<'tcx>),

    /// Exactly like `BinaryOp`, but with fewer operands.
    ///
    /// Also does two's-complement arithmetic. Negation requires a signed integer or a float;
    /// bitwise not requires a signed integer, unsigned integer, or bool. Both operation kinds
    /// return a value with the same type as their operand.
    UnaryOp(UnOp, Operand<'tcx>),

    /// Computes the discriminant of the place, returning it as an integer of type
    /// [`discriminant_ty`]. Returns zero for types without discriminant.
    ///
    /// The validity requirements for the underlying value are undecided for this rvalue, see
    /// [#91095]. Note too that the value of the discriminant is not the same thing as the
    /// variant index; use [`discriminant_for_variant`] to convert.
    ///
    /// [`discriminant_ty`]: crate::ty::Ty::discriminant_ty
    /// [#91095]: https://github.com/rust-lang/rust/issues/91095
    /// [`discriminant_for_variant`]: crate::ty::Ty::discriminant_for_variant
    Discriminant(Place<'tcx>),

    /// Creates an aggregate value, like a tuple or struct.
    ///
    /// This is needed because dataflow analysis needs to distinguish
    /// `dest = Foo { x: ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case that `Foo`
    /// has a destructor.
    ///
    /// Disallowed after deaggregation for all aggregate kinds except `Array` and `Coroutine`. After
    /// coroutine lowering, `Coroutine` aggregate kinds are disallowed too.
    Aggregate(Box<AggregateKind<'tcx>>, IndexVec<FieldIdx, Operand<'tcx>>),

    /// Transmutes a `*mut u8` into shallow-initialized `Box<T>`.
    ///
    /// This is different from a normal transmute because dataflow analysis will treat the box as
    /// initialized but its content as uninitialized. Like other pointer casts, this in general
    /// affects alias analysis.
    ShallowInitBox(Operand<'tcx>, Ty<'tcx>),

    /// A CopyForDeref is equivalent to a read from a place at the
    /// codegen level, but is treated specially by drop elaboration. When such a read happens, it
    /// is guaranteed (via nature of the mir_opt `Derefer` in rustc_mir_transform/src/deref_separator)
    /// that the only use of the returned value is a deref operation, immediately
    /// followed by one or more projections. Drop elaboration treats this rvalue as if the
    /// read never happened and just projects further. This allows simplifying various MIR
    /// optimizations and codegen backends that previously had to handle deref operations anywhere
    /// in a place.
    CopyForDeref(Place<'tcx>),

    /// Wraps a value in an unsafe binder.
    WrapUnsafeBinder(Operand<'tcx>, Ty<'tcx>),
}
1497
/// The kinds of type casts that [`Rvalue::Cast`] can perform.
#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum CastKind {
    /// An exposing pointer to address cast. A cast between a pointer and an integer type, or
    /// between a function pointer and an integer type.
    /// See the docs on `expose_provenance` for more details.
    PointerExposeProvenance,
    /// An address-to-pointer cast that picks up an exposed provenance.
    /// See the docs on `with_exposed_provenance` for more details.
    PointerWithExposedProvenance,
    /// Pointer related casts that are done by coercions. Note that reference-to-raw-ptr casts are
    /// translated into `&raw mut/const *r`, i.e., they are not actually casts.
    ///
    /// The following are allowed in [`AnalysisPhase::Initial`] as they're needed for borrowck,
    /// but after that are forbidden (including in all phases of runtime MIR):
    /// * [`PointerCoercion::ArrayToPointer`]
    /// * [`PointerCoercion::MutToConstPointer`]
    ///
    /// Both are runtime nops, so should be [`CastKind::PtrToPtr`] instead in runtime MIR.
    PointerCoercion(PointerCoercion, CoercionSource),
    IntToInt,
    FloatToInt,
    FloatToFloat,
    IntToFloat,
    PtrToPtr,
    FnPtrToPtr,
    /// Reinterpret the bits of the input as a different type.
    ///
    /// MIR is well-formed if the input and output types have different sizes,
    /// but running a transmute between differently-sized types is UB.
    Transmute,
}
1529
/// Represents how a [`CastKind::PointerCoercion`] was constructed.
/// Used only for diagnostics; it has no effect on the coercion's runtime semantics.
#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum CoercionSource {
    /// The coercion was manually written by the user with an `as` cast.
    AsCast,
    /// The coercion was automatically inserted by the compiler.
    Implicit,
}
1539
/// The kind of aggregate being constructed by an [`Rvalue::Aggregate`].
#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub enum AggregateKind<'tcx> {
    /// The type is of the element
    Array(Ty<'tcx>),
    Tuple,

    /// The second field is the variant index. It's equal to 0 for struct
    /// and union expressions. The last field is the
    /// active field number and is present only for union expressions
    /// -- e.g., for a union expression `SomeUnion { c: .. }`, the
    /// active field index would identify the field `c`
    Adt(DefId, VariantIdx, GenericArgsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<FieldIdx>),

    Closure(DefId, GenericArgsRef<'tcx>),
    Coroutine(DefId, GenericArgsRef<'tcx>),
    CoroutineClosure(DefId, GenericArgsRef<'tcx>),

    /// Construct a raw pointer from the data pointer and metadata.
    ///
    /// The `Ty` here is the type of the *pointee*, not the pointer itself.
    /// The `Mutability` indicates whether this produces a `*const` or `*mut`.
    ///
    /// The [`Rvalue::Aggregate`] operands for this must be
    ///
    /// 0. A raw pointer of matching mutability with any [`core::ptr::Thin`] pointee
    /// 1. A value of the appropriate [`core::ptr::Pointee::Metadata`] type
    ///
    /// *Both* operands must always be included, even the unit value if this is
    /// creating a thin pointer. If you're just converting between thin pointers,
    /// you may want an [`Rvalue::Cast`] with [`CastKind::PtrToPtr`] instead.
    RawPtr(Ty<'tcx>, Mutability),
}
1573
/// The operations allowed in an [`Rvalue::NullaryOp`]: computations that are
/// determined by a type alone and take no runtime operands.
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum NullOp<'tcx> {
    /// Returns the size of a value of that type
    SizeOf,
    /// Returns the minimum alignment of a type
    AlignOf,
    /// Returns the offset of a field
    OffsetOf(&'tcx List<(VariantIdx, FieldIdx)>),
    /// Returns whether we should perform some UB-checking at runtime.
    /// See the `ub_checks` intrinsic docs for details.
    UbChecks,
    /// Returns whether we should perform contract-checking at runtime.
    /// See the `contract_checks` intrinsic docs for details.
    ContractChecks,
}
1589
/// The unary operations allowed in an [`Rvalue::UnaryOp`].
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
pub enum UnOp {
    /// The `!` operator for logical inversion
    Not,
    /// The `-` operator for negation
    Neg,
    /// Gets the metadata `M` from a `*const`/`*mut`/`&`/`&mut` to
    /// `impl Pointee<Metadata = M>`.
    ///
    /// For example, this will give a `()` from `*const i32`, a `usize` from
    /// `&mut [u8]`, or a `ptr::DynMetadata<dyn Foo>` (internally a pointer)
    /// from a `*mut dyn Foo`.
    ///
    /// Allowed only in [`MirPhase::Runtime`]; earlier it's an intrinsic.
    PtrMetadata,
}
1607
/// The binary operations allowed in an [`Rvalue::BinaryOp`].
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub enum BinOp {
    /// The `+` operator (addition)
    Add,
    /// Like `Add`, but with UB on overflow. (Integers only.)
    AddUnchecked,
    /// Like `Add`, but returns `(T, bool)` of both the wrapped result
    /// and a bool indicating whether it overflowed.
    AddWithOverflow,
    /// The `-` operator (subtraction)
    Sub,
    /// Like `Sub`, but with UB on overflow. (Integers only.)
    SubUnchecked,
    /// Like `Sub`, but returns `(T, bool)` of both the wrapped result
    /// and a bool indicating whether it overflowed.
    SubWithOverflow,
    /// The `*` operator (multiplication)
    Mul,
    /// Like `Mul`, but with UB on overflow. (Integers only.)
    MulUnchecked,
    /// Like `Mul`, but returns `(T, bool)` of both the wrapped result
    /// and a bool indicating whether it overflowed.
    MulWithOverflow,
    /// The `/` operator (division)
    ///
    /// For integer types, division by zero is UB, as is `MIN / -1` for signed.
    /// The compiler should have inserted checks prior to this.
    ///
    /// Floating-point division by zero is safe, and does not need guards.
    Div,
    /// The `%` operator (modulus)
    ///
    /// For integer types, using zero as the modulus (second operand) is UB,
    /// as is `MIN % -1` for signed.
    /// The compiler should have inserted checks prior to this.
    ///
    /// Floating-point remainder by zero is safe, and does not need guards.
    Rem,
    /// The `^` operator (bitwise xor)
    BitXor,
    /// The `&` operator (bitwise and)
    BitAnd,
    /// The `|` operator (bitwise or)
    BitOr,
    /// The `<<` operator (shift left)
    ///
    /// The offset is given by `RHS.rem_euclid(LHS::BITS)`.
    /// In other words, it is (uniquely) determined as follows:
    /// - it is "equal modulo LHS::BITS" to the RHS
    /// - it is in the range `0..LHS::BITS`
    Shl,
    /// Like `Shl`, but is UB if the RHS >= LHS::BITS or RHS < 0
    ShlUnchecked,
    /// The `>>` operator (shift right)
    ///
    /// The offset is given by `RHS.rem_euclid(LHS::BITS)`.
    /// In other words, it is (uniquely) determined as follows:
    /// - it is "equal modulo LHS::BITS" to the RHS
    /// - it is in the range `0..LHS::BITS`
    ///
    /// This is an arithmetic shift if the LHS is signed
    /// and a logical shift if the LHS is unsigned.
    Shr,
    /// Like `Shr`, but is UB if the RHS >= LHS::BITS or RHS < 0
    ShrUnchecked,
    /// The `==` operator (equality)
    Eq,
    /// The `<` operator (less than)
    Lt,
    /// The `<=` operator (less than or equal to)
    Le,
    /// The `!=` operator (not equal to)
    Ne,
    /// The `>=` operator (greater than or equal to)
    Ge,
    /// The `>` operator (greater than)
    Gt,
    /// The `<=>` operator (three-way comparison, like `Ord::cmp`)
    ///
    /// This is supported only on the integer types and `char`, always returning
    /// [`rustc_hir::LangItem::OrderingEnum`] (aka [`std::cmp::Ordering`]).
    ///
    /// [`Rvalue::BinaryOp`]`(BinOp::Cmp, A, B)` returns
    /// - `Ordering::Less` (`-1_i8`, as a Scalar) if `A < B`
    /// - `Ordering::Equal` (`0_i8`, as a Scalar) if `A == B`
    /// - `Ordering::Greater` (`+1_i8`, as a Scalar) if `A > B`
    Cmp,
    /// The `ptr.offset` operator
    Offset,
}
1699
/// Assignment operators, e.g. `+=`. See comments on the corresponding variants
/// in [`BinOp`] for details.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
pub enum AssignOp {
    AddAssign,
    SubAssign,
    MulAssign,
    DivAssign,
    RemAssign,
    BitXorAssign,
    BitAndAssign,
    BitOrAssign,
    ShlAssign,
    ShrAssign,
}
1715
1716// Sometimes `BinOp` and `AssignOp` need the same treatment. The operations
1717// covered by `AssignOp` are a subset of those covered by `BinOp`, so it makes
1718// sense to convert `AssignOp` to `BinOp`.
1719impl From<AssignOp> for BinOp {
1720 fn from(op: AssignOp) -> BinOp {
1721 match op {
1722 AssignOp::AddAssign => BinOp::Add,
1723 AssignOp::SubAssign => BinOp::Sub,
1724 AssignOp::MulAssign => BinOp::Mul,
1725 AssignOp::DivAssign => BinOp::Div,
1726 AssignOp::RemAssign => BinOp::Rem,
1727 AssignOp::BitXorAssign => BinOp::BitXor,
1728 AssignOp::BitAndAssign => BinOp::BitAnd,
1729 AssignOp::BitOrAssign => BinOp::BitOr,
1730 AssignOp::ShlAssign => BinOp::Shl,
1731 AssignOp::ShrAssign => BinOp::Shr,
1732 }
1733 }
1734}
1735
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
// If one of these asserts starts failing after a change, prefer shrinking the
// type (e.g. by boxing a large field) over just bumping the expected size.
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(AggregateKind<'_>, 32);
    static_assert_size!(Operand<'_>, 24);
    static_assert_size!(Place<'_>, 16);
    static_assert_size!(PlaceElem<'_>, 24);
    static_assert_size!(Rvalue<'_>, 40);
    static_assert_size!(StatementKind<'_>, 16);
    static_assert_size!(TerminatorKind<'_>, 80);
    // tidy-alphabetical-end
}