rustc_middle/mir/
syntax.rs

1//! This defines the syntax of MIR, i.e., the set of available MIR operations, and other definitions
2//! closely related to MIR semantics.
3//! This is in a dedicated file so that changes to this file can be reviewed more carefully.
4//! The intention is that this file only contains datatype declarations, no code.
5
6use rustc_abi::{FieldIdx, VariantIdx};
7use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece, Mutability};
8use rustc_data_structures::packed::Pu128;
9use rustc_hir::CoroutineKind;
10use rustc_hir::def_id::DefId;
11use rustc_index::IndexVec;
12use rustc_macros::{HashStable, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable};
13use rustc_span::def_id::LocalDefId;
14use rustc_span::source_map::Spanned;
15use rustc_span::{Span, Symbol};
16use rustc_target::asm::InlineAsmRegOrRegClass;
17use smallvec::SmallVec;
18
19use super::{BasicBlock, Const, Local, UserTypeProjection};
20use crate::mir::coverage::CoverageKind;
21use crate::ty::adjustment::PointerCoercion;
22use crate::ty::{self, GenericArgsRef, List, Region, Ty, UserTypeAnnotationIndex};
23
24/// Represents the "flavors" of MIR.
25///
26/// The MIR pipeline is structured into a few major dialects, with one or more phases within each
27/// dialect. A MIR flavor is identified by a dialect-phase pair. A single `MirPhase` value
28/// specifies such a pair. All flavors of MIR use the same data structure to represent the program.
29///
30/// Different MIR dialects have different semantics. (The differences between dialects are small,
31/// but they do exist.) The progression from one MIR dialect to the next is technically a lowering
32/// from one IR to another. In other words, a single well-formed [`Body`](crate::mir::Body) might
33/// have different semantic meaning and different behavior at runtime in the different dialects.
34/// The specific differences between dialects are described on the variants below.
35///
36/// Phases exist only to place restrictions on what language constructs are permitted in
37/// well-formed MIR, and subsequent phases mostly increase those restrictions. I.e. to convert MIR
38/// from one phase to the next might require removing/replacing certain MIR constructs.
39///
40/// When adding dialects or phases, remember to update [`MirPhase::index`].
41#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
42#[derive(HashStable)]
43pub enum MirPhase {
44    /// The "built MIR" dialect, as generated by MIR building.
45    ///
46    /// The only things that operate on this dialect are unsafeck, the various MIR lints, and const
47    /// qualifs.
48    ///
49    /// This dialect has just the one (implicit) phase, which places few restrictions on what MIR
50    /// constructs are allowed.
51    Built,
52
53    /// The "analysis MIR" dialect, used for borrowck and friends.
54    ///
55    /// The only semantic difference between built MIR and analysis MIR relates to constant
56    /// promotion. In built MIR, sequences of statements that would generally be subject to
57    /// constant promotion are semantically constants, while in analysis MIR all constants are
58    /// explicit.
59    ///
60    /// The result of const promotion is available from the `mir_promoted` and `promoted_mir`
61    /// queries.
62    ///
63    /// The phases of this dialect are described in `AnalysisPhase`.
64    Analysis(AnalysisPhase),
65
66    /// The "runtime MIR" dialect, used for CTFE, optimizations, and codegen.
67    ///
68    /// The semantic differences between analysis MIR and runtime MIR are as follows.
69    ///
70    /// - Drops: In analysis MIR, `Drop` terminators represent *conditional* drops; roughly
71    ///   speaking, if dataflow analysis determines that the place being dropped is uninitialized,
72    ///   the drop will not be executed. The exact semantics of this aren't written down anywhere,
73    ///   which means they are essentially "what drop elaboration does." In runtime MIR, the drops
74    ///   are unconditional; when a `Drop` terminator is reached, if the type has drop glue that
75    ///   drop glue is always executed. This may be UB if the underlying place is not initialized.
76    /// - Packed drops: Places might in general be misaligned - in most cases this is UB, the
77    ///   exception is fields of packed structs. In analysis MIR, `Drop(P)` for a `P` that might be
78    ///   misaligned for this reason implicitly moves `P` to a temporary before dropping. Runtime
79    ///   MIR has no such rules, and dropping a misaligned place is simply UB.
80    /// - Async drops: after drop elaboration some drops may become async (`drop`, `async_fut` fields).
81    ///   StateTransform pass will expand those async drops or reset to sync.
82    /// - Unwinding: in analysis MIR, unwinding from a function which may not unwind aborts. In
83    ///   runtime MIR, this is UB.
84    /// - Retags: If `-Zmir-emit-retag` is enabled, analysis MIR has "implicit" retags in the same
85    ///   way that Rust itself has them. Where exactly these are is generally subject to change,
86    ///   and so we don't document this here. Runtime MIR has most retags explicit (though implicit
87    ///   retags can still occur at `Rvalue::{Ref,AddrOf}`).
88    /// - Coroutine bodies: In analysis MIR, locals may actually be behind a pointer that user code
89    ///   has access to. This occurs in coroutine bodies. Such locals do not behave like other
90    ///   locals, because they e.g. may be aliased in surprising ways. Runtime MIR has no such
91    ///   special locals. All coroutine bodies are lowered and so all places that look like locals
92    ///   really are locals.
93    ///
94    /// Also note that the lint pass which reports eg `200_u8 + 200_u8` as an error is run as a part
95    /// of analysis to runtime MIR lowering. To ensure lints are reported reliably, this means that
96    /// transformations that can suppress such errors should not run on analysis MIR.
97    ///
98    /// The phases of this dialect are described in `RuntimePhase`.
99    Runtime(RuntimePhase),
100}
101
102/// See [`MirPhase::Analysis`].
103#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
104#[derive(HashStable)]
105pub enum AnalysisPhase {
106    Initial = 0,
107    /// Beginning in this phase, the following variants are disallowed:
108    /// * [`TerminatorKind::FalseUnwind`]
109    /// * [`TerminatorKind::FalseEdge`]
110    /// * [`StatementKind::FakeRead`]
111    /// * [`StatementKind::AscribeUserType`]
112    /// * [`StatementKind::Coverage`] with [`CoverageKind::BlockMarker`] or
113    ///   [`CoverageKind::SpanMarker`]
114    /// * [`Rvalue::Ref`] with `BorrowKind::Fake`
115    /// * [`CastKind::PointerCoercion`] with any of the following:
116    ///   * [`PointerCoercion::ArrayToPointer`]
117    ///   * [`PointerCoercion::MutToConstPointer`]
118    ///
119    /// Furthermore, `Deref` projections must be the first projection within any place (if they
120    /// appear at all)
121    PostCleanup = 1,
122}
123
124/// See [`MirPhase::Runtime`].
125#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
126#[derive(HashStable)]
127pub enum RuntimePhase {
128    /// In addition to the semantic changes, beginning with this phase, the following variants are
129    /// disallowed:
130    /// * [`TerminatorKind::Yield`]
131    /// * [`TerminatorKind::CoroutineDrop`]
132    /// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
133    /// * [`Rvalue::CopyForDeref`]
134    /// * [`PlaceElem::OpaqueCast`]
135    /// * [`LocalInfo::DerefTemp`](super::LocalInfo::DerefTemp)
136    ///
137    /// And the following variants are allowed:
138    /// * [`StatementKind::Retag`]
139    /// * [`StatementKind::SetDiscriminant`]
140    /// * [`PlaceElem::ConstantIndex`] / [`PlaceElem::Subslice`] after [`PlaceElem::Subslice`]
141    ///
142    /// Furthermore, `Copy` operands are allowed for non-`Copy` types.
143    Initial = 0,
144    /// Beginning with this phase, the following variant is disallowed:
145    /// * [`ProjectionElem::Deref`] of `Box`
146    PostCleanup = 1,
147    Optimized = 2,
148}
149
150///////////////////////////////////////////////////////////////////////////
151// Borrow kinds
152
153#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
154#[derive(Hash, HashStable)]
155pub enum BorrowKind {
156    /// Data must be immutable and is aliasable.
157    Shared,
158
159    /// An immutable, aliasable borrow that is discarded after borrow-checking. Can behave either
160    /// like a normal shared borrow or like a special shallow borrow (see [`FakeBorrowKind`]).
161    ///
162    /// This is used when lowering index expressions and matches. This is used to prevent code like
163    /// the following from compiling:
164    /// ```compile_fail,E0510
165    /// let mut x: &[_] = &[[0, 1]];
166    /// let y: &[_] = &[];
167    /// let _ = x[0][{x = y; 1}];
168    /// ```
169    /// ```compile_fail,E0510
170    /// let mut x = &Some(0);
171    /// match *x {
172    ///     None => (),
173    ///     Some(_) if { x = &None; false } => (),
174    ///     Some(_) => (),
175    /// }
176    /// ```
177    /// We can also report errors with this kind of borrow differently.
178    Fake(FakeBorrowKind),
179
180    /// Data is mutable and not aliasable.
181    Mut { kind: MutBorrowKind },
182}
183
184#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
185#[derive(Hash, HashStable)]
186pub enum RawPtrKind {
187    Mut,
188    Const,
189    /// Creates a raw pointer to a place that will only be used to access its metadata,
190    /// not the data behind the pointer. Note that this limitation is *not* enforced
191    /// by the validator.
192    ///
193    /// The borrow checker allows overlap of these raw pointers with references to the
194    /// data. This is sound even if the pointer is "misused" since any such use is anyway
195    /// unsafe. In terms of the operational semantics (i.e., Miri), this is equivalent
196    /// to `RawPtrKind::Mut`, but will never incur a retag.
197    FakeForPtrMetadata,
198}
199
200#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
201#[derive(Hash, HashStable)]
202pub enum MutBorrowKind {
203    Default,
204    /// This borrow arose from method-call auto-ref. (i.e., `adjustment::Adjust::Borrow`)
205    TwoPhaseBorrow,
206    /// Data must be immutable but not aliasable. This kind of borrow
207    /// cannot currently be expressed by the user and is used only in
208    /// implicit closure bindings. It is needed when the closure is
209    /// borrowing or mutating a mutable referent, e.g.:
210    /// ```
211    /// let mut z = 3;
212    /// let x: &mut isize = &mut z;
213    /// let y = || *x += 5;
214    /// ```
215    /// If we were to try to translate this closure into a more explicit
216    /// form, we'd encounter an error with the code as written:
217    /// ```compile_fail,E0594
218    /// struct Env<'a> { x: &'a &'a mut isize }
219    /// let mut z = 3;
220    /// let x: &mut isize = &mut z;
221    /// let y = (&mut Env { x: &x }, fn_ptr);  // Closure is pair of env and fn
222    /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
223    /// ```
224    /// This is then illegal because you cannot mutate an `&mut` found
225    /// in an aliasable location. To solve, you'd have to translate with
226    /// an `&mut` borrow:
227    /// ```compile_fail,E0596
228    /// struct Env<'a> { x: &'a mut &'a mut isize }
229    /// let mut z = 3;
230    /// let x: &mut isize = &mut z;
231    /// let y = (&mut Env { x: &mut x }, fn_ptr); // changed from &x to &mut x
232    /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
233    /// ```
234    /// Now the assignment to `**env.x` is legal, but creating a
235    /// mutable pointer to `x` is not because `x` is not mutable. We
236    /// could fix this by declaring `x` as `let mut x`. This is ok in
237    /// user code, if awkward, but extra weird for closures, since the
238    /// borrow is hidden.
239    ///
240    /// So we introduce a `ClosureCapture` borrow -- user will not have to mark the variable
241    /// containing the mutable reference as `mut`, as they didn't ever
242    /// intend to mutate the mutable reference itself. We still mutable capture it in order to
243    /// mutate the pointed value through it (but not mutating the reference itself).
244    ///
245    /// This solves the problem. For simplicity, we don't give users the way to express this
246    /// borrow, it's just used when translating closures.
247    ClosureCapture,
248}
249
250#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
251#[derive(Hash, HashStable)]
252pub enum FakeBorrowKind {
253    /// A shared shallow borrow. The immediately borrowed place must be immutable, but projections
254    /// from it don't need to be. For example, a shallow borrow of `a.b` doesn't conflict with a
255    /// mutable borrow of `a.b.c`.
256    ///
257    /// This is used when lowering matches: when matching on a place we want to ensure that place
258    /// have the same value from the start of the match until an arm is selected. This prevents this
259    /// code from compiling:
260    /// ```compile_fail,E0510
261    /// let mut x = &Some(0);
262    /// match *x {
263    ///     None => (),
264    ///     Some(_) if { x = &None; false } => (),
265    ///     Some(_) => (),
266    /// }
267    /// ```
268    /// This can't be a shared borrow because mutably borrowing `(*x as Some).0` should not checking
269    /// the discriminant or accessing other variants, because the mutating `(*x as Some).0` can't
270    /// affect the discriminant of `x`. E.g. the following is allowed:
271    /// ```rust
272    /// let mut x = Some(0);
273    /// match x {
274    ///     Some(_)
275    ///         if {
276    ///             if let Some(ref mut y) = x {
277    ///                 *y += 1;
278    ///             };
279    ///             true
280    ///         } => {}
281    ///     _ => {}
282    /// }
283    /// ```
284    Shallow,
285    /// A shared (deep) borrow. Data must be immutable and is aliasable.
286    ///
287    /// This is used when lowering deref patterns, where shallow borrows wouldn't prevent something
288    /// like:
289    /// ```compile_fail
290    /// let mut b = Box::new(false);
291    /// match b {
292    ///     deref!(true) => {} // not reached because `*b == false`
293    ///     _ if { *b = true; false } => {} // not reached because the guard is `false`
294    ///     deref!(false) => {} // not reached because the guard changed it
295    ///     // UB because we reached the unreachable.
296    /// }
297    /// ```
298    Deep,
299}
300
301///////////////////////////////////////////////////////////////////////////
302// Statements
303
304/// The various kinds of statements that can appear in MIR.
305///
306/// Not all of these are allowed at every [`MirPhase`]. Check the documentation there to see which
307/// ones you do not have to worry about. The MIR validator will generally enforce such restrictions,
308/// causing an ICE if they are violated.
309#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
310#[derive(TypeFoldable, TypeVisitable)]
311pub enum StatementKind<'tcx> {
312    /// Assign statements roughly correspond to an assignment in Rust proper (`x = ...`) except
313    /// without the possibility of dropping the previous value (that must be done separately, if at
314    /// all). The *exact* way this works is undecided. It probably does something like evaluating
315    /// the LHS to a place and the RHS to a value, and then storing the value to the place. Various
316    /// parts of this may do type specific things that are more complicated than simply copying
317    /// bytes.
318    ///
319    /// **Needs clarification**: The implication of the above idea would be that assignment implies
320    /// that the resulting value is initialized. I believe we could commit to this separately from
321    /// committing to whatever part of the memory model we would need to decide on to make the above
322    /// paragraph precise. Do we want to?
323    ///
324    /// Assignments in which the types of the place and rvalue differ are not well-formed.
325    ///
326    /// **Needs clarification**: Do we ever want to worry about non-free (in the body) lifetimes for
327    /// the typing requirement in post drop-elaboration MIR? I think probably not - I'm not sure we
328    /// could meaningfully require this anyway. How about free lifetimes? Is ignoring this
329    /// interesting for optimizations? Do we want to allow such optimizations?
330    ///
331    /// **Needs clarification**: We currently require that the LHS place not overlap with any place
332    /// read as part of computation of the RHS for some rvalues. This requirement is under
333    /// discussion in [#68364]. Specifically, overlap is permitted only for assignments of a type
334    /// with `BackendRepr::Scalar | BackendRepr::ScalarPair` where all the scalar fields are
335    /// [`Scalar::Initialized`][rustc_abi::Scalar::Initialized]. As a part of this discussion, it is
336    /// also unclear in what order the components are evaluated.
337    ///
338    /// [#68364]: https://github.com/rust-lang/rust/issues/68364
339    ///
340    /// See [`Rvalue`] documentation for details on each of those.
341    Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>),
342
343    /// When executed at runtime, this is a nop.
344    ///
345    /// During static analysis, a fake read:
346    /// - requires that the value being read is initialized (or, in the case
347    ///   of closures, that it was fully initialized at some point in the past)
348    /// - constitutes a use of a value for the purposes of NLL (i.e. if the
349    ///   value being fake-read is a reference, the lifetime of that reference
350    ///   will be extended to cover the `FakeRead`)
351    /// - but, unlike an actual read, does *not* invalidate any exclusive
352    ///   borrows.
353    ///
354    /// See [`FakeReadCause`] for more details on the situations in which a
355    /// `FakeRead` is emitted.
356    ///
357    /// Disallowed after drop elaboration.
358    FakeRead(Box<(FakeReadCause, Place<'tcx>)>),
359
360    /// Write the discriminant for a variant to the enum Place.
361    ///
362    /// This is permitted for both coroutines and ADTs. This does not necessarily write to the
363    /// entire place; instead, it writes to the minimum set of bytes as required by the layout for
364    /// the type.
365    SetDiscriminant { place: Box<Place<'tcx>>, variant_index: VariantIdx },
366
367    /// `StorageLive` and `StorageDead` statements mark the live range of a local.
368    ///
369    /// At any point during the execution of a function, each local is either allocated or
370    /// unallocated. Except as noted below, all locals except function parameters are initially
371    /// unallocated. `StorageLive` statements cause memory to be allocated for the local while
372    /// `StorageDead` statements cause the memory to be freed. In other words,
373    /// `StorageLive`/`StorageDead` act like the heap operations `allocate`/`deallocate`, but for
374    /// stack-allocated local variables. Using a local in any way (not only reading/writing from it)
375    /// while it is unallocated is UB.
376    ///
377    /// Some locals have no `StorageLive` or `StorageDead` statements within the entire MIR body.
378    /// These locals are implicitly allocated for the full duration of the function. There is a
379    /// convenience method at `rustc_mir_dataflow::storage::always_storage_live_locals` for
380    /// computing these locals.
381    ///
382    /// If the local is already allocated, calling `StorageLive` again will implicitly free the
383    /// local and then allocate fresh uninitialized memory. If a local is already deallocated,
384    /// calling `StorageDead` again is a NOP.
385    StorageLive(Local),
386
387    /// See `StorageLive` above.
388    StorageDead(Local),
389
390    /// Retag references in the given place, ensuring they got fresh tags.
391    ///
392    /// This is part of the Stacked Borrows model. These statements are currently only interpreted
393    /// by miri and only generated when `-Z mir-emit-retag` is passed. See
394    /// <https://internals.rust-lang.org/t/stacked-borrows-an-aliasing-model-for-rust/8153/> for
395    /// more details.
396    ///
397    /// For code that is not specific to stacked borrows, you should consider retags to read and
398    /// modify the place in an opaque way.
399    ///
400    /// Only `RetagKind::Default` and `RetagKind::FnEntry` are permitted.
401    Retag(RetagKind, Box<Place<'tcx>>),
402
403    /// This statement exists to preserve a trace of a scrutinee matched against a wildcard binding.
404    /// This is especially useful for `let _ = PLACE;` bindings that desugar to a single
405    /// `PlaceMention(PLACE)`.
406    ///
407    /// When executed at runtime, this computes the given place, but then discards
408    /// it without doing a load. `let _ = *ptr;` is fine even if the pointer is dangling.
409    PlaceMention(Box<Place<'tcx>>),
410
411    /// Encodes a user's type ascription. These need to be preserved
412    /// intact so that NLL can respect them. For example:
413    /// ```ignore (illustrative)
414    /// let a: T = y;
415    /// ```
416    /// The effect of this annotation is to relate the type `T_y` of the place `y`
417    /// to the user-given type `T`. The effect depends on the specified variance:
418    ///
419    /// - `Covariant` -- requires that `T_y <: T`
420    /// - `Contravariant` -- requires that `T_y :> T`
421    /// - `Invariant` -- requires that `T_y == T`
422    /// - `Bivariant` -- no effect
423    ///
424    /// When executed at runtime this is a nop.
425    ///
426    /// Disallowed after drop elaboration.
427    AscribeUserType(Box<(Place<'tcx>, UserTypeProjection)>, ty::Variance),
428
429    /// Carries control-flow-sensitive information injected by `-Cinstrument-coverage`,
430    /// such as where to generate physical coverage-counter-increments during codegen.
431    ///
432    /// Coverage statements are used in conjunction with the coverage mappings and other
433    /// information stored in the function's
434    /// [`mir::Body::function_coverage_info`](crate::mir::Body::function_coverage_info).
435    /// (For inlined MIR, take care to look up the *original function's* coverage info.)
436    ///
437    /// Interpreters and codegen backends that don't support coverage instrumentation
438    /// can usually treat this as a no-op.
439    Coverage(
440        // Coverage statements are unlikely to ever contain type information in
441        // the foreseeable future, so excluding them from TypeFoldable/TypeVisitable
442        // avoids some unhelpful derive boilerplate.
443        #[type_foldable(identity)]
444        #[type_visitable(ignore)]
445        CoverageKind,
446    ),
447
448    /// Denotes a call to an intrinsic that does not require an unwind path and always returns.
449    /// This avoids adding a new block and a terminator for simple intrinsics.
450    Intrinsic(Box<NonDivergingIntrinsic<'tcx>>),
451
452    /// Instructs the const eval interpreter to increment a counter; this counter is used to track
453    /// how many steps the interpreter has taken. It is used to prevent the user from writing const
454    /// code that runs for too long or infinitely. Other than in the const eval interpreter, this
455    /// is a no-op.
456    ConstEvalCounter,
457
458    /// No-op. Useful for deleting instructions without affecting statement indices.
459    Nop,
460
461    /// Marker statement indicating where `place` would be dropped.
462    /// This is semantically equivalent to `Nop`, so codegen and MIRI should interpret this
463    /// statement as such.
464    /// The only use case of this statement is for linting in MIR to detect temporary lifetime
465    /// changes.
466    BackwardIncompatibleDropHint {
467        /// Place to drop
468        place: Box<Place<'tcx>>,
469        /// Reason for backward incompatibility
470        reason: BackwardIncompatibleDropReason,
471    },
472}
473
474#[derive(
475    Clone,
476    TyEncodable,
477    TyDecodable,
478    Debug,
479    PartialEq,
480    Hash,
481    HashStable,
482    TypeFoldable,
483    TypeVisitable
484)]
485pub enum NonDivergingIntrinsic<'tcx> {
486    /// Denotes a call to the intrinsic function `assume`.
487    ///
488    /// The operand must be a boolean. Optimizers may use the value of the boolean to backtrack its
489    /// computation to infer information about other variables. So if the boolean came from a
490    /// `x < y` operation, subsequent operations on `x` and `y` could elide various bound checks.
491    /// If the argument is `false`, this operation is equivalent to `TerminatorKind::Unreachable`.
492    Assume(Operand<'tcx>),
493
494    /// Denotes a call to the intrinsic function `copy_nonoverlapping`.
495    ///
496    /// First, all three operands are evaluated. `src` and `dest` must each be a reference, pointer,
497    /// or `Box` pointing to the same type `T`. `count` must evaluate to a `usize`. Then, `src` and
498    /// `dest` are dereferenced, and `count * size_of::<T>()` bytes beginning with the first byte of
499    /// the `src` place are copied to the contiguous range of bytes beginning with the first byte
500    /// of `dest`.
501    ///
502    /// **Needs clarification**: In what order are operands computed and dereferenced? It should
503    /// probably match the order for assignment, but that is also undecided.
504    ///
505    /// **Needs clarification**: Is this typed or not, ie is there a typed load and store involved?
506    /// I vaguely remember Ralf saying somewhere that he thought it should not be.
507    CopyNonOverlapping(CopyNonOverlapping<'tcx>),
508}
509
510/// Describes what kind of retag is to be performed.
511#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, Hash, HashStable)]
512#[rustc_pass_by_value]
513pub enum RetagKind {
514    /// The initial retag of arguments when entering a function.
515    FnEntry,
516    /// Retag preparing for a two-phase borrow.
517    TwoPhase,
518    /// Retagging raw pointers.
519    Raw,
520    /// A "normal" retag.
521    Default,
522}
523
524/// The `FakeReadCause` describes the type of pattern why a FakeRead statement exists.
525#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, Hash, HashStable, PartialEq)]
526pub enum FakeReadCause {
527    /// A fake read injected into a match guard to ensure that the discriminants
528    /// that are being matched on aren't modified while the match guard is being
529    /// evaluated.
530    ///
531    /// At the beginning of each match guard, a [fake borrow][FakeBorrowKind] is
532    /// inserted for each discriminant accessed in the entire `match` statement.
533    ///
534    /// Then, at the end of the match guard, a `FakeRead(ForMatchGuard)` is
535    /// inserted to keep the fake borrows alive until that point.
536    ///
537    /// This should ensure that you cannot change the variant for an enum while
538    /// you are in the midst of matching on it.
539    ForMatchGuard,
540
541    /// Fake read of the scrutinee of a `match` or destructuring `let`
542    /// (i.e. `let` with non-trivial pattern).
543    ///
544    /// In `match x { ... }`, we generate a `FakeRead(ForMatchedPlace, x)`
545    /// and insert it into the `otherwise_block` (which is supposed to be
546    /// unreachable for irrefutable pattern-matches like `match` or `let`).
547    ///
548    /// This is necessary because `let x: !; match x {}` doesn't generate any
549    /// actual read of x, so we need to generate a `FakeRead` to check that it
550    /// is initialized.
551    ///
552    /// If the `FakeRead(ForMatchedPlace)` is being performed with a closure
553    /// that doesn't capture the required upvars, the `FakeRead` within the
554    /// closure is omitted entirely.
555    ///
556    /// To make sure that this is still sound, if a closure matches against
557    /// a Place starting with an Upvar, we hoist the `FakeRead` to the
558    /// definition point of the closure.
559    ///
560    /// If the `FakeRead` comes from being hoisted out of a closure like this,
561    /// we record the `LocalDefId` of the closure. Otherwise, the `Option` will be `None`.
562    //
563    // We can use LocalDefId here since fake read statements are removed
564    // before codegen in the `CleanupNonCodegenStatements` pass.
565    ForMatchedPlace(Option<LocalDefId>),
566
567    /// A fake read injected into a match guard to ensure that the places
568    /// bound by the pattern are immutable for the duration of the match guard.
569    ///
570    /// Within a match guard, references are created for each place that the
571    /// pattern creates a binding for — this is known as the `RefWithinGuard`
572    /// version of the variables. To make sure that the references stay
573    /// alive until the end of the match guard, and properly prevent the
574    /// places in question from being modified, a `FakeRead(ForGuardBinding)`
575    /// is inserted at the end of the match guard.
576    ///
577    /// For details on how these references are created, see the extensive
578    /// documentation on `bind_matched_candidate_for_guard` in
579    /// `rustc_mir_build`.
580    ForGuardBinding,
581
582    /// Officially, the semantics of
583    ///
584    /// `let pattern = <expr>;`
585    ///
586    /// is that `<expr>` is evaluated into a temporary and then this temporary is
587    /// into the pattern.
588    ///
589    /// However, if we see the simple pattern `let var = <expr>`, we optimize this to
590    /// evaluate `<expr>` directly into the variable `var`. This is mostly unobservable,
591    /// but in some cases it can affect the borrow checker, as in #53695.
592    ///
593    /// Therefore, we insert a `FakeRead(ForLet)` immediately after each `let`
594    /// with a trivial pattern.
595    ///
596    /// FIXME: `ExprUseVisitor` has an entirely different opinion on what `FakeRead(ForLet)`
597    /// is supposed to mean. If it was accurate to what MIR lowering does,
598    /// would it even make sense to hoist these out of closures like
599    /// `ForMatchedPlace`?
600    ForLet(Option<LocalDefId>),
601
602    /// Currently, index expressions overloaded through the `Index` trait
603    /// get lowered differently than index expressions with builtin semantics
604    /// for arrays and slices — the latter will emit code to perform
605    /// bound checks, and then return a MIR place that will only perform the
606    /// indexing "for real" when it gets incorporated into an instruction.
607    ///
608    /// This is observable in the fact that the following compiles:
609    ///
610    /// ```
611    /// fn f(x: &mut [&mut [u32]], i: usize) {
612    ///     x[i][x[i].len() - 1] += 1;
613    /// }
614    /// ```
615    ///
616    /// However, we need to be careful to not let the user invalidate the
617    /// bound check with an expression like
618    ///
619    /// `(*x)[1][{ x = y; 4}]`
620    ///
621    /// Here, the first bounds check would be invalidated when we evaluate the
622    /// second index expression. To make sure that this doesn't happen, we
623    /// create a fake borrow of `x` and hold it while we evaluate the second
624    /// index.
625    ///
626    /// This borrow is kept alive by a `FakeRead(ForIndex)` at the end of its
627    /// scope.
628    ForIndex,
629}
630
631#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
632#[derive(TypeFoldable, TypeVisitable)]
633pub struct CopyNonOverlapping<'tcx> {
634    pub src: Operand<'tcx>,
635    pub dst: Operand<'tcx>,
636    /// Number of elements to copy from src to dest, not bytes.
637    pub count: Operand<'tcx>,
638}
639
640/// Represents how a [`TerminatorKind::Call`] was constructed.
641/// Used only for diagnostics.
642#[derive(Clone, Copy, TyEncodable, TyDecodable, Debug, PartialEq, Hash, HashStable)]
643#[derive(TypeFoldable, TypeVisitable)]
644pub enum CallSource {
645    /// This came from something such as `a > b` or `a + b`. In THIR, if `from_hir_call`
646    /// is false then this is the desugaring.
647    OverloadedOperator,
648    /// This was from comparison generated by a match, used by const-eval for better errors
649    /// when the comparison cannot be done in compile time.
650    ///
651    /// (see <https://github.com/rust-lang/rust/issues/90237>)
652    MatchCmp,
653    /// Other types of desugaring that did not come from the HIR, but we don't care about
654    /// for diagnostics (yet).
655    Misc,
656    /// Use of value, generating a clone function call
657    Use,
658    /// Normal function call, no special source
659    Normal,
660}
661
662#[derive(Clone, Copy, Debug, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
663#[derive(TypeFoldable, TypeVisitable)]
664/// The macro that an inline assembly block was created by
665pub enum InlineAsmMacro {
666    /// The `asm!` macro
667    Asm,
668    /// The `naked_asm!` macro
669    NakedAsm,
670}
671
672///////////////////////////////////////////////////////////////////////////
673// Terminators
674
675/// The various kinds of terminators, representing ways of exiting from a basic block.
676///
677/// A note on unwinding: Panics may occur during the execution of some terminators. Depending on the
678/// `-C panic` flag, this may either cause the program to abort or the call stack to unwind. Such
679/// terminators have a `unwind: UnwindAction` field on them. If stack unwinding occurs, then
680/// once the current function is reached, an action will be taken based on the `unwind` field.
681/// If the action is `Cleanup`, then the execution continues at the given basic block. If the
682/// action is `Continue` then no cleanup is performed, and the stack continues unwinding.
683///
684/// The basic block pointed to by a `Cleanup` unwind action must have its `cleanup` flag set.
685/// `cleanup` basic blocks have a couple restrictions:
686///  1. All `unwind` fields in them must be `UnwindAction::Terminate` or `UnwindAction::Unreachable`.
687///  2. `Return` terminators are not allowed in them. `Terminate` and `Resume` terminators are.
688///  3. All other basic blocks (in the current body) that are reachable from `cleanup` basic blocks
689///     must also be `cleanup`. This is a part of the type system and checked statically, so it is
690///     still an error to have such an edge in the CFG even if it's known that it won't be taken at
691///     runtime.
692///  4. The control flow between cleanup blocks must look like an upside down tree. Roughly
693///     speaking, this means that control flow that looks like a V is allowed, while control flow
694///     that looks like a W is not. This is necessary to ensure that landing pad information can be
695///     correctly codegened on MSVC. More precisely:
696///
697///     Begin with the standard control flow graph `G`. Modify `G` as follows: for any two cleanup
698///     vertices `u` and `v` such that `u` dominates `v`, contract `u` and `v` into a single vertex,
699///     deleting self edges and duplicate edges in the process. Now remove all vertices from `G`
700///     that are not cleanup vertices or are not reachable. The resulting graph must be an inverted
701///     tree, that is each vertex may have at most one successor and there may be no cycles.
702#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
703pub enum TerminatorKind<'tcx> {
704    /// Block has one successor; we continue execution there.
705    Goto { target: BasicBlock },
706
707    /// Switches based on the computed value.
708    ///
709    /// First, evaluates the `discr` operand. The type of the operand must be a signed or unsigned
710    /// integer, char, or bool, and must match the given type. Then, if the list of switch targets
711    /// contains the computed value, continues execution at the associated basic block. Otherwise,
712    /// continues execution at the "otherwise" basic block.
713    ///
714    /// Target values may not appear more than once.
715    SwitchInt {
716        /// The discriminant value being tested.
717        discr: Operand<'tcx>,
718        targets: SwitchTargets,
719    },
720
721    /// Indicates that the landing pad is finished and that the process should continue unwinding.
722    ///
723    /// Like a return, this marks the end of this invocation of the function.
724    ///
725    /// Only permitted in cleanup blocks. `Resume` is not permitted with `-C unwind=abort` after
726    /// deaggregation runs.
727    UnwindResume,
728
729    /// Indicates that the landing pad is finished and that the process should terminate.
730    ///
731    /// Used to prevent unwinding for foreign items or with `-C unwind=abort`. Only permitted in
732    /// cleanup blocks.
733    UnwindTerminate(UnwindTerminateReason),
734
735    /// Returns from the function.
736    ///
737    /// Like function calls, the exact semantics of returns in Rust are unclear. Returning very
738    /// likely at least assigns the value currently in the return place (`_0`) to the place
739    /// specified in the associated `Call` terminator in the calling function, as if assigned via
740    /// `dest = move _0`. It might additionally do other things, like have side-effects in the
741    /// aliasing model.
742    ///
743    /// If the body is a coroutine body, this has slightly different semantics; it instead causes a
744    /// `CoroutineState::Returned(_0)` to be created (as if by an `Aggregate` rvalue) and assigned
745    /// to the return place.
746    Return,
747
748    /// Indicates a terminator that can never be reached.
749    ///
750    /// Executing this terminator is UB.
751    Unreachable,
752
753    /// The behavior of this statement differs significantly before and after drop elaboration.
754    ///
755    /// After drop elaboration: `Drop` terminators are a complete nop for types that have no drop
756    /// glue. For other types, `Drop` terminators behave exactly like a call to
757    /// `core::mem::drop_in_place` with a pointer to the given place.
758    ///
759    /// `Drop` before drop elaboration is a *conditional* execution of the drop glue. Specifically,
760    /// the `Drop` will be executed if...
761    ///
762    /// **Needs clarification**: End of that sentence. This in effect should document the exact
763    /// behavior of drop elaboration. The following sounds vaguely right, but I'm not quite sure:
764    ///
765    /// > The drop glue is executed if, among all statements executed within this `Body`, an assignment to
766    /// > the place or one of its "parents" occurred more recently than a move out of it. This does not
767    /// > consider indirect assignments.
768    ///
769    /// The `replace` flag indicates whether this terminator was created as part of an assignment.
770    /// This should only be used for diagnostic purposes, and does not have any operational
771    /// meaning.
772    ///
773    /// Async drop processing:
774    /// In compiler/rustc_mir_build/src/build/scope.rs we detect possible async drop:
775    ///   drop of object with `needs_async_drop`.
776    /// Async drop later, in StateTransform pass, may be expanded into additional yield-point
777    ///   for poll-loop of async drop future.
778    /// So we need prepared 'drop' target block in the similar way as for `Yield` terminator
779    ///   (see `drops.build_mir::<CoroutineDrop>` in scopes.rs).
780    /// In compiler/rustc_mir_transform/src/elaborate_drops.rs for object implementing `AsyncDrop` trait
781    ///   we need to prepare async drop feature - resolve `AsyncDrop::drop` and codegen call.
782    /// `async_fut` is set to the corresponding local.
783    /// For coroutine drop we don't need this logic because coroutine drop works with the same
784    ///   layout object as coroutine itself. So `async_fut` will be `None` for coroutine drop.
785    /// Both `drop` and `async_fut` fields are only used in compiler/rustc_mir_transform/src/coroutine.rs,
786    ///   StateTransform pass. In `expand_async_drops` async drops are expanded
787    ///   into one or two yield points with poll ready/pending switch.
788    /// When a coroutine has any internal async drop, the coroutine drop function will be async
789    ///   (generated by `create_coroutine_drop_shim_async`, not `create_coroutine_drop_shim`).
790    Drop {
791        place: Place<'tcx>,
792        target: BasicBlock,
793        unwind: UnwindAction,
794        replace: bool,
795        /// Cleanup to be done if the coroutine is dropped at this suspend point (for async drop).
796        drop: Option<BasicBlock>,
797        /// Prepared async future local (for async drop)
798        async_fut: Option<Local>,
799    },
800
801    /// Roughly speaking, evaluates the `func` operand and the arguments, and starts execution of
802    /// the referred to function. The operand types must match the argument types of the function.
803    /// The return place type must match the return type. The type of the `func` operand must be
804    /// callable, meaning either a function pointer, a function type, or a closure type.
805    ///
806    /// **Needs clarification**: The exact semantics of this. Current backends rely on `move`
807    /// operands not aliasing the return place. It is unclear how this is justified in MIR, see
808    /// [#71117].
809    ///
810    /// [#71117]: https://github.com/rust-lang/rust/issues/71117
811    Call {
812        /// The function that’s being called.
813        func: Operand<'tcx>,
814        /// Arguments the function is called with.
815        /// These are owned by the callee, which is free to modify them.
816        /// This allows the memory occupied by "by-value" arguments to be
817        /// reused across function calls without duplicating the contents.
818        /// The span for each arg is also included
819        /// (e.g. `a` and `b` in `x.foo(a, b)`).
820        args: Box<[Spanned<Operand<'tcx>>]>,
821        /// Where the returned value will be written
822        destination: Place<'tcx>,
823        /// Where to go after this call returns. If none, the call necessarily diverges.
824        target: Option<BasicBlock>,
825        /// Action to be taken if the call unwinds.
826        unwind: UnwindAction,
827        /// Where this call came from in HIR/THIR.
828        call_source: CallSource,
829        /// This `Span` is the span of the function, without the dot and receiver
830        /// e.g. `foo(a, b)` in `x.foo(a, b)`
831        fn_span: Span,
832    },
833
834    /// Tail call.
835    ///
836    /// Roughly speaking this is a chimera of [`Call`] and [`Return`], with some caveats.
837    /// Semantically tail calls consists of two actions:
838    /// - pop of the current stack frame
839    /// - a call to the `func`, with the return address of the **current** caller
840    ///   - so that a `return` inside `func` returns to the caller of the caller
841    ///     of the function that is currently being executed
842    ///
843    /// Note that in difference with [`Call`] this is missing
844    /// - `destination` (because it's always the return place)
845    /// - `target` (because it's always taken from the current stack frame)
846    /// - `unwind` (because it's always taken from the current stack frame)
847    ///
848    /// [`Call`]: TerminatorKind::Call
849    /// [`Return`]: TerminatorKind::Return
850    TailCall {
851        /// The function that’s being called.
852        func: Operand<'tcx>,
853        /// Arguments the function is called with.
854        /// These are owned by the callee, which is free to modify them.
855        /// This allows the memory occupied by "by-value" arguments to be
856        /// reused across function calls without duplicating the contents.
857        args: Box<[Spanned<Operand<'tcx>>]>,
858        // FIXME(explicit_tail_calls): should we have the span for `become`? is this span accurate? do we need it?
859        /// This `Span` is the span of the function, without the dot and receiver
860        /// (e.g. `foo(a, b)` in `x.foo(a, b)`
861        fn_span: Span,
862    },
863
864    /// Evaluates the operand, which must have type `bool`. If it is not equal to `expected`,
865    /// initiates a panic. Initiating a panic corresponds to a `Call` terminator with some
866    /// unspecified constant as the function to call, all the operands stored in the `AssertMessage`
867    /// as parameters, and `None` for the destination. Keep in mind that the `cleanup` path is not
868    /// necessarily executed even in the case of a panic, for example in `-C panic=abort`. If the
869    /// assertion does not fail, execution continues at the specified basic block.
870    ///
871    /// When overflow checking is disabled and this is run-time MIR (as opposed to compile-time MIR
872    /// that is used for CTFE), the following variants of this terminator behave as `goto target`:
873    /// - `OverflowNeg(..)`,
874    /// - `Overflow(op, ..)` if op is add, sub, mul, shl, shr, but NOT div or rem.
875    Assert {
876        cond: Operand<'tcx>,
877        expected: bool,
878        msg: Box<AssertMessage<'tcx>>,
879        target: BasicBlock,
880        unwind: UnwindAction,
881    },
882
883    /// Marks a suspend point.
884    ///
885    /// Like `Return` terminators in coroutine bodies, this computes `value` and then a
886    /// `CoroutineState::Yielded(value)` as if by `Aggregate` rvalue. That value is then assigned to
887    /// the return place of the function calling this one, and execution continues in the calling
888    /// function. When next invoked with the same first argument, execution of this function
889    /// continues at the `resume` basic block, with the second argument written to the `resume_arg`
890    /// place. If the coroutine is dropped before then, the `drop` basic block is invoked.
891    ///
892    /// Note that coroutines can be (unstably) cloned under certain conditions, which means that
893    /// this terminator can **return multiple times**! MIR optimizations that reorder code into
894    /// different basic blocks needs to be aware of that.
895    /// See <https://github.com/rust-lang/rust/issues/95360>.
896    ///
897    /// Not permitted in bodies that are not coroutine bodies, or after coroutine lowering.
898    ///
899    /// **Needs clarification**: What about the evaluation order of the `resume_arg` and `value`?
900    Yield {
901        /// The value to return.
902        value: Operand<'tcx>,
903        /// Where to resume to.
904        resume: BasicBlock,
905        /// The place to store the resume argument in.
906        resume_arg: Place<'tcx>,
907        /// Cleanup to be done if the coroutine is dropped at this suspend point.
908        drop: Option<BasicBlock>,
909    },
910
911    /// Indicates the end of dropping a coroutine.
912    ///
913    /// Semantically just a `return` (from the coroutines drop glue). Only permitted in the same situations
914    /// as `yield`.
915    ///
916    /// **Needs clarification**: Is that even correct? The coroutine drop code is always confusing
917    /// to me, because it's not even really in the current body.
918    ///
919    /// **Needs clarification**: Are there type system constraints on these terminators? Should
920    /// there be a "block type" like `cleanup` blocks for them?
921    CoroutineDrop,
922
923    /// A block where control flow only ever takes one real path, but borrowck needs to be more
924    /// conservative.
925    ///
926    /// At runtime this is semantically just a goto.
927    ///
928    /// Disallowed after drop elaboration.
929    FalseEdge {
930        /// The target normal control flow will take.
931        real_target: BasicBlock,
932        /// A block control flow could conceptually jump to, but won't in
933        /// practice.
934        imaginary_target: BasicBlock,
935    },
936
937    /// A terminator for blocks that only take one path in reality, but where we reserve the right
938    /// to unwind in borrowck, even if it won't happen in practice. This can arise in infinite loops
939    /// with no function calls for example.
940    ///
941    /// At runtime this is semantically just a goto.
942    ///
943    /// Disallowed after drop elaboration.
944    FalseUnwind {
945        /// The target normal control flow will take.
946        real_target: BasicBlock,
947        /// The imaginary cleanup block link. This particular path will never be taken
948        /// in practice, but in order to avoid fragility we want to always
949        /// consider it in borrowck. We don't want to accept programs which
950        /// pass borrowck only when `panic=abort` or some assertions are disabled
951        /// due to release vs. debug mode builds.
952        unwind: UnwindAction,
953    },
954
955    /// Block ends with an inline assembly block. This is a terminator since
956    /// inline assembly is allowed to diverge.
957    InlineAsm {
958        /// Macro used to create this inline asm: one of `asm!` or `naked_asm!`
959        asm_macro: InlineAsmMacro,
960
961        /// The template for the inline assembly, with placeholders.
962        #[type_foldable(identity)]
963        #[type_visitable(ignore)]
964        template: &'tcx [InlineAsmTemplatePiece],
965
966        /// The operands for the inline assembly, as `Operand`s or `Place`s.
967        operands: Box<[InlineAsmOperand<'tcx>]>,
968
969        /// Miscellaneous options for the inline assembly.
970        options: InlineAsmOptions,
971
972        /// Source spans for each line of the inline assembly code. These are
973        /// used to map assembler errors back to the line in the source code.
974        #[type_foldable(identity)]
975        #[type_visitable(ignore)]
976        line_spans: &'tcx [Span],
977
978        /// Valid targets for the inline assembly.
979        /// The first element is the fallthrough destination, unless
980        /// asm_macro == InlineAsmMacro::NakedAsm or InlineAsmOptions::NORETURN is set.
981        targets: Box<[BasicBlock]>,
982
983        /// Action to be taken if the inline assembly unwinds. This is present
984        /// if and only if InlineAsmOptions::MAY_UNWIND is set.
985        unwind: UnwindAction,
986    },
987}
988
989#[derive(
990    Clone,
991    Debug,
992    TyEncodable,
993    TyDecodable,
994    Hash,
995    HashStable,
996    PartialEq,
997    TypeFoldable,
998    TypeVisitable
999)]
1000pub enum BackwardIncompatibleDropReason {
1001    Edition2024,
1002}
1003
1004#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
1005pub struct SwitchTargets {
1006    /// Possible values. For each value, the location to branch to is found in
1007    /// the corresponding element in the `targets` vector.
1008    pub(super) values: SmallVec<[Pu128; 1]>,
1009
1010    /// Possible branch targets. The last element of this vector is used for
1011    /// the "otherwise" branch, so `targets.len() == values.len() + 1` always
1012    /// holds.
1013    //
1014    // Note: This invariant is non-obvious and easy to violate. This would be a
1015    // more rigorous representation:
1016    //
1017    //   normal: SmallVec<[(Pu128, BasicBlock); 1]>,
1018    //   otherwise: BasicBlock,
1019    //
1020    // But it's important to have the targets in a sliceable type, because
1021    // target slices show up elsewhere. E.g. `TerminatorKind::InlineAsm` has a
1022    // boxed slice, and `TerminatorKind::FalseEdge` has a single target that
1023    // can be converted to a slice with `slice::from_ref`.
1024    //
1025    // Why does this matter? In functions like `TerminatorKind::successors` we
1026    // return `impl Iterator` and a non-slice-of-targets representation here
1027    // causes problems because multiple different concrete iterator types would
1028    // be involved and we would need a boxed trait object, which requires an
1029    // allocation, which is expensive if done frequently.
1030    pub(super) targets: SmallVec<[BasicBlock; 2]>,
1031}
1032
1033/// Action to be taken when a stack unwind happens.
1034#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
1035#[derive(TypeFoldable, TypeVisitable)]
1036pub enum UnwindAction {
1037    /// No action is to be taken. Continue unwinding.
1038    ///
1039    /// This is similar to `Cleanup(bb)` where `bb` does nothing but `Resume`, but they are not
1040    /// equivalent, as presence of `Cleanup(_)` will make a frame non-POF.
1041    Continue,
1042    /// Triggers undefined behavior if unwind happens.
1043    Unreachable,
1044    /// Terminates the execution if unwind happens.
1045    ///
1046    /// Depending on the platform and situation this may cause a non-unwindable panic or abort.
1047    Terminate(UnwindTerminateReason),
1048    /// Cleanups to be done.
1049    Cleanup(BasicBlock),
1050}
1051
1052/// The reason we are terminating the process during unwinding.
1053#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
1054#[derive(TypeFoldable, TypeVisitable)]
1055pub enum UnwindTerminateReason {
1056    /// Unwinding is just not possible given the ABI of this function.
1057    Abi,
1058    /// We were already cleaning up for an ongoing unwind, and a *second*, *nested* unwind was
1059    /// triggered by the drop glue.
1060    InCleanup,
1061}
1062
1063/// Information about an assertion failure.
1064#[derive(Clone, Hash, HashStable, PartialEq, Debug)]
1065#[derive(TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
1066pub enum AssertKind<O> {
1067    BoundsCheck { len: O, index: O },
1068    Overflow(BinOp, O, O),
1069    OverflowNeg(O),
1070    DivisionByZero(O),
1071    RemainderByZero(O),
1072    ResumedAfterReturn(CoroutineKind),
1073    ResumedAfterPanic(CoroutineKind),
1074    ResumedAfterDrop(CoroutineKind),
1075    MisalignedPointerDereference { required: O, found: O },
1076    NullPointerDereference,
1077    InvalidEnumConstruction(O),
1078}
1079
1080#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
1081#[derive(TypeFoldable, TypeVisitable)]
1082pub enum InlineAsmOperand<'tcx> {
1083    In {
1084        reg: InlineAsmRegOrRegClass,
1085        value: Operand<'tcx>,
1086    },
1087    Out {
1088        reg: InlineAsmRegOrRegClass,
1089        late: bool,
1090        place: Option<Place<'tcx>>,
1091    },
1092    InOut {
1093        reg: InlineAsmRegOrRegClass,
1094        late: bool,
1095        in_value: Operand<'tcx>,
1096        out_place: Option<Place<'tcx>>,
1097    },
1098    Const {
1099        value: Box<ConstOperand<'tcx>>,
1100    },
1101    SymFn {
1102        value: Box<ConstOperand<'tcx>>,
1103    },
1104    SymStatic {
1105        def_id: DefId,
1106    },
1107    Label {
1108        /// This represents the index into the `targets` array in `TerminatorKind::InlineAsm`.
1109        target_index: usize,
1110    },
1111}
1112
1113/// Type for MIR `Assert` terminator error messages.
1114pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
1115
1116///////////////////////////////////////////////////////////////////////////
1117// Places
1118
1119/// Places roughly correspond to a "location in memory." Places in MIR are the same mathematical
1120/// object as places in Rust. This of course means that what exactly they are is undecided and part
1121/// of the Rust memory model. However, they will likely contain at least the following pieces of
1122/// information in some form:
1123///
1124///  1. The address in memory that the place refers to.
1125///  2. The provenance with which the place is being accessed.
1126///  3. The type of the place and an optional variant index. See [`PlaceTy`][super::PlaceTy].
1127///  4. Optionally, some metadata. This exists if and only if the type of the place is not `Sized`.
1128///
1129/// We'll give a description below of how all pieces of the place except for the provenance are
1130/// calculated. We cannot give a description of the provenance, because that is part of the
1131/// undecided aliasing model - we only include it here at all to acknowledge its existence.
1132///
1133/// Each local naturally corresponds to the place `Place { local, projection: [] }`. This place has
1134/// the address of the local's allocation and the type of the local.
1135///
1136/// For places that are not locals, i.e., those with a non-empty list of projections, we define the
1137/// values as a function of the parent place, that is, the place with its last [`ProjectionElem`]
1138/// stripped. The way this is computed of course depends on the kind of that last projection
1139/// element:
1140///
1141///  - [`Downcast`](ProjectionElem::Downcast): This projection sets the place's variant index to the
1142///    given one, and makes no other changes. A `Downcast` projection must always be followed
1143///    immediately by a `Field` projection.
1144///  - [`Field`](ProjectionElem::Field): `Field` projections take their parent place and create a
1145///    place referring to one of the fields of the type. The resulting address is the parent
1146///    address, plus the offset of the field. The type becomes the type of the field. If the parent
1147///    was unsized and so had metadata associated with it, then the metadata is retained if the
1148///    field is unsized and thrown out if it is sized.
1149///
1150///    These projections are only legal for tuples, ADTs, closures, and coroutines. If the ADT or
1151///    coroutine has more than one variant, the parent place's variant index must be set, indicating
1152///    which variant is being used. If it has just one variant, the variant index may or may not be
1153///    included - the single possible variant is inferred if it is not included.
1154///  - [`OpaqueCast`](ProjectionElem::OpaqueCast): This projection changes the place's type to the
1155///    given one, and makes no other changes. An `OpaqueCast` projection on any type other than an
1156///    opaque type from the current crate is not well-formed.
1157///  - [`ConstantIndex`](ProjectionElem::ConstantIndex): Computes an offset in units of `T` into the
1158///    place as described in the documentation for the `ProjectionElem`. The resulting address is
1159///    the parent's address plus that offset, and the type is `T`. This is only legal if the parent
1160///    place has type `[T; N]` or `[T]` (*not* `&[T]`). Since such a `T` is always sized, any
1161///    resulting metadata is thrown out.
1162///  - [`Subslice`](ProjectionElem::Subslice): This projection calculates an offset and a new
1163///    address in a similar manner as `ConstantIndex`. It is also only legal on `[T; N]` and `[T]`.
1164///    However, this yields a `Place` of type `[T]`, and additionally sets the metadata to be the
1165///    length of the subslice.
1166///  - [`Index`](ProjectionElem::Index): Like `ConstantIndex`, only legal on `[T; N]` or `[T]`.
1167///    However, `Index` additionally takes a local from which the value of the index is computed at
1168///    runtime. Computing the value of the index involves interpreting the `Local` as a
1169///    `Place { local, projection: [] }`, and then computing its value as if done via
1170///    [`Operand::Copy`]. The array/slice is then indexed with the resulting value. The local must
1171///    have type `usize`.
1172///  - [`Deref`](ProjectionElem::Deref): Derefs are the last type of projection, and the most
1173///    complicated. They are only legal on parent places that are references, pointers, or `Box`. A
1174///    `Deref` projection begins by loading a value from the parent place, as if by
1175///    [`Operand::Copy`]. It then dereferences the resulting pointer, creating a place of the
1176///    pointee's type. The resulting address is the address that was stored in the pointer. If the
1177///    pointee type is unsized, the pointer additionally stores the value of the metadata.
1178///
1179/// The "validity invariant" of places is the same as that of raw pointers, meaning that e.g.
1180/// `*ptr` on a dangling or unaligned pointer is never UB. (Later doing a load/store on that place
1181/// or turning it into a reference can be UB though!) The only ways a place computation can
1182/// cause UB are:
1183/// - On a `Deref` projection, we do an actual load of the inner place, with all the usual
1184///   consequences (the inner place must be based on an aligned pointer, it must point to allocated
1185///   memory, the aliasing model must allow reads, this must not be a data race).
1186/// - For the projections that perform pointer arithmetic, the offset must be in-bounds of an
1187///   allocation (i.e., the preconditions of `ptr::offset` must be met).
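///
/// As an illustrative sketch (the local numbering is hypothetical): given `p: &(u8, u16)`, the
/// expression `(*p).1` corresponds to a place built from a `Deref` followed by a `Field`
/// projection:
///
/// ```ignore (illustrative)
/// Place { local: _1 /* p */, projection: [Deref, Field(1, u16)] }
/// // printed in MIR output roughly as ((*_1).1: u16)
/// ```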
1188#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable, TypeFoldable, TypeVisitable)]
1189pub struct Place<'tcx> {
1190    pub local: Local,
1191
1192    /// projection out of a place (access a field, deref a pointer, etc)
1193    pub projection: &'tcx List<PlaceElem<'tcx>>,
1194}
1195
1196#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
1197#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
1198pub enum ProjectionElem<V, T> {
1199    Deref,
1200
1201    /// A field projection (e.g., `f` in `_1.f`) is one variant of [`ProjectionElem`]. Conceptually,
1202    /// rustc can determine whether the base place and the projected field refer to the same region
1203    /// of memory or to two different ones.
1204    /// Read more about projections in the [rustc-dev-guide][mir-datatypes]
1205    ///
1206    /// [mir-datatypes]: https://rustc-dev-guide.rust-lang.org/mir/index.html#mir-data-types
1207    Field(FieldIdx, T),
1208
1209    /// Index into a slice/array.
1210    ///
1211    /// Note that this does not also dereference, and so it does not exactly correspond to slice
1212    /// indexing in Rust. In other words, in the below Rust code:
1213    ///
1214    /// ```rust
1215    /// let x = &[1, 2, 3, 4];
1216    /// let i = 2;
1217    /// x[i];
1218    /// ```
1219    ///
1220    /// The `x[i]` is turned into a `Deref` followed by an `Index`, not just an `Index`. The same
1221    /// thing is true of the `ConstantIndex` and `Subslice` projections below.
1222    Index(V),
1223
1224    /// These indices are generated by slice patterns. Easiest to explain
1225    /// by example:
1226    ///
1227    /// ```ignore (illustrative)
1228    /// [X, _, .._, _, _] => { offset: 0, min_length: 4, from_end: false },
1229    /// [_, X, .._, _, _] => { offset: 1, min_length: 4, from_end: false },
1230    /// [_, _, .._, X, _] => { offset: 2, min_length: 4, from_end: true },
1231    /// [_, _, .._, _, X] => { offset: 1, min_length: 4, from_end: true },
1232    /// ```
1233    ConstantIndex {
1234        /// index or -index (in Python terms), depending on from_end
1235        offset: u64,
1236        /// The thing being indexed must be at least this long -- otherwise, the
1237        /// projection is UB.
1238        ///
1239        /// For arrays this is always the exact length.
1240        min_length: u64,
1241        /// Counting backwards from end? This is always false when indexing an
1242        /// array.
1243        from_end: bool,
1244    },
1245
1246    /// These indices are generated by slice patterns.
1247    ///
1248    /// If `from_end` is true, this is `slice[from..slice.len() - to]`.
1249    /// Otherwise, it is `array[from..to]`.
1250    ///
1251    /// This projection cannot have `ConstantIndex` or additional `Subslice` projections after it
1252    /// before runtime MIR.
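    ///
    /// As an illustrative sketch, a binding like `rest` in the slice pattern `[first, rest @ ..]`
    /// might be represented as:
    ///
    /// ```ignore (illustrative)
    /// // matching on a slice `[T]`: rest = slice[1..len - 0]
    /// Subslice { from: 1, to: 0, from_end: true }
    /// // matching on an array `[T; 4]`: rest = array[1..4]
    /// Subslice { from: 1, to: 4, from_end: false }
    /// ```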
1253    Subslice {
1254        from: u64,
1255        to: u64,
1256        /// Whether `to` counts from the start or end of the array/slice.
1257        /// For `PlaceElem`s this is `true` if and only if the base is a slice.
1258        /// For `ProjectionKind`, this can also be `true` for arrays.
1259        from_end: bool,
1260    },
1261
1262    /// "Downcast" to a variant of an enum or a coroutine.
1263    ///
1264    /// The included Symbol is the name of the variant, used for printing MIR.
1265    ///
1266    /// This operation itself is never UB, all it does is change the type of the place.
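    ///
    /// As an illustrative sketch, reading the payload of an `Option<T>` stored in `_1` uses a
    /// `Downcast` followed by a `Field` projection, printed roughly as:
    ///
    /// ```ignore (illustrative)
    /// ((_1 as Some).0: T)
    /// // i.e. the projection list [Downcast(Some, 1), Field(0, T)]
    /// ```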
1267    Downcast(Option<Symbol>, VariantIdx),
1268
1269    /// Like an explicit cast from an opaque type to a concrete type, but without
1270    /// requiring an intermediate variable.
1271    ///
1272    /// This is unused with `-Znext-solver`.
1273    OpaqueCast(T),
1274
1275    /// A transmute from an unsafe binder to the type that it wraps. This is a projection
1276    /// of a place, so it doesn't necessarily constitute a move out of the binder.
1277    UnwrapUnsafeBinder(T),
1278}
1279
1280/// Alias for projections as they appear in places, where the base is a place
1281/// and the index is a local.
1282pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
1283
1284///////////////////////////////////////////////////////////////////////////
1285// Operands
1286
1287/// An operand in MIR represents a "value" in Rust, the definition of which is undecided and part of
1288/// the memory model. One proposal for a definition of values can be found [on UCG][value-def].
1289///
1290/// [value-def]: https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/value-domain.md
1291///
1292/// The most common way to create values is via loading a place. Loading a place is an operation
1293/// which reads the memory of the place and converts it to a value. This is a fundamentally *typed*
1294/// operation. The nature of the value produced depends on the type of the conversion. Furthermore,
1295/// there may be other effects: if the type has a validity constraint loading the place might be UB
1296/// if the validity constraint is not met.
1297///
1298/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri
1299/// currently implements it, but it seems like this may be something to check against in the
1300/// validator.
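///
/// As an illustrative sketch (local numbering hypothetical; the exact pretty-printing may differ),
/// an assignment that reads a local is an `Operand::Copy` or `Operand::Move` of that local's place:
///
/// ```ignore (illustrative)
/// _2 = copy _1;   // Operand::Copy(_1)
/// _3 = move _2;   // Operand::Move(_2); don't read _2 again before re-initializing it
/// ```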
1301#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)]
1302pub enum Operand<'tcx> {
1303    /// Creates a value by loading the given place.
1304    ///
1305    /// Before drop elaboration, the type of the place must be `Copy`. After drop elaboration there
1306    /// is no such requirement.
1307    Copy(Place<'tcx>),
1308
1309    /// Creates a value by loading the place, just like the `Copy` operand.
1310    ///
1311    /// This *may* additionally overwrite the place with `uninit` bytes, depending on how we decide
1312    /// in [UCG#188]. You should not emit MIR that may attempt a subsequent second load of this
1313    /// place without first re-initializing it.
1314    ///
1315    /// **Needs clarification:** The operational impact of `Move` is unclear. Currently (both in
1316    /// Miri and codegen) it has no effect at all unless it appears in an argument to `Call`; for
1317    /// `Call` it allows the argument to be passed to the callee "in-place", i.e. the callee might
1318    /// just get a reference to this place instead of a full copy. Miri implements this with a
1319    /// combination of aliasing model "protectors" and putting `uninit` into the place. Ralf
1320    /// proposes that we don't want these semantics for `Move` in regular assignments, because
1321    /// loading a place should not have side-effects, and the aliasing model "protectors" are
1322    /// inherently tied to a function call. Are these the semantics we want for MIR? Is this
1323    /// something we can even decide without knowing more about Rust's memory model?
1324    ///
1325    /// [UCG#188]: https://github.com/rust-lang/unsafe-code-guidelines/issues/188
1326    Move(Place<'tcx>),
1327
1328    /// Constants are already semantically values, and remain unchanged.
1329    Constant(Box<ConstOperand<'tcx>>),
1330}
1331
1332#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
1333#[derive(TypeFoldable, TypeVisitable)]
1334pub struct ConstOperand<'tcx> {
1335    pub span: Span,
1336
1337    /// Optional user-given type: for something like
1338    /// `collect::<Vec<_>>`, this would be present and would
1339    /// indicate that `Vec<_>` was explicitly specified.
1340    ///
1341    /// Needed for NLL to impose user-given type constraints.
1342    pub user_ty: Option<UserTypeAnnotationIndex>,
1343
1344    pub const_: Const<'tcx>,
1345}
1346
1347///////////////////////////////////////////////////////////////////////////
1348// Rvalues
1349
1350/// The various kinds of rvalues that can appear in MIR.
1351///
1352/// Not all of these are allowed at every [`MirPhase`] - when this is the case, it's stated below.
1353///
1354/// Computing any rvalue begins by evaluating the places and operands in some order (**Needs
1355/// clarification**: Which order?). These are then used to produce a "value" - the same kind of
1356/// value that an [`Operand`] produces.
1357#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
1358pub enum Rvalue<'tcx> {
1359    /// Yields the operand unchanged
1360    Use(Operand<'tcx>),
1361
1362    /// Creates an array where each element is the value of the operand.
1363    ///
1364    /// Corresponds to source code like `[x; 32]`.
1365    Repeat(Operand<'tcx>, ty::Const<'tcx>),
1366
1367    /// Creates a reference of the indicated kind to the place.
1368    ///
1369    /// There is not much to document here, because besides the obvious parts the semantics of this
1370    /// are essentially entirely a part of the aliasing model. There are many UCG issues discussing
1371    /// exactly what the behavior of this operation should be.
1372    ///
1373    /// `Shallow` borrows are disallowed after drop lowering.
1374    Ref(Region<'tcx>, BorrowKind, Place<'tcx>),
1375
1376    /// Creates a pointer/reference to the given thread local.
1377    ///
1378    /// The yielded type is a `*mut T` if the static is mutable; otherwise, if the static is `extern`,
1379    /// a `*const T`; and if neither of those apply, a `&T`.
1380    ///
1381    /// **Note:** This is a runtime operation that actually executes code and is in this sense more
1382    /// like a function call. Also, eliminating dead stores of this rvalue causes `fn main() {}` to
1383    /// SIGILL for some reason that I (JakobDegen) never got a chance to look into.
1384    ///
1385    /// **Needs clarification**: Are there weird additional semantics here related to the runtime
1386    /// nature of this operation?
1387    ThreadLocalRef(DefId),
1388
1389    /// Creates a raw pointer with the indicated mutability to the place.
1390    ///
1391    /// This is generated by pointer casts like `&v as *const _` or raw borrow expressions like
1392    /// `&raw const v`.
1393    ///
1394    /// Like with references, the semantics of this operation are heavily dependent on the aliasing
1395    /// model.
1396    RawPtr(RawPtrKind, Place<'tcx>),
1397
1398    /// Performs essentially all of the casts that can be performed via `as`.
1399    ///
1400    /// This allows for casts from/to a variety of types.
1401    ///
1402    /// **FIXME**: Document exactly which `CastKind`s allow which types of casts.
1403    Cast(CastKind, Operand<'tcx>, Ty<'tcx>),
1404
1405    /// * `Offset` has the same semantics as [`offset`](pointer::offset), except that the second
1406    ///   parameter may be a `usize` as well.
1407    /// * The comparison operations accept `bool`s, `char`s, signed or unsigned integers, floats,
1408    ///   raw pointers, or function pointers and return a `bool`. The types of the operands must be
1409    ///   matching, up to the usual caveat of the lifetimes in function pointers.
1410    /// * Left and right shift operations accept signed or unsigned integers not necessarily of the
1411    ///   same type and return a value of the same type as their LHS. Like in Rust, the RHS is
1412    ///   truncated as needed.
1413    /// * The `Bit*` operations accept signed integers, unsigned integers, or bools with matching
1414    ///   types and return a value of that type.
1415    /// * The `FooWithOverflow` operations are like the `Foo` ones, but return `(T, bool)` instead of just
1416    ///   `T`; the `bool` is true if the result differs from the infinite-precision result (see the sketch below).
1417    /// * The remaining operations accept signed integers, unsigned integers, or floats with
1418    ///   matching types and return a value of that type.
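    ///
    /// As an illustrative sketch (locals and block names hypothetical), `a + b` with overflow
    /// checks enabled lowers to an `AddWithOverflow` followed by an assert on the overflow flag:
    ///
    /// ```ignore (illustrative)
    /// _3 = AddWithOverflow(copy _1, copy _2);
    /// assert(!move (_3.1: bool), "attempt to compute `{} + {}`, which would overflow",
    ///        copy _1, copy _2) -> [success: bb1, unwind continue];
    /// _4 = move (_3.0: i32);
    /// ```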
1419    BinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
1420
1421    /// Computes a value as described by the operation.
1422    NullaryOp(NullOp<'tcx>, Ty<'tcx>),
1423
1424    /// Exactly like `BinaryOp`, but with fewer operands.
1425    ///
1426    /// Also does two's-complement arithmetic. Negation requires a signed integer or a float;
1427    /// bitwise not requires a signed integer, unsigned integer, or bool. Both operation kinds
1428    /// return a value with the same type as their operand.
1429    UnaryOp(UnOp, Operand<'tcx>),
1430
1431    /// Computes the discriminant of the place, returning it as an integer of type
1432    /// [`discriminant_ty`]. Returns zero for types without discriminant.
1433    ///
1434    /// The validity requirements for the underlying value are undecided for this rvalue, see
1435    /// [#91095]. Note too that the value of the discriminant is not the same thing as the
1436    /// variant index; use [`discriminant_for_variant`] to convert.
1437    ///
1438    /// [`discriminant_ty`]: crate::ty::Ty::discriminant_ty
1439    /// [#91095]: https://github.com/rust-lang/rust/issues/91095
1440    /// [`discriminant_for_variant`]: crate::ty::Ty::discriminant_for_variant
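    ///
    /// As an illustrative sketch, a `match` on an enum typically reads the discriminant and then
    /// switches on it:
    ///
    /// ```ignore (illustrative)
    /// _2 = discriminant(_1);
    /// switchInt(move _2) -> [0: bb1, 1: bb2, otherwise: bb3];
    /// ```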
1441    Discriminant(Place<'tcx>),
1442
1443    /// Creates an aggregate value, like a tuple or struct.
1444    ///
1445    /// This is needed because dataflow analysis needs to distinguish
1446    /// `dest = Foo { x: ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case that `Foo`
1447    /// has a destructor.
1448    ///
1449    /// Disallowed after deaggregation for all aggregate kinds except `Array` and `Coroutine`. After
1450    /// coroutine lowering, `Coroutine` aggregate kinds are disallowed too.
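    ///
    /// As an illustrative sketch (locals hypothetical), constructing `Foo { x: a, y: b }` in one go
    /// prints roughly as:
    ///
    /// ```ignore (illustrative)
    /// _3 = Foo { x: move _1, y: move _2 };
    /// ```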
1451    Aggregate(Box<AggregateKind<'tcx>>, IndexVec<FieldIdx, Operand<'tcx>>),
1452
1453    /// Transmutes a `*mut u8` into shallow-initialized `Box<T>`.
1454    ///
1455    /// This is different from a normal transmute because dataflow analysis will treat the box as
1456    /// initialized but its content as uninitialized. Like other pointer casts, this in general
1457    /// affects alias analysis.
1458    ShallowInitBox(Operand<'tcx>, Ty<'tcx>),
1459
1460    /// A CopyForDeref is equivalent to a read from a place at the
1461    /// codegen level, but is treated specially by drop elaboration. When such a read happens, it
1462    /// is guaranteed (by the nature of the MIR pass `Derefer` in rustc_mir_transform/src/deref_separator)
1463    /// that the returned value is written into a `DerefTemp` local and that its only use is a deref operation,
1464    /// immediately followed by one or more projections. Drop elaboration treats this rvalue as if the
1465    /// read never happened and just projects further. This allows simplifying various MIR
1466    /// optimizations and codegen backends that previously had to handle deref operations anywhere
1467    /// in a place.
1468    ///
1469    /// Disallowed in runtime MIR; it is replaced by normal copies before then.
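    ///
    /// As an illustrative sketch of what the `Derefer` pass produces (locals hypothetical; the
    /// pretty-printed form may differ):
    ///
    /// ```ignore (illustrative)
    /// // before: a deref buried inside a larger place
    /// _2 = ((*(_1.0)).1: u32);
    /// // after: the inner deref is hoisted into a DerefTemp local via CopyForDeref
    /// _3 = deref_copy (_1.0);
    /// _2 = ((*_3).1: u32);
    /// ```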
1470    CopyForDeref(Place<'tcx>),
1471
1472    /// Wraps a value in an unsafe binder.
1473    WrapUnsafeBinder(Operand<'tcx>, Ty<'tcx>),
1474}
1475
1476#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
1477pub enum CastKind {
1478    /// An exposing pointer to address cast. A cast between a pointer and an integer type, or
1479    /// between a function pointer and an integer type.
1480    /// See the docs on `expose_provenance` for more details.
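    ///
    /// As an illustrative sketch (locals hypothetical), `p as usize` for a raw pointer `p` prints
    /// roughly as:
    ///
    /// ```ignore (illustrative)
    /// _2 = move _1 as usize (PointerExposeProvenance);
    /// ```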
1481    PointerExposeProvenance,
1482    /// An address-to-pointer cast that picks up an exposed provenance.
1483    /// See the docs on `with_exposed_provenance` for more details.
1484    PointerWithExposedProvenance,
1485    /// Pointer related casts that are done by coercions. Note that reference-to-raw-ptr casts are
1486    /// translated into `&raw mut/const *r`, i.e., they are not actually casts.
1487    ///
1488    /// The following are allowed in [`AnalysisPhase::Initial`] as they're needed for borrowck,
1489    /// but after that are forbidden (including in all phases of runtime MIR):
1490    /// * [`PointerCoercion::ArrayToPointer`]
1491    /// * [`PointerCoercion::MutToConstPointer`]
1492    ///
1493    /// Both are runtime nops, so should be [`CastKind::PtrToPtr`] instead in runtime MIR.
1494    PointerCoercion(PointerCoercion, CoercionSource),
1495    IntToInt,
1496    FloatToInt,
1497    FloatToFloat,
1498    IntToFloat,
1499    PtrToPtr,
1500    FnPtrToPtr,
1501    /// Reinterpret the bits of the input as a different type.
1502    ///
1503    /// MIR is well-formed if the input and output types have different sizes,
1504    /// but running a transmute between differently-sized types is UB.
1505    Transmute,
1506
1507    /// A `Subtype` cast is applied to any `StatementKind::Assign` where the
1508    /// type of the lvalue doesn't match the type of the rvalue. The primary goal is to make
1509    /// subtyping explicit during optimizations and codegen.
1510    ///
1511    /// This cast doesn't impact the runtime behavior of the program except for potentially changing
1512    /// some type metadata of the interpreter or codegen backend.
1513    ///
1514    /// This goal is achieved with the mir_transform pass `Subtyper`, which runs right after the
1515    /// borrow checker, as we only care about subtyping that can affect trait selection and
1516    /// `TypeId`.
1517    Subtype,
1518}
1519
1520/// Represents how a [`CastKind::PointerCoercion`] was constructed.
1521/// Used only for diagnostics.
1522#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
1523pub enum CoercionSource {
1524    /// The coercion was manually written by the user with an `as` cast.
1525    AsCast,
1526    /// The coercion was automatically inserted by the compiler.
1527    Implicit,
1528}
1529
1530#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
1531#[derive(TypeFoldable, TypeVisitable)]
1532pub enum AggregateKind<'tcx> {
1533    /// The type is that of the element, not of the array.
1534    Array(Ty<'tcx>),
1535    Tuple,
1536
1537    /// The second field is the variant index. It's equal to 0 for struct
1538    /// and union expressions. The last field is the
1539    /// active field number and is present only for union expressions
1540    /// -- e.g., for a union expression `SomeUnion { c: .. }`, the
1541    /// active field index would identify the field `c`.
1542    Adt(DefId, VariantIdx, GenericArgsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<FieldIdx>),
1543
1544    Closure(DefId, GenericArgsRef<'tcx>),
1545    Coroutine(DefId, GenericArgsRef<'tcx>),
1546    CoroutineClosure(DefId, GenericArgsRef<'tcx>),
1547
1548    /// Construct a raw pointer from the data pointer and metadata.
1549    ///
1550    /// The `Ty` here is the type of the *pointee*, not the pointer itself.
1551    /// The `Mutability` indicates whether this produces a `*const` or `*mut`.
1552    ///
1553    /// The [`Rvalue::Aggregate`] operands for this must be:
1554    ///
1555    /// 0. A raw pointer of matching mutability with any [`core::ptr::Thin`] pointee
1556    /// 1. A value of the appropriate [`core::ptr::Pointee::Metadata`] type
1557    ///
1558    /// *Both* operands must always be included, even the unit value if this is
1559    /// creating a thin pointer. If you're just converting between thin pointers,
1560    /// you may want an [`Rvalue::Cast`] with [`CastKind::PtrToPtr`] instead.
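    ///
    /// As an illustrative sketch (locals hypothetical), building a `*const [u8]` from a thin data
    /// pointer `_1: *const u8` and a length `_2: usize` uses an aggregate like:
    ///
    /// ```ignore (illustrative)
    /// Rvalue::Aggregate(AggregateKind::RawPtr(/* pointee */ [u8], Mutability::Not),
    ///                   [copy _1, copy _2])
    /// ```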
1561    RawPtr(Ty<'tcx>, Mutability),
1562}
1563
1564#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
1565pub enum NullOp<'tcx> {
1566    /// Returns the offset of a field
1567    OffsetOf(&'tcx List<(VariantIdx, FieldIdx)>),
1568    /// Returns whether we should perform some UB-checking at runtime.
1569    /// See the `ub_checks` intrinsic docs for details.
1570    UbChecks,
1571    /// Returns whether we should perform contract-checking at runtime.
1572    /// See the `contract_checks` intrinsic docs for details.
1573    ContractChecks,
1574}
1575
1576#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
1577#[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
1578pub enum UnOp {
1579    /// The `!` operator for logical inversion
1580    Not,
1581    /// The `-` operator for negation
1582    Neg,
1583    /// Gets the metadata `M` from a `*const`/`*mut`/`&`/`&mut` to
1584    /// `impl Pointee<Metadata = M>`.
1585    ///
1586    /// For example, this will give a `()` from `*const i32`, a `usize` from
1587    /// `&mut [u8]`, or a `ptr::DynMetadata<dyn Foo>` (internally a pointer)
1588    /// from a `*mut dyn Foo`.
1589    ///
1590    /// Allowed only in [`MirPhase::Runtime`]; earlier it's an intrinsic.
1591    PtrMetadata,
1592}
1593
1594#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
1595#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
1596pub enum BinOp {
1597    /// The `+` operator (addition)
1598    Add,
1599    /// Like `Add`, but with UB on overflow.  (Integers only.)
1600    AddUnchecked,
1601    /// Like `Add`, but returns `(T, bool)` of both the wrapped result
1602    /// and a bool indicating whether it overflowed.
1603    AddWithOverflow,
1604    /// The `-` operator (subtraction)
1605    Sub,
1606    /// Like `Sub`, but with UB on overflow.  (Integers only.)
1607    SubUnchecked,
1608    /// Like `Sub`, but returns `(T, bool)` of both the wrapped result
1609    /// and a bool indicating whether it overflowed.
1610    SubWithOverflow,
1611    /// The `*` operator (multiplication)
1612    Mul,
1613    /// Like `Mul`, but with UB on overflow.  (Integers only.)
1614    MulUnchecked,
1615    /// Like `Mul`, but returns `(T, bool)` of both the wrapped result
1616    /// and a bool indicating whether it overflowed.
1617    MulWithOverflow,
1618    /// The `/` operator (division)
1619    ///
1620    /// For integer types, division by zero is UB, as is `MIN / -1` for signed.
1621    /// The compiler should have inserted checks prior to this.
1622    ///
1623    /// Floating-point division by zero is safe, and does not need guards.
1624    Div,
1625    /// The `%` operator (modulus)
1626    ///
1627    /// For integer types, using zero as the modulus (second operand) is UB,
1628    /// as is `MIN % -1` for signed.
1629    /// The compiler should have inserted checks prior to this.
1630    ///
1631    /// Floating-point remainder by zero is safe, and does not need guards.
1632    Rem,
1633    /// The `^` operator (bitwise xor)
1634    BitXor,
1635    /// The `&` operator (bitwise and)
1636    BitAnd,
1637    /// The `|` operator (bitwise or)
1638    BitOr,
1639    /// The `<<` operator (shift left)
1640    ///
1641    /// The offset is given by `RHS.rem_euclid(LHS::BITS)`.
1642    /// In other words, it is (uniquely) determined as follows:
1643    /// - it is "equal modulo LHS::BITS" to the RHS
1644    /// - it is in the range `0..LHS::BITS`
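    ///
    /// As a small worked example (illustrative): for a `u8` LHS, `LHS::BITS == 8`, so
    ///
    /// ```ignore (illustrative)
    /// Shl(1_u8, 9) == 1_u8 << 9.rem_euclid(8) == 1_u8 << 1 == 2
    /// ```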
1645    Shl,
1646    /// Like `Shl`, but is UB if the RHS >= LHS::BITS or RHS < 0
1647    ShlUnchecked,
1648    /// The `>>` operator (shift right)
1649    ///
1650    /// The offset is given by `RHS.rem_euclid(LHS::BITS)`.
1651    /// In other words, it is (uniquely) determined as follows:
1652    /// - it is "equal modulo LHS::BITS" to the RHS
1653    /// - it is in the range `0..LHS::BITS`
1654    ///
1655    /// This is an arithmetic shift if the LHS is signed
1656    /// and a logical shift if the LHS is unsigned.
1657    Shr,
1658    /// Like `Shr`, but is UB if the RHS >= LHS::BITS or RHS < 0
1659    ShrUnchecked,
1660    /// The `==` operator (equality)
1661    Eq,
1662    /// The `<` operator (less than)
1663    Lt,
1664    /// The `<=` operator (less than or equal to)
1665    Le,
1666    /// The `!=` operator (not equal to)
1667    Ne,
1668    /// The `>=` operator (greater than or equal to)
1669    Ge,
1670    /// The `>` operator (greater than)
1671    Gt,
1672    /// The `<=>` operator (three-way comparison, like `Ord::cmp`)
1673    ///
1674    /// This is supported only on the integer types and `char`, always returning
1675    /// [`rustc_hir::LangItem::OrderingEnum`] (aka [`std::cmp::Ordering`]).
1676    ///
1677    /// [`Rvalue::BinaryOp`]`(BinOp::Cmp, A, B)` returns
1678    /// - `Ordering::Less` (`-1_i8`, as a Scalar) if `A < B`
1679    /// - `Ordering::Equal` (`0_i8`, as a Scalar) if `A == B`
1680    /// - `Ordering::Greater` (`+1_i8`, as a Scalar) if `A > B`
1681    Cmp,
1682    /// The `ptr.offset` operator
1683    Offset,
1684}
1685
1686// Assignment operators, e.g. `+=`. See comments on the corresponding variants
1687// in `BinOp` for details.
1688#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
1689pub enum AssignOp {
1690    AddAssign,
1691    SubAssign,
1692    MulAssign,
1693    DivAssign,
1694    RemAssign,
1695    BitXorAssign,
1696    BitAndAssign,
1697    BitOrAssign,
1698    ShlAssign,
1699    ShrAssign,
1700}
1701
1702// Sometimes `BinOp` and `AssignOp` need the same treatment. The operations
1703// covered by `AssignOp` are a subset of those covered by `BinOp`, so it makes
1704// sense to convert `AssignOp` to `BinOp`.
1705impl From<AssignOp> for BinOp {
1706    fn from(op: AssignOp) -> BinOp {
1707        match op {
1708            AssignOp::AddAssign => BinOp::Add,
1709            AssignOp::SubAssign => BinOp::Sub,
1710            AssignOp::MulAssign => BinOp::Mul,
1711            AssignOp::DivAssign => BinOp::Div,
1712            AssignOp::RemAssign => BinOp::Rem,
1713            AssignOp::BitXorAssign => BinOp::BitXor,
1714            AssignOp::BitAndAssign => BinOp::BitAnd,
1715            AssignOp::BitOrAssign => BinOp::BitOr,
1716            AssignOp::ShlAssign => BinOp::Shl,
1717            AssignOp::ShrAssign => BinOp::Shr,
1718        }
1719    }
1720}
1721
1722// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
1723#[cfg(target_pointer_width = "64")]
1724mod size_asserts {
1725    use rustc_data_structures::static_assert_size;
1726
1727    use super::*;
1728    // tidy-alphabetical-start
1729    static_assert_size!(AggregateKind<'_>, 32);
1730    static_assert_size!(Operand<'_>, 24);
1731    static_assert_size!(Place<'_>, 16);
1732    static_assert_size!(PlaceElem<'_>, 24);
1733    static_assert_size!(Rvalue<'_>, 40);
1734    static_assert_size!(StatementKind<'_>, 16);
1735    static_assert_size!(TerminatorKind<'_>, 80);
1736    // tidy-alphabetical-end
1737}