use std::borrow::{Borrow, Cow};
use std::fmt;
use std::hash::Hash;

use rustc_abi::{Align, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap, IndexEntry};
use rustc_errors::inline_fluent;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::{self as hir, CRATE_HIR_ID, LangItem};
use rustc_middle::mir::AssertMessage;
use rustc_middle::mir::interpret::{Pointer, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{HasTypingEnv, TyAndLayout, ValidityRequirement};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_span::{Span, Symbol, sym};
use rustc_target::callconv::FnAbi;
use tracing::debug;

use super::error::*;
use crate::errors::{LongRunning, LongRunningWarn};
use crate::interpret::{
    self, AllocId, AllocInit, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame,
    GlobalAlloc, ImmTy, InterpCx, InterpResult, OpTy, PlaceTy, RangeSet, Scalar,
    compile_time_machine, err_inval, interp_ok, throw_exhaust, throw_inval, throw_ub,
    throw_ub_custom, throw_unsup, throw_unsup_format,
};

/// When hitting this many interpreted terminators we emit a deny-by-default lint
/// that notifies the user that their constant takes a long time to evaluate. If that's
/// what they intended, they can just allow the lint.
const LINT_TERMINATOR_LIMIT: usize = 2_000_000;
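// The lint in question is `long_running_const_eval` (emitted in
// `increment_const_eval_counter` below); for example, annotating the item with
// `#[allow(long_running_const_eval)]` opts out of it.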
/// The limit used by `-Z tiny-const-eval-limit`. This smaller limit is useful for internal
/// tests not needing to run 30s or more to show some behaviour.
const TINY_LINT_TERMINATOR_LIMIT: usize = 20;
/// After this many interpreted terminators, we start emitting progress indicators at every
/// power of two of interpreted terminators.
const PROGRESS_INDICATOR_START: usize = 4_000_000;

/// Extra machine state for CTFE, and the Machine instance.
//
// Should be public because out-of-tree rustc consumers need this
// if they want to interact with constant values.
pub struct CompileTimeMachine<'tcx> {
    /// The number of terminators that have been evaluated.
    ///
    /// This is used to produce lints informing the user that the compiler is not stuck.
    /// Set to `usize::MAX` to never report anything.
    pub(super) num_evaluated_steps: usize,

    /// The virtual call stack.
    pub(super) stack: Vec<Frame<'tcx>>,

    /// Pattern matching on consts with references would be unsound if those references
    /// could point to anything mutable. Therefore, when evaluating consts and when constructing valtrees,
    /// we ensure that only immutable global memory can be accessed.
    pub(super) can_access_mut_global: CanAccessMutGlobal,

    /// Whether to check alignment during evaluation.
    pub(super) check_alignment: CheckAlignment,

    /// If `Some`, we are evaluating the initializer of the static with the given `LocalDefId`,
    /// storing the result in the given `AllocId`.
    /// Used to prevent accesses to a static's base allocation, as that may allow for self-initialization loops.
    pub(crate) static_root_ids: Option<(AllocId, LocalDefId)>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,
}

#[derive(Copy, Clone)]
pub enum CheckAlignment {
    /// Ignore all alignment requirements.
    /// This is mainly used in interning.
    No,
    /// Hard error when dereferencing a misaligned pointer.
    Error,
}

#[derive(Copy, Clone, PartialEq)]
pub(crate) enum CanAccessMutGlobal {
    No,
    Yes,
}

impl From<bool> for CanAccessMutGlobal {
    fn from(value: bool) -> Self {
        if value { Self::Yes } else { Self::No }
    }
}

impl<'tcx> CompileTimeMachine<'tcx> {
    pub(crate) fn new(
        can_access_mut_global: CanAccessMutGlobal,
        check_alignment: CheckAlignment,
    ) -> Self {
        CompileTimeMachine {
            num_evaluated_steps: 0,
            stack: Vec::new(),
            can_access_mut_global,
            check_alignment,
            static_root_ids: None,
            union_data_ranges: FxHashMap::default(),
        }
    }
}

impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> {
    #[inline(always)]
    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
    where
        K: Borrow<Q>,
    {
        FxIndexMap::contains_key(self, k)
    }

    #[inline(always)]
    fn contains_key_ref<Q: ?Sized + Hash + Eq>(&self, k: &Q) -> bool
    where
        K: Borrow<Q>,
    {
        FxIndexMap::contains_key(self, k)
    }

    #[inline(always)]
    fn insert(&mut self, k: K, v: V) -> Option<V> {
        FxIndexMap::insert(self, k, v)
    }

    #[inline(always)]
    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
    where
        K: Borrow<Q>,
    {
        // FIXME(#120456) - is `swap_remove` correct?
        FxIndexMap::swap_remove(self, k)
    }

    #[inline(always)]
    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
        self.iter().filter_map(move |(k, v)| f(k, v)).collect()
    }

    #[inline(always)]
    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
        match self.get(&k) {
            Some(v) => Ok(v),
            None => {
                vacant()?;
                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
            }
        }
    }

    #[inline(always)]
    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
        match self.entry(k) {
            IndexEntry::Occupied(e) => Ok(e.into_mut()),
            IndexEntry::Vacant(e) => {
                let v = vacant()?;
                Ok(e.insert(v))
            }
        }
    }
}

pub type CompileTimeInterpCx<'tcx> = InterpCx<'tcx, CompileTimeMachine<'tcx>>;

#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum MemoryKind {
    Heap {
        /// Indicates whether `make_global` was called on this allocation.
        /// If this is `true`, the allocation must be immutable.
        was_made_global: bool,
    },
}

impl fmt::Display for MemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Heap { was_made_global } => {
                write!(f, "heap allocation{}", if *was_made_global { " (made global)" } else { "" })
            }
        }
    }
}

impl interpret::MayLeak for MemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Heap { was_made_global } => was_made_global,
        }
    }
}

impl interpret::MayLeak for ! {
    #[inline(always)]
    fn may_leak(self) -> bool {
        // `self` is uninhabited
        self
    }
}

impl<'tcx> CompileTimeInterpCx<'tcx> {
    fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());

        use rustc_span::RemapPathScopeComponents;
        (
            Symbol::intern(
                &caller.file.name.display(RemapPathScopeComponents::DIAGNOSTICS).to_string_lossy(),
            ),
            u32::try_from(caller.line).unwrap(),
            u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
        )
    }
220221/// "Intercept" a function call, because we have something special to do for it.
222 /// All `#[rustc_do_not_const_check]` functions MUST be hooked here.
223 /// If this returns `Some` function, which may be `instance` or a different function with
224 /// compatible arguments, then evaluation should continue with that function.
225 /// If this returns `None`, the function call has been handled and the function has returned.
226fn hook_special_const_fn(
227&mut self,
228 instance: ty::Instance<'tcx>,
229 args: &[FnArg<'tcx>],
230 _dest: &PlaceTy<'tcx>,
231 _ret: Option<mir::BasicBlock>,
232 ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
233let def_id = instance.def_id();
234235if self.tcx.is_lang_item(def_id, LangItem::PanicDisplay)
236 || self.tcx.is_lang_item(def_id, LangItem::BeginPanic)
237 {
238let args = self.copy_fn_args(args);
239// &str or &&str
240if !(args.len() == 1) {
::core::panicking::panic("assertion failed: args.len() == 1")
};assert!(args.len() == 1);
241242let mut msg_place = self.deref_pointer(&args[0])?;
243while msg_place.layout.ty.is_ref() {
244 msg_place = self.deref_pointer(&msg_place)?;
245 }
246247let msg = Symbol::intern(self.read_str(&msg_place)?);
248let span = self.find_closest_untracked_caller_location();
249let (file, line, col) = self.location_triple_for_span(span);
250return Err(ConstEvalErrKind::Panic { msg, file, line, col }).into();
251 } else if self.tcx.is_lang_item(def_id, LangItem::PanicFmt) {
252// For panic_fmt, call const_panic_fmt instead.
253let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, self.tcx.span);
254let new_instance = ty::Instance::expect_resolve(
255*self.tcx,
256self.typing_env(),
257const_def_id,
258instance.args,
259self.cur_span(),
260 );
261262return interp_ok(Some(new_instance));
263 }
264interp_ok(Some(instance))
265 }

    /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
    /// Returns `2` if the result is unknown.
    /// Returns `1` if the pointers are guaranteed equal.
    /// Returns `0` if the pointers are guaranteed inequal.
    ///
    /// Note that this intrinsic is exposed on stable for comparison with null. In other words, any
    /// change to this function that affects comparison with null is insta-stable!
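    ///
    /// A minimal sketch of the stable surface this backs (illustrative, not a doctest):
    ///
    /// ```ignore (illustrative)
    /// const NOT_NULL: bool = {
    ///     let x = 0u8;
    ///     // `is_null` bottoms out in a comparison with null; a pointer into a
    ///     // fresh allocation is never null, so this returns `0` => `false`.
    ///     !(&x as *const u8).is_null()
    /// };
    /// ```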
    fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
        interp_ok(match (a, b) {
            // Comparisons between integers are always known.
            (Scalar::Int(a), Scalar::Int(b)) => (a == b) as u8,
            // Comparing a pointer `ptr` with an integer `int` is equivalent to comparing
            // `ptr-int` with null, so we can reduce this case to a `scalar_may_be_null` test.
            (Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => {
                let int = int.to_target_usize(*self.tcx);
                // The `wrapping_neg` here may produce a value that is not
                // a valid target usize any more... but `wrapping_offset` handles that correctly.
                let offset_ptr = ptr.wrapping_offset(Size::from_bytes(int.wrapping_neg()), self);
                if !self.scalar_may_be_null(Scalar::from_pointer(offset_ptr, self))? {
                    // `ptr.wrapping_sub(int)` is definitely not equal to `0`, so `ptr != int`
                    0
                } else {
                    // `ptr.wrapping_sub(int)` could be equal to `0`, but might not be,
                    // so we cannot know for sure if `ptr == int` or not
                    2
                }
            }
            (Scalar::Ptr(a, _), Scalar::Ptr(b, _)) => {
                let (a_prov, a_offset) = a.prov_and_relative_offset();
                let (b_prov, b_offset) = b.prov_and_relative_offset();
                let a_allocid = a_prov.alloc_id();
                let b_allocid = b_prov.alloc_id();
                let a_info = self.get_alloc_info(a_allocid);
                let b_info = self.get_alloc_info(b_allocid);

                // Check if the pointers cannot be equal due to alignment
                if a_info.align > Align::ONE && b_info.align > Align::ONE {
                    let min_align = Ord::min(a_info.align.bytes(), b_info.align.bytes());
                    let a_residue = a_offset.bytes() % min_align;
                    let b_residue = b_offset.bytes() % min_align;
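                    // Worked example (illustrative): with allocations aligned to 8
                    // and 4, `min_align` is 4. A pointer at offset 1 into the first
                    // and offset 2 into the second have residues 1 and 2 modulo 4,
                    // so no choice of base addresses can make them coincide.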
                    if a_residue != b_residue {
                        // If the two pointers have a different residue modulo their
                        // common alignment, they cannot be equal.
                        return interp_ok(0);
                    }
                    // The pointers have the same residue modulo their common alignment,
                    // so they could be equal. Try the other checks.
                }

                if let (Some(GlobalAlloc::Static(a_did)), Some(GlobalAlloc::Static(b_did))) = (
                    self.tcx.try_get_global_alloc(a_allocid),
                    self.tcx.try_get_global_alloc(b_allocid),
                ) {
                    if a_allocid == b_allocid {
                        debug_assert_eq!(
                            a_did, b_did,
                            "different static item DefIds had same AllocId? {a_allocid:?} == {b_allocid:?}, {a_did:?} != {b_did:?}"
                        );
                        // Comparing two pointers into the same static. As per
                        // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro
                        // a static cannot be duplicated, so if two pointers are into the same
                        // static, they are equal if and only if their offsets are equal.
                        (a_offset == b_offset) as u8
                    } else {
                        debug_assert_ne!(
                            a_did, b_did,
                            "same static item DefId had two different AllocIds? {a_allocid:?} != {b_allocid:?}, {a_did:?} == {b_did:?}"
                        );
                        // Comparing two pointers into different statics.
                        // We can never determine for sure that two pointers into different statics
                        // are *equal*, but we can know that they are *inequal* if they are both
                        // strictly in-bounds (i.e. in-bounds and not one-past-the-end) of
                        // their respective static, as different non-zero-sized statics cannot
                        // overlap or be deduplicated as per
                        // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro
                        // (non-deduplication), and
                        // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
                        // (non-overlapping).
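                        // For example (illustrative): given `static A: u8 = 0;` and
                        // `static B: u8 = 0;`, pointers `&A` and `&B` are both at
                        // offset 0 of a 1-byte static, hence strictly in-bounds, so
                        // their inequality is guaranteed and `0` is returned below.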
                        if a_offset < a_info.size && b_offset < b_info.size {
                            0
                        } else {
                            // Otherwise, conservatively say we don't know.
                            // There are some cases we could still return `0` for, e.g.
                            // if the pointers being equal would require their statics to overlap
                            // one or more bytes, but for simplicity we currently only check
                            // strictly in-bounds pointers.
                            2
                        }
                    }
                } else {
                    // All other cases we conservatively say we don't know.
                    //
                    // For comparing statics to non-statics, as per https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
                    // immutable statics can overlap with other kinds of allocations sometimes.
                    //
                    // FIXME: We could be more decisive for (non-zero-sized) mutable statics,
                    // which cannot overlap with other kinds of allocations.
                    //
                    // Functions and vtables can be duplicated and deduplicated, so we
                    // cannot be sure of runtime equality of pointers to the same one, or the
                    // runtime inequality of pointers to different ones (see e.g. #73722),
                    // so comparing those should return 2, whether they are the same allocation
                    // or not.
                    //
                    // `GlobalAlloc::TypeId` exists mostly to prevent consteval from comparing
                    // `TypeId`s, so comparing those should always return 2, whether they are the
                    // same allocation or not.
                    //
                    // FIXME: We could revisit comparing pointers into the same
                    // `GlobalAlloc::Memory` once https://github.com/rust-lang/rust/issues/128775
                    // is fixed (but they can be deduplicated, so comparing pointers into different
                    // ones should return 2).
                    2
                }
            }
        })
    }
}

impl<'tcx> CompileTimeMachine<'tcx> {
    #[inline(always)]
    /// Find the first stack frame that is within the current crate, if any.
    /// Otherwise, return the crate's HirId.
    pub fn best_lint_scope(&self, tcx: TyCtxt<'tcx>) -> hir::HirId {
        self.stack.iter().find_map(|frame| frame.lint_root(tcx)).unwrap_or(CRATE_HIR_ID)
    }
}

impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
    compile_time_machine!(<'tcx>);

    const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error

    #[inline(always)]
    fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool {
        matches!(ecx.machine.check_alignment, CheckAlignment::Error)
    }

    #[inline(always)]
    fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool {
        ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.is_uninhabited()
    }

    fn load_mir(
        ecx: &InterpCx<'tcx, Self>,
        instance: ty::InstanceKind<'tcx>,
    ) -> &'tcx mir::Body<'tcx> {
        match instance {
            ty::InstanceKind::Item(def) => ecx.tcx.mir_for_ctfe(def),
            _ => ecx.tcx.instance_mir(instance),
        }
    }

    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'tcx, Self>,
        orig_instance: ty::Instance<'tcx>,
        _abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        _unwind: mir::UnwindAction, // unwinding is not supported in consts
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        debug!("find_mir_or_eval_fn: {:?}", orig_instance);

        // Replace some functions.
        let Some(instance) = ecx.hook_special_const_fn(orig_instance, args, dest, ret)? else {
            // Call has already been handled.
            return interp_ok(None);
        };

        // Only check non-glue functions
        if let ty::InstanceKind::Item(def) = instance.def {
            // Execution might have wandered off into other crates, so we cannot do a stability-
            // sensitive check here. But we can at least rule out functions that are not const at
            // all. That said, we have to allow calling functions inside a `const trait`. These
            // *are* const-checked!
            if !ecx.tcx.is_const_fn(def) || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check) {
                // We certainly do *not* want to actually call the fn
                // though, so be sure we return here.
                throw_unsup_format!("calling non-const function `{}`", instance)
            }
        }

        // This is a const fn. Call it.
        // In case of replacement, we return the *original* instance to make backtraces work out
        // (and we hope this does not confuse the FnAbi checks too much).
        interp_ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        let msg = Symbol::intern(msg);
        let span = ecx.find_closest_untracked_caller_location();
        let (file, line, col) = ecx.location_triple_for_span(span);
        Err(ConstEvalErrKind::Panic { msg, file, line, col }).into()
    }

    fn call_intrinsic(
        ecx: &mut InterpCx<'tcx, Self>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx, Self::Provenance>,
        target: Option<mir::BasicBlock>,
        _unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        // Shared intrinsics.
        if ecx.eval_intrinsic(instance, args, dest, target)? {
            return interp_ok(None);
        }
        let intrinsic_name = ecx.tcx.item_name(instance.def_id());

        // CTFE-specific intrinsics.
        match intrinsic_name {
            sym::ptr_guaranteed_cmp => {
                let a = ecx.read_scalar(&args[0])?;
                let b = ecx.read_scalar(&args[1])?;
                let cmp = ecx.guaranteed_cmp(a, b)?;
                ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
            }
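            // Illustrative use that reaches the `const_allocate`/`const_deallocate`
            // arms below (assumes the unstable `core_intrinsics` and `const_heap`
            // features):
            //
            //     const _: () = unsafe {
            //         let p = core::intrinsics::const_allocate(4, 4);
            //         core::intrinsics::const_deallocate(p, 4, 4);
            //     };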
            sym::const_allocate => {
                let size = ecx.read_scalar(&args[0])?.to_target_usize(ecx)?;
                let align = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;

                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_custom!(
                        inline_fluent!(
                            "invalid align passed to `{$name}`: {$align} is {$err_kind ->
    [not_power_of_two] not a power of 2
    [too_large] too large
    *[other] {\"\"}
}"
                        ),
                        name = "const_allocate",
                        err_kind = err.diag_ident(),
                        align = err.align()
                    ),
                };

                let ptr = ecx.allocate_ptr(
                    Size::from_bytes(size),
                    align,
                    interpret::MemoryKind::Machine(MemoryKind::Heap { was_made_global: false }),
                    AllocInit::Uninit,
                )?;
                ecx.write_pointer(ptr, dest)?;
            }
            sym::const_deallocate => {
                let ptr = ecx.read_pointer(&args[0])?;
                let size = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
                let align = ecx.read_scalar(&args[2])?.to_target_usize(ecx)?;

                let size = Size::from_bytes(size);
                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_custom!(
                        inline_fluent!(
                            "invalid align passed to `{$name}`: {$align} is {$err_kind ->
    [not_power_of_two] not a power of 2
    [too_large] too large
    *[other] {\"\"}
}"
                        ),
                        name = "const_deallocate",
                        err_kind = err.diag_ident(),
                        align = err.align()
                    ),
                };

                // If an allocation is created in another const,
                // we don't deallocate it.
                let (alloc_id, _, _) = ecx.ptr_get_alloc_id(ptr, 0)?;
                let is_allocated_in_another_const = matches!(
                    ecx.tcx.try_get_global_alloc(alloc_id),
                    Some(interpret::GlobalAlloc::Memory(_))
                );

                if !is_allocated_in_another_const {
                    ecx.deallocate_ptr(
                        ptr,
                        Some((size, align)),
                        interpret::MemoryKind::Machine(MemoryKind::Heap { was_made_global: false }),
                    )?;
                }
            }

            sym::const_make_global => {
                let ptr = ecx.read_pointer(&args[0])?;
                ecx.make_const_heap_ptr_global(ptr)?;
                ecx.write_pointer(ptr, dest)?;
            }

            // The intrinsic represents whether the value is known to the optimizer (LLVM).
            // We're not doing any optimizations here, so there is no optimizer that could know the value.
            // (We know the value here in the machine of course, but this is the runtime of that code,
            // not the optimization stage.)
            sym::is_val_statically_known => ecx.write_scalar(Scalar::from_bool(false), dest)?,

            // We handle these here since Miri does not want to have them.
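            // For example (illustrative), `const _: &u8 = unsafe { core::mem::zeroed() };`
            // would reach this arm via `assert_zero_valid` and abort evaluation,
            // since a zeroed reference is invalid.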
            sym::assert_inhabited
            | sym::assert_zero_valid
            | sym::assert_mem_uninitialized_valid => {
                let ty = instance.args.type_at(0);
                let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();

                let should_panic = !ecx
                    .tcx
                    .check_validity_requirement((requirement, ecx.typing_env().as_query_input(ty)))
                    .map_err(|_| err_inval!(TooGeneric))?;

                if should_panic {
                    let layout = ecx.layout_of(ty)?;

                    let msg = match requirement {
                        // For *all* intrinsics we first check `is_uninhabited` to give a more specific
                        // error message.
                        _ if layout.is_uninhabited() => format!(
                            "aborted execution: attempted to instantiate uninhabited type `{ty}`"
                        ),
                        ValidityRequirement::Inhabited => bug!("handled earlier"),
                        ValidityRequirement::Zero => format!(
                            "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
                        ),
                        ValidityRequirement::UninitMitigated0x01Fill => format!(
                            "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
                        ),
                        ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
                    };

                    Self::panic_nounwind(ecx, &msg)?;
                    // Skip the `return_to_block` at the end (we panicked, we do not return).
                    return interp_ok(None);
                }
            }

            sym::type_of => {
                let ty = ecx.read_type_id(&args[0])?;
                ecx.write_type_info(ty, dest)?;
            }

            _ => {
                // We haven't handled the intrinsic, let's see if we can use a fallback body.
                if ecx.tcx.intrinsic(instance.def_id()).unwrap().must_be_overridden {
                    throw_unsup_format!(
                        "intrinsic `{intrinsic_name}` is not supported at compile-time"
                    );
                }
                return interp_ok(Some(ty::Instance {
                    def: ty::InstanceKind::Item(instance.def_id()),
                    args: instance.args,
                }));
            }
        }

        // Intrinsic is done, jump to next block.
        ecx.return_to_block(target)?;
        interp_ok(None)
    }

    fn assert_panic(
        ecx: &mut InterpCx<'tcx, Self>,
        msg: &AssertMessage<'tcx>,
        _unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::AssertKind::*;
        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
        let eval_to_int =
            |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
        let err = match msg {
            BoundsCheck { len, index } => {
                let len = eval_to_int(len)?;
                let index = eval_to_int(index)?;
                BoundsCheck { len, index }
            }
            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
            ResumedAfterReturn(coroutine_kind) => ResumedAfterReturn(*coroutine_kind),
            ResumedAfterPanic(coroutine_kind) => ResumedAfterPanic(*coroutine_kind),
            ResumedAfterDrop(coroutine_kind) => ResumedAfterDrop(*coroutine_kind),
            MisalignedPointerDereference { required, found } => MisalignedPointerDereference {
                required: eval_to_int(required)?,
                found: eval_to_int(found)?,
            },
            NullPointerDereference => NullPointerDereference,
            InvalidEnumConstruction(source) => InvalidEnumConstruction(eval_to_int(source)?),
        };
        Err(ConstEvalErrKind::AssertFailure(err)).into()
    }

    #[inline(always)]
    fn runtime_checks(
        _ecx: &InterpCx<'tcx, Self>,
        _r: mir::RuntimeChecks,
    ) -> InterpResult<'tcx, bool> {
        // We can't look at `tcx.sess` here as that can differ across crates, which can lead to
        // unsound differences in evaluating the same constant at different instantiation sites.
        interp_ok(true)
    }

    fn binary_ptr_op(
        _ecx: &InterpCx<'tcx, Self>,
        _bin_op: mir::BinOp,
        _left: &ImmTy<'tcx>,
        _right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
    }

    fn increment_const_eval_counter(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        // The step limit has already been hit in a previous call to `increment_const_eval_counter`.

        if let Some(new_steps) = ecx.machine.num_evaluated_steps.checked_add(1) {
            let (limit, start) = if ecx.tcx.sess.opts.unstable_opts.tiny_const_eval_limit {
                (TINY_LINT_TERMINATOR_LIMIT, TINY_LINT_TERMINATOR_LIMIT)
            } else {
                (LINT_TERMINATOR_LIMIT, PROGRESS_INDICATOR_START)
            };

            ecx.machine.num_evaluated_steps = new_steps;
            // By default, we have a *deny* lint kicking in after some time
            // to ensure `loop {}` doesn't just go forever.
            // In case that lint got reduced, in particular for `--cap-lint` situations, we also
            // have a hard warning shown every now and then for really long executions.
            if new_steps == limit {
                // By default, we stop after a million steps, but the user can disable this lint
                // to be able to run until the heat death of the universe or power loss, whichever
                // comes first.
                let hir_id = ecx.machine.best_lint_scope(*ecx.tcx);
                let is_error = ecx
                    .tcx
                    .lint_level_at_node(
                        rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
                        hir_id,
                    )
                    .level
                    .is_error();
                let span = ecx.cur_span();
                ecx.tcx.emit_node_span_lint(
                    rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
                    hir_id,
                    span,
                    LongRunning { item_span: ecx.tcx.span },
                );
                // If this was a hard error, don't bother continuing evaluation.
                if is_error {
                    let guard = ecx
                        .tcx
                        .dcx()
                        .span_delayed_bug(span, "The deny lint should have already errored");
                    throw_inval!(AlreadyReported(ReportedErrorInfo::allowed_in_infallible(guard)));
                }
            } else if new_steps > start && new_steps.is_power_of_two() {
                // Only report after a certain number of terminators have been evaluated and the
                // current number of evaluated terminators is a power of 2. The latter gives us a cheap
                // way to implement exponential backoff.
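                // (With the default `start` of 4_000_000, that means warnings at
                // 2^22 = 4_194_304 steps, then 2^23, 2^24, and so on.)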
                let span = ecx.cur_span();
                // We store a unique number in `force_duplicate` to evade `-Z deduplicate-diagnostics`.
                // `new_steps` is guaranteed to be unique because `ecx.machine.num_evaluated_steps` is
                // always increasing.
                ecx.tcx.dcx().emit_warn(LongRunningWarn {
                    span,
                    item_span: ecx.tcx.span,
                    force_duplicate: new_steps,
                });
            }
        }

        interp_ok(())
    }

    #[inline(always)]
    fn expose_provenance(
        _ecx: &InterpCx<'tcx, Self>,
        _provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        // This is only reachable with -Zunleash-the-miri-inside-of-you.
        throw_unsup_format!("exposing pointers is not possible at compile-time")
    }

    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx>,
    ) -> InterpResult<'tcx, Frame<'tcx>> {
        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
        if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            interp_ok(frame)
        }
    }

    #[inline(always)]
    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        &ecx.machine.stack
    }

    #[inline(always)]
    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        &mut ecx.machine.stack
    }

    fn before_access_global(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_id: AllocId,
        alloc: ConstAllocation<'tcx>,
        _static_def_id: Option<DefId>,
        is_write: bool,
    ) -> InterpResult<'tcx> {
        let alloc = alloc.inner();
        if is_write {
            // Write access. These are never allowed, but we give a targeted error message.
            match alloc.mutability {
                Mutability::Not => throw_ub!(WriteToReadOnly(alloc_id)),
                Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal).into(),
            }
        } else {
            // Read access. These are usually allowed, with some exceptions.
            if machine.can_access_mut_global == CanAccessMutGlobal::Yes {
                // Machine configuration allows us to read from anything (e.g., `static` initializer).
                interp_ok(())
            } else if alloc.mutability == Mutability::Mut {
                // Machine configuration does not allow us to read statics (e.g., `const`
                // initializer).
                Err(ConstEvalErrKind::ConstAccessesMutGlobal).into()
            } else {
                // Immutable global, this read is fine.
                assert_eq!(alloc.mutability, Mutability::Not);
                interp_ok(())
            }
        }
    }

    fn retag_ptr_value(
        ecx: &mut InterpCx<'tcx, Self>,
        _kind: mir::RetagKind,
        val: &ImmTy<'tcx, CtfeProvenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, CtfeProvenance>> {
        // If it's a frozen shared reference that's not already immutable, potentially make it immutable.
        // (Do nothing on `None` provenance, that cannot store immutability anyway.)
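        // Illustrative example: in `const C: &i32 = &5;` the `&5` is a shared
        // reference to a freeze type, so its provenance is marked immutable here,
        // letting the referenced memory be interned as read-only.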
        if let ty::Ref(_, ty, mutbl) = val.layout.ty.kind()
            && *mutbl == Mutability::Not
            && val
                .to_scalar_and_meta()
                .0
                .to_pointer(ecx)?
                .provenance
                .is_some_and(|p| !p.immutable())
        {
            // That next check is expensive, that's why we have all the guards above.
            let is_immutable = ty.is_freeze(*ecx.tcx, ecx.typing_env());
            let place = ecx.ref_to_mplace(val)?;
            let new_place = if is_immutable {
                place.map_provenance(CtfeProvenance::as_immutable)
            } else {
                // Even if it is not immutable, remember that it is a shared reference.
                // This allows it to become part of the final value of the constant.
                // (See <https://github.com/rust-lang/rust/pull/128543> for why we allow this
                // even when there is interior mutability.)
                place.map_provenance(CtfeProvenance::as_shared_ref)
            };
            interp_ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
        } else {
            interp_ok(val.clone())
        }
    }

    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        _machine: &mut Self,
        _alloc_extra: &mut Self::AllocExtra,
        _ptr: Pointer<Option<Self::Provenance>>,
        (_alloc_id, immutable): (AllocId, bool),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if range.size == Size::ZERO {
            // Nothing to check.
            return interp_ok(());
        }
        // Reject writes through immutable pointers.
        if immutable {
            return Err(ConstEvalErrKind::WriteThroughImmutablePointer).into();
        }
        // Everything else is fine.
        interp_ok(())
    }

    fn before_alloc_access(
        tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_id: AllocId,
    ) -> InterpResult<'tcx> {
        if machine.stack.is_empty() {
            // Get out of the way for the final copy.
            return interp_ok(());
        }
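        // Illustrative example of what the next check rejects: `static S: u32 = S;`
        // reads its own base allocation while its initializer is still being
        // evaluated.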
        // Check if this is the currently evaluated static.
        if Some(alloc_id) == machine.static_root_ids.map(|(id, _)| id) {
            return Err(ConstEvalErrKind::RecursiveStatic).into();
        }
        // If this is another static, make sure we fire off the query to detect cycles.
        // But only do that when checks for static recursion are enabled.
        if machine.static_root_ids.is_some() {
            if let Some(GlobalAlloc::Static(def_id)) = tcx.try_get_global_alloc(alloc_id) {
                if tcx.is_foreign_item(def_id) {
                    throw_unsup!(ExternStatic(def_id));
                }
                tcx.eval_static_initializer(def_id)?;
            }
        }
        interp_ok(())
    }

    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        if ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks {
            Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
        } else {
            // Don't bother caching, we're only doing one validation at the end anyway.
            Cow::Owned(compute_range())
        }
    }

    fn get_default_alloc_params(&self) -> <Self::Bytes as mir::interpret::AllocBytes>::AllocParams {
    }
}

// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
// at the bottom of this file.