//! The data that we will serialize and deserialize.
//!
//! Notionally, the dep-graph is a sequence of NodeInfo with the dependencies
//! specified inline. The maximum node index, the node count, and the edge count
//! are stored as the last 24 bytes of the file, so we can find them easily at
//! decoding time.
//!
//! The serialization is performed on-demand when each node is emitted. Using this
//! scheme, we do not need to keep the current graph in memory.
//!
//! The deserialization is performed manually, in order to convert from the stored
//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
//! node and edge counts are stored at the end of the file, all the arrays can be
//! pre-allocated with the right length.
//!
//! The encoding of the dep-graph is generally designed around the fact that fixed-size
//! reads of encoded data are generally faster than variable-sized reads. Ergo we adopt
//! essentially the same varint encoding scheme used in the rmeta format; the edge lists
//! for each node on the graph store a 2-bit integer which is the number of bytes per edge
//! index in that node's edge list. We effectively ignore that an edge index of 0 could be
//! encoded with 0 bytes in order to not require 3 bits to store the byte width of the edges.
//! The overhead of calculating the correct byte width for each edge is mitigated by
//! building edge lists with [`EdgesVec`] which keeps a running max of the edges in a node.
//!
//! When we decode this data, we do not immediately create [`SerializedDepNodeIndex`] and
//! instead keep the data in its denser serialized form which lets us turn our on-disk size
//! efficiency directly into a peak memory reduction. When we convert these encoded-in-memory
//! values into their fully-deserialized type, we use a fixed-size read of the encoded array
//! then mask off any errant bytes we read. The array of edge index bytes is padded to permit this.
//!
//! We also encode and decode the entire rest of each node using [`SerializedNodeHeader`]
//! to let this encoding and decoding be done in one fixed-size operation. These headers contain
//! two [`Fingerprint`]s along with the serialized [`DepKind`], and the number of edge indices
//! in the node and the number of bytes used to encode the edge indices for this node. The
//! [`DepKind`], number of edges, and bytes per edge are all bit-packed together, if they fit.
//! If the number of edges in this node does not fit in the bits available in the header, we
//! store it directly after the header with leb128.
//!
//! Dep-graph indices are bulk allocated to threads inside `LocalEncoderState`. Having threads
//! own these indices helps avoid races when they are conditionally used when marking nodes green.
//! It also reduces congestion on the shared index count.
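//!
//! As an illustrative sketch (not a specification of the format), the byte width
//! selection for a node's edge list works like this:
//!
//! ```ignore (illustrative)
//! // Suppose the largest edge index in a node's list is 0x0123. With 4-byte
//! // indices, the top two bytes are always zero, so each edge only needs 2
//! // bytes; the header stores `2 - 1 = 1` in its 2-bit width field.
//! let max_index: u32 = 0x0123;
//! let free_bytes = max_index.leading_zeros() as usize / 8; // 2
//! let stored_width = (4 - free_bytes) - 1; // 1, i.e. 2 bytes per edge index
//! ```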

use std::cell::RefCell;
use std::cmp::max;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::{iter, mem, u64};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::outline;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::{AtomicU64, Lock, WorkerLocal, broadcast};
use rustc_data_structures::unhash::UnhashMap;
use rustc_index::IndexVec;
use rustc_serialize::opaque::mem_encoder::MemEncoder;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_session::Session;
use tracing::{debug, instrument};

use super::graph::{CurrentDepGraph, DepNodeColorMap};
use super::retained::RetainedDepGraph;
use super::{DepKind, DepNode, DepNodeIndex};
use crate::dep_graph::edges::EdgesVec;

// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
// unused so that we can store multiple index types in `CompressedHybridIndex`,
// and use those bits to encode which index type it contains.
rustc_index::newtype_index! {
    #[encodable]
    #[max = 0x7FFF_FFFF]
    pub struct SerializedDepNodeIndex {}
}

const DEP_NODE_SIZE: usize = size_of::<SerializedDepNodeIndex>();
/// Amount of padding we need to add to the edge list data so that we can retrieve every
/// SerializedDepNodeIndex with a fixed-size read then mask.
const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;
/// Number of bits we need to store the number of used bytes in a SerializedDepNodeIndex.
/// Note that wherever we encode byte widths like this we actually store the number of bytes used
/// minus 1; for a 4-byte value we technically would have 5 widths to store, but using one byte to
/// store zeroes (which are relatively rare) is a decent tradeoff to save a bit in our bitfields.
const DEP_NODE_WIDTH_BITS: usize = DEP_NODE_SIZE / 2;

/// Data for use when recompiling the **current crate**.
///
/// There may be unused indices with DepKind::Null in this graph due to batch allocation of
/// indices to threads.
#[derive(Debug, Default)]
pub struct SerializedDepGraph {
    /// The set of all DepNodes in the graph
    nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
    /// A value fingerprint associated with each [`DepNode`] in [`Self::nodes`],
    /// typically a hash of the value returned by the node's query in the
    /// previous incremental-compilation session.
    ///
    /// Some nodes don't have a meaningful value hash (e.g. queries with `no_hash`),
    /// so they store a dummy value here instead (e.g. [`Fingerprint::ZERO`]).
    value_fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
    /// For each DepNode, stores the list of edges originating from that
    /// DepNode. Encoded as an [`EdgeHeader`] (start offset, byte width, and edge
    /// count) indexing into edge_list_data, which holds the actual
    /// DepNodeIndices of the target nodes.
    edge_list_indices: IndexVec<SerializedDepNodeIndex, EdgeHeader>,
    /// A flattened list of all edge targets in the graph, stored in the same
    /// varint encoding that we use on disk. Edge sources are implicit in edge_list_indices.
    edge_list_data: Vec<u8>,
    /// For each dep kind, stores a map from key fingerprints back to the index
    /// of the corresponding node. This is the inverse of `nodes`.
    index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>,
    /// The number of previous compilation sessions. This is used to generate
    /// unique anon dep nodes per session.
    session_count: u64,
}

impl SerializedDepGraph {
    #[inline]
    pub fn edge_targets_from(
        &self,
        source: SerializedDepNodeIndex,
    ) -> impl Iterator<Item = SerializedDepNodeIndex> + Clone {
        let header = self.edge_list_indices[source];
        let mut raw = &self.edge_list_data[header.start()..];

        let bytes_per_index = header.bytes_per_index();

        // LLVM doesn't hoist EdgeHeader::mask so we do it ourselves.
        let mask = header.mask();
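        // A decoding sketch (illustrative): with 2-byte indices, each step below
        // reads a full 4-byte window. Over the bytes [0x23, 0x01, 0x47, 0x05],
        // `u32::from_le_bytes` yields 0x05470123 and masking with 0xFFFF leaves
        // 0x0123, the real index; the cursor then advances by only 2 bytes so
        // the next read starts at 0x47.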
        (0..header.num_edges).map(move |_| {
            // Doing this slicing in this order ensures that the first bounds check suffices for
            // all the others.
            let index = &raw[..DEP_NODE_SIZE];
            raw = &raw[bytes_per_index..];
            let index = u32::from_le_bytes(index.try_into().unwrap()) & mask;
            SerializedDepNodeIndex::from_u32(index)
        })
    }

    #[inline]
    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> &DepNode {
        &self.nodes[dep_node_index]
    }

    #[inline]
    pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
        self.index.get(dep_node.kind.as_usize())?.get(&dep_node.key_fingerprint).copied()
    }

    #[inline]
    pub fn value_fingerprint_for_index(
        &self,
        dep_node_index: SerializedDepNodeIndex,
    ) -> Fingerprint {
        self.value_fingerprints[dep_node_index]
    }

    #[inline]
    pub fn node_count(&self) -> usize {
        self.nodes.len()
    }

    #[inline]
    pub fn session_count(&self) -> u64 {
        self.session_count
    }
}

/// A packed representation of an edge's start index and byte width.
///
/// This is packed by stealing 2 bits from the start index, which means we only accommodate edge
/// data arrays up to a quarter of our address space. Which seems fine.
#[derive(Debug, Clone, Copy)]
struct EdgeHeader {
    repr: usize,
    num_edges: u32,
}

impl EdgeHeader {
    #[inline]
    fn start(self) -> usize {
        self.repr >> DEP_NODE_WIDTH_BITS
    }

    #[inline]
    fn bytes_per_index(self) -> usize {
        (self.repr & mask(DEP_NODE_WIDTH_BITS)) + 1
    }

    #[inline]
    fn mask(self) -> u32 {
        mask(self.bytes_per_index() * 8) as u32
    }
}

#[inline]
fn mask(bits: usize) -> usize {
    usize::MAX >> ((size_of::<usize>() * 8) - bits)
}
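
// A hypothetical sanity check (not in the original file) illustrating the
// `EdgeHeader` packing scheme and the `mask` helper above; a sketch, assuming
// the 4-byte `SerializedDepNodeIndex` above, so `DEP_NODE_WIDTH_BITS` is 2.
#[cfg(test)]
mod packing_sketch {
    use super::*;

    #[test]
    fn edge_header_round_trip() {
        // Pack an edge list starting at byte offset 100 with 2-byte indices,
        // mirroring `SerializedNodeHeader::edges_header`.
        let header = EdgeHeader { repr: (100 << DEP_NODE_WIDTH_BITS) | (2 - 1), num_edges: 7 };
        assert_eq!(header.start(), 100);
        assert_eq!(header.bytes_per_index(), 2);
        // The mask trims a fixed-size 4-byte read down to the 2 real bytes.
        assert_eq!(header.mask(), 0xFFFF);
        assert_eq!(mask(8), 0xFF);
    }
}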

impl SerializedDepGraph {
    #[instrument(level = "debug", skip(d))]
    pub fn decode(d: &mut MemDecoder<'_>) -> Arc<SerializedDepGraph> {
        // The last 24 bytes are the node max, node count, and edge count.
        debug!("position: {:?}", d.position());

        // `node_max` is the number of indices including empty nodes while `node_count`
        // is the number of actually encoded nodes.
        let (node_max, node_count, edge_count) =
            d.with_position(d.len() - 3 * IntEncodedWithFixedSize::ENCODED_SIZE, |d| {
                debug!("position: {:?}", d.position());
                let node_max = IntEncodedWithFixedSize::decode(d).0 as usize;
                let node_count = IntEncodedWithFixedSize::decode(d).0 as usize;
                let edge_count = IntEncodedWithFixedSize::decode(d).0 as usize;
                (node_max, node_count, edge_count)
            });
        debug!("position: {:?}", d.position());

        debug!(?node_count, ?edge_count);

        let graph_bytes = d.len() - (3 * IntEncodedWithFixedSize::ENCODED_SIZE) - d.position();

        let mut nodes = IndexVec::from_elem_n(
            DepNode {
                kind: DepKind::Null,
                key_fingerprint: PackedFingerprint::from(Fingerprint::ZERO),
            },
            node_max,
        );
        let mut value_fingerprints = IndexVec::from_elem_n(Fingerprint::ZERO, node_max);
        let mut edge_list_indices =
            IndexVec::from_elem_n(EdgeHeader { repr: 0, num_edges: 0 }, node_max);

        // This estimation assumes that all of the encoded bytes are for the edge lists or for the
        // fixed-size node headers. But that's not necessarily true; if any edge list has a length
        // that spills out of the size we can bit-pack into SerializedNodeHeader then some of the
        // total serialized size is also used by leb128-encoded edge list lengths. Neglecting that
        // contribution to graph_bytes means our estimation of the bytes needed for edge_list_data
        // slightly overshoots. But it cannot overshoot by much; consider that the worst case is
        // for a node with length 64, which means the spilled 1-byte leb128 length is 1 byte of at
        // least (38 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
        // length is about the same fractional overhead and it amortizes for yet greater lengths.
        let mut edge_list_data =
            Vec::with_capacity(graph_bytes - node_count * size_of::<SerializedNodeHeader>());

        for _ in 0..node_count {
            // Decode the header for this node; the header packs together as many of the fixed-size
            // fields as possible to limit the number of times we update decoder state.
            let node_header = SerializedNodeHeader { bytes: d.read_array() };

            let index = node_header.index();

            let node = &mut nodes[index];
            // Make sure there are no duplicate indices in the dep graph.
            assert!(node_header.node().kind != DepKind::Null && node.kind == DepKind::Null);
            *node = node_header.node();

            value_fingerprints[index] = node_header.value_fingerprint();

            // If the length of this node's edge list is small, the length is stored in the header.
            // If it is not, we fall back to another decoder call.
            let num_edges = node_header.len().unwrap_or_else(|| d.read_u32());

            // The edges index list uses the same varint strategy as rmeta tables; we select the
            // number of byte elements per-array not per-element. This lets us read the whole edge
            // list for a node with one decoder call and also use the on-disk format in memory.
            let edges_len_bytes = node_header.bytes_per_index() * (num_edges as usize);
            // The in-memory structure for the edges list stores the byte width of the edges on
            // this node with the offset into the global edge data array.
            let edges_header = node_header.edges_header(&edge_list_data, num_edges);

            edge_list_data.extend(d.read_raw_bytes(edges_len_bytes));

            edge_list_indices[index] = edges_header;
        }

        // When we access the edge list data, we do a fixed-size read from the edge list data then
        // mask off the bytes that aren't for that edge index, so the last read may dangle off the
        // end of the array. This padding ensures it doesn't.
        edge_list_data.extend(&[0u8; DEP_NODE_PAD]);
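        // E.g. (illustrative): with 4-byte indices DEP_NODE_PAD is 3, so even if
        // the final edge is encoded in a single byte, the fixed 4-byte read in
        // `edge_targets_from` stays in bounds and the mask discards the padding.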

        // Read the number of each dep kind and use it to create a hash map with a suitable size.
        let mut index: Vec<_> = (0..(DepKind::MAX + 1))
            .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
            .collect();

        let session_count = d.read_u64();

        for (idx, node) in nodes.iter_enumerated() {
            if index[node.kind.as_usize()].insert(node.key_fingerprint, idx).is_some() {
                // Empty nodes and side effect nodes can have duplicates
                if node.kind != DepKind::Null && node.kind != DepKind::SideEffect {
                    let kind = node.kind;
                    panic!(
                        "Error: A dep graph node ({kind:?}) does not have a unique index. \
                         Running a clean build on a nightly compiler with \
                         `-Z incremental-verify-ich` can help narrow down the issue for reporting. \
                         A clean build may also work around the issue.\n
                         DepNode: {node:?}"
                    )
                }
            }
        }

        Arc::new(SerializedDepGraph {
            nodes,
            value_fingerprints,
            edge_list_indices,
            edge_list_data,
            index,
            session_count,
        })
    }
}

/// A packed representation of all the fixed-size fields in a `NodeInfo`.
///
/// This stores in one byte array:
/// * The value `Fingerprint` in the `NodeInfo`
/// * The key `Fingerprint` in the `DepNode` that is in this `NodeInfo`
/// * The `DepKind`'s discriminant (a u16, but not all bits are used...)
/// * The byte width of the encoded edges for this node
/// * In whatever bits remain, the length of the edge list for this node, if it fits
struct SerializedNodeHeader {
    // 2 bytes for the DepNode
    // 4 bytes for the index
    // 16 for Fingerprint in DepNode
    // 16 for Fingerprint in NodeInfo
    bytes: [u8; 38],
}

// The fields of a `SerializedNodeHeader`. This struct is an implementation detail and exists only
// to make the implementation of `SerializedNodeHeader` simpler.
struct Unpacked {
    len: Option<u32>,
    bytes_per_index: usize,
    kind: DepKind,
    index: SerializedDepNodeIndex,
    key_fingerprint: PackedFingerprint,
    value_fingerprint: Fingerprint,
}

// Bit fields, where
// K: bits used to store the kind (`KIND_BITS`)
// N: bits used to store the byte width of elements of the edge list (`WIDTH_BITS`)
// are, matching the packing in `new` and `unpack` below,
// 0..K     kind
// K..K+N   bytes per index
// K+N..16  length of the edge list, if it fits
impl SerializedNodeHeader {
    const TOTAL_BITS: usize = size_of::<DepKind>() * 8;
    const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
    const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
    const KIND_BITS: usize = Self::TOTAL_BITS - DepKind::MAX.leading_zeros() as usize;
    const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;
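
    // A worked example under a hypothetical `DepKind::MAX` of 300 (not the real
    // value): 300 fits in 9 bits, so KIND_BITS = 9, WIDTH_BITS = 2, and
    // LEN_BITS = 16 - 9 - 2 = 5. MAX_INLINE_LEN = (0xFFFF >> 11) - 1 = 30, so
    // edge lists of up to 30 edges store their length inline in the header.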

    #[inline]
    fn new(
        node: &DepNode,
        index: DepNodeIndex,
        value_fingerprint: Fingerprint,
        edge_max_index: u32,
        edge_count: usize,
    ) -> Self {
        debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS);

        let mut head = node.kind.as_u16();

        let free_bytes = edge_max_index.leading_zeros() as usize / 8;
        let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1);
        head |= (bytes_per_index as u16) << Self::KIND_BITS;

        // Encode number of edges + 1 so that we can reserve 0 to indicate that the len doesn't fit
        // in this bitfield.
        if edge_count <= Self::MAX_INLINE_LEN {
            head |= (edge_count as u16 + 1) << (Self::KIND_BITS + Self::WIDTH_BITS);
        }

        let hash: Fingerprint = node.key_fingerprint.into();

        // Using half-open ranges ensures an unconditional panic if we get the magic numbers wrong.
        let mut bytes = [0u8; 38];
        bytes[..2].copy_from_slice(&head.to_le_bytes());
        bytes[2..6].copy_from_slice(&index.as_u32().to_le_bytes());
        bytes[6..22].copy_from_slice(&hash.to_le_bytes());
        bytes[22..].copy_from_slice(&value_fingerprint.to_le_bytes());

        #[cfg(debug_assertions)]
        {
            let res = Self { bytes };
            assert_eq!(value_fingerprint, res.value_fingerprint());
            assert_eq!(*node, res.node());
            if let Some(len) = res.len() {
                assert_eq!(edge_count, len as usize);
            }
        }

        Self { bytes }
    }

    #[inline]
    fn unpack(&self) -> Unpacked {
        let head = u16::from_le_bytes(self.bytes[..2].try_into().unwrap());
        let index = u32::from_le_bytes(self.bytes[2..6].try_into().unwrap());
        let key_fingerprint = self.bytes[6..22].try_into().unwrap();
        let value_fingerprint = self.bytes[22..].try_into().unwrap();

        let kind = head & mask(Self::KIND_BITS) as u16;
        let bytes_per_index = (head >> Self::KIND_BITS) & mask(Self::WIDTH_BITS) as u16;
        let len = (head as u32) >> (Self::WIDTH_BITS + Self::KIND_BITS);

        Unpacked {
            len: len.checked_sub(1),
            bytes_per_index: bytes_per_index as usize + 1,
            kind: DepKind::from_u16(kind),
            index: SerializedDepNodeIndex::from_u32(index),
            key_fingerprint: Fingerprint::from_le_bytes(key_fingerprint).into(),
            value_fingerprint: Fingerprint::from_le_bytes(value_fingerprint),
        }
    }

    #[inline]
    fn len(&self) -> Option<u32> {
        self.unpack().len
    }

    #[inline]
    fn bytes_per_index(&self) -> usize {
        self.unpack().bytes_per_index
    }

    #[inline]
    fn index(&self) -> SerializedDepNodeIndex {
        self.unpack().index
    }

    #[inline]
    fn value_fingerprint(&self) -> Fingerprint {
        self.unpack().value_fingerprint
    }

    #[inline]
    fn node(&self) -> DepNode {
        let Unpacked { kind, key_fingerprint, .. } = self.unpack();
        DepNode { kind, key_fingerprint }
    }

    #[inline]
    fn edges_header(&self, edge_list_data: &[u8], num_edges: u32) -> EdgeHeader {
        EdgeHeader {
            repr: (edge_list_data.len() << DEP_NODE_WIDTH_BITS) | (self.bytes_per_index() - 1),
            num_edges,
        }
    }
}

#[derive(Debug)]
struct NodeInfo {
    node: DepNode,
    value_fingerprint: Fingerprint,
    edges: EdgesVec,
}

impl NodeInfo {
    fn encode(&self, e: &mut MemEncoder, index: DepNodeIndex) {
        let NodeInfo { ref node, value_fingerprint, ref edges } = *self;
        let header = SerializedNodeHeader::new(
            node,
            index,
            value_fingerprint,
            edges.max_index(),
            edges.len(),
        );
        e.write_array(header.bytes);

        if header.len().is_none() {
            // The edges are all unique and the number of unique indices is less than u32::MAX.
            e.emit_u32(edges.len().try_into().unwrap());
        }

        let bytes_per_index = header.bytes_per_index();
        for node_index in edges.iter() {
            e.write_with(|dest| {
                *dest = node_index.as_u32().to_le_bytes();
                bytes_per_index
            });
        }
    }

    /// Encode a node that was promoted from the previous graph. It reads the edges directly from
    /// the previous dep graph and expects all edges to already have a new dep node index assigned.
    /// This avoids the overhead of constructing `EdgesVec`, which would be needed to call `encode`.
    #[inline]
    fn encode_promoted(
        e: &mut MemEncoder,
        node: &DepNode,
        index: DepNodeIndex,
        value_fingerprint: Fingerprint,
        prev_index: SerializedDepNodeIndex,
        colors: &DepNodeColorMap,
        previous: &SerializedDepGraph,
    ) -> usize {
        let edges = previous.edge_targets_from(prev_index);
        let edge_count = edges.size_hint().0;

        // Find the highest edge in the new dep node indices
        let edge_max =
            edges.clone().map(|i| colors.current(i).unwrap().as_u32()).max().unwrap_or(0);

        let header =
            SerializedNodeHeader::new(node, index, value_fingerprint, edge_max, edge_count);
        e.write_array(header.bytes);

        if header.len().is_none() {
            // The edges are all unique and the number of unique indices is less than u32::MAX.
            e.emit_u32(edge_count.try_into().unwrap());
        }

        let bytes_per_index = header.bytes_per_index();
        for node_index in edges {
            let node_index = colors.current(node_index).unwrap();
            e.write_with(|dest| {
                *dest = node_index.as_u32().to_le_bytes();
                bytes_per_index
            });
        }

        edge_count
    }
}

struct Stat {
    kind: DepKind,
    node_counter: u64,
    edge_counter: u64,
}

struct LocalEncoderState {
    next_node_index: u32,
    remaining_node_index: u32,
    encoder: MemEncoder,
    node_count: usize,
    edge_count: usize,

    /// Stores the number of times we've encoded each dep kind.
    kind_stats: Vec<u32>,
}

struct LocalEncoderResult {
    node_max: u32,
    node_count: usize,
    edge_count: usize,

    /// Stores the number of times we've encoded each dep kind.
    kind_stats: Vec<u32>,
}

struct EncoderState {
    next_node_index: AtomicU64,
    previous: Arc<SerializedDepGraph>,
    file: Lock<Option<FileEncoder>>,
    local: WorkerLocal<RefCell<LocalEncoderState>>,
    stats: Option<Lock<FxHashMap<DepKind, Stat>>>,
}

impl EncoderState {
    fn new(encoder: FileEncoder, record_stats: bool, previous: Arc<SerializedDepGraph>) -> Self {
        Self {
            previous,
            next_node_index: AtomicU64::new(0),
            stats: record_stats.then(|| Lock::new(FxHashMap::default())),
            file: Lock::new(Some(encoder)),
            local: WorkerLocal::new(|_| {
                RefCell::new(LocalEncoderState {
                    next_node_index: 0,
                    remaining_node_index: 0,
                    edge_count: 0,
                    node_count: 0,
                    encoder: MemEncoder::new(),
                    kind_stats: iter::repeat_n(0, DepKind::MAX as usize + 1).collect(),
                })
            }),
        }
    }

    #[inline]
    fn next_index(&self, local: &mut LocalEncoderState) -> DepNodeIndex {
        if local.remaining_node_index == 0 {
            const COUNT: u32 = 256;

            // We assume that there won't be enough active threads to overflow `u64` from `u32::MAX` here.
            // This can exceed `u32::MAX` by at most `N` * `COUNT` where `N` is the thread pool count since
            // `try_into().unwrap()` will make threads panic when `self.next_node_index` exceeds `u32::MAX`.
            local.next_node_index =
                self.next_node_index.fetch_add(COUNT as u64, Ordering::Relaxed).try_into().unwrap();

            // Check that we'll stay within `u32`
            local.next_node_index.checked_add(COUNT).unwrap();

            local.remaining_node_index = COUNT;
        }

        DepNodeIndex::from_u32(local.next_node_index)
    }
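
    // Allocation sketch (illustrative): with COUNT = 256, the first thread to
    // call `next_index` claims indices 0..256 and hands them out one at a time
    // via `bump_index`, while another thread concurrently claims 256..512.
    // Indices still unclaimed when encoding finishes remain as `DepKind::Null`
    // gaps in the decoded graph (see the docs on `SerializedDepGraph`).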

    /// Marks the index previously returned by `next_index` as used.
    #[inline]
    fn bump_index(&self, local: &mut LocalEncoderState) {
        local.remaining_node_index -= 1;
        local.next_node_index += 1;
        local.node_count += 1;
    }

    #[inline]
    fn record(
        &self,
        node: &DepNode,
        index: DepNodeIndex,
        edge_count: usize,
        edges: impl FnOnce(&Self) -> Vec<DepNodeIndex>,
        retained_graph: &Option<Lock<RetainedDepGraph>>,
        local: &mut LocalEncoderState,
    ) {
        local.kind_stats[node.kind.as_usize()] += 1;
        local.edge_count += edge_count;

        if let Some(retained_graph) = &retained_graph {
            // Call `edges` before the outlined code to allow the closure to be optimized out.
            let edges = edges(self);

            // Outline the build of the full dep graph as it's typically disabled and cold.
            outline(move || {
                // Do not ICE when a query is called from within `with_query`.
                if let Some(retained_graph) = &mut retained_graph.try_lock() {
                    retained_graph.push(index, *node, &edges);
                }
            });
        }

        if let Some(stats) = &self.stats {
            let kind = node.kind;

            // Outline the stats code as it's typically disabled and cold.
            outline(move || {
                let mut stats = stats.lock();
                let stat =
                    stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
                stat.node_counter += 1;
                stat.edge_counter += edge_count as u64;
            });
        }
    }

    #[inline]
    fn flush_mem_encoder(&self, local: &mut LocalEncoderState) {
        let data = &mut local.encoder.data;
        if data.len() > 64 * 1024 {
            self.file.lock().as_mut().unwrap().emit_raw_bytes(&data[..]);
            data.clear();
        }
    }

    /// Encodes a node to the current graph.
    fn encode_node(
        &self,
        index: DepNodeIndex,
        node: &NodeInfo,
        retained_graph: &Option<Lock<RetainedDepGraph>>,
        local: &mut LocalEncoderState,
    ) {
        node.encode(&mut local.encoder, index);
        self.flush_mem_encoder(&mut *local);
        self.record(
            &node.node,
            index,
            node.edges.len(),
            |_| node.edges[..].to_vec(),
            retained_graph,
            &mut *local,
        );
    }

    /// Encodes a node that was promoted from the previous graph. It reads the information directly from
    /// the previous dep graph for performance reasons.
    ///
    /// This differs from `encode_node` where you have to explicitly provide the relevant `NodeInfo`.
    ///
    /// It expects all edges to already have a new dep node index assigned.
    #[inline]
    fn encode_promoted_node(
        &self,
        index: DepNodeIndex,
        prev_index: SerializedDepNodeIndex,
        retained_graph: &Option<Lock<RetainedDepGraph>>,
        colors: &DepNodeColorMap,
        local: &mut LocalEncoderState,
    ) {
        let node = self.previous.index_to_node(prev_index);
        let value_fingerprint = self.previous.value_fingerprint_for_index(prev_index);
        let edge_count = NodeInfo::encode_promoted(
            &mut local.encoder,
            node,
            index,
            value_fingerprint,
            prev_index,
            colors,
            &self.previous,
        );
        self.flush_mem_encoder(&mut *local);
        self.record(
            node,
            index,
            edge_count,
            |this| {
                this.previous
                    .edge_targets_from(prev_index)
                    .map(|i| colors.current(i).unwrap())
                    .collect()
            },
            retained_graph,
            &mut *local,
        );
    }

    fn finish(&self, profiler: &SelfProfilerRef, current: &CurrentDepGraph) -> FileEncodeResult {
        // Prevent more indices from being allocated.
        self.next_node_index.store(u32::MAX as u64 + 1, Ordering::SeqCst);

        let results = broadcast(|_| {
            let mut local = self.local.borrow_mut();

            // Prevent more indices from being allocated on this thread.
            local.remaining_node_index = 0;

            let data = mem::replace(&mut local.encoder.data, Vec::new());
            self.file.lock().as_mut().unwrap().emit_raw_bytes(&data);

            LocalEncoderResult {
                kind_stats: local.kind_stats.clone(),
                node_max: local.next_node_index,
                node_count: local.node_count,
                edge_count: local.edge_count,
            }
        });

        let mut encoder = self.file.lock().take().unwrap();

        let mut kind_stats: Vec<u32> = iter::repeat_n(0, DepKind::MAX as usize + 1).collect();

        let mut node_max = 0;
        let mut node_count = 0;
        let mut edge_count = 0;

        for result in results {
            node_max = max(node_max, result.node_max);
            node_count += result.node_count;
            edge_count += result.edge_count;
            for (i, stat) in result.kind_stats.iter().enumerate() {
                kind_stats[i] += stat;
            }
        }

        // Encode the number of each dep kind encountered
        for count in kind_stats.iter() {
            count.encode(&mut encoder);
        }

        self.previous.session_count.checked_add(1).unwrap().encode(&mut encoder);

        debug!(?node_max, ?node_count, ?edge_count);
        debug!("position: {:?}", encoder.position());
        IntEncodedWithFixedSize(node_max.try_into().unwrap()).encode(&mut encoder);
        IntEncodedWithFixedSize(node_count.try_into().unwrap()).encode(&mut encoder);
        IntEncodedWithFixedSize(edge_count.try_into().unwrap()).encode(&mut encoder);
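        // Trailer sketch (illustrative): these three fixed-size u64 values form
        // the final 24 bytes of the file, which is what `decode` seeks to with
        // `d.len() - 3 * IntEncodedWithFixedSize::ENCODED_SIZE`.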
        debug!("position: {:?}", encoder.position());
        // Drop the encoder so that nothing is written after the counts.
        let result = encoder.finish();
        if let Ok(position) = result {
            // FIXME(rylev): we hardcode the dep graph file name so we
            // don't need a dependency on rustc_incremental just for that.
            profiler.artifact_size("dep_graph", "dep-graph.bin", position as u64);
        }

        self.print_incremental_info(current, node_count, edge_count);

        result
    }

    fn print_incremental_info(
        &self,
        current: &CurrentDepGraph,
        total_node_count: usize,
        total_edge_count: usize,
    ) {
        if let Some(record_stats) = &self.stats {
            let record_stats = record_stats.lock();
            // `stats` is sorted below so we can allow this lint here.
            #[allow(rustc::potential_query_instability)]
            let mut stats: Vec<_> = record_stats.values().collect();
            stats.sort_by_key(|s| -(s.node_counter as i64));

            const SEPARATOR: &str = "[incremental] --------------------------------\
                                     ----------------------------------------------\
                                     ------------";

            eprintln!("[incremental]");
            eprintln!("[incremental] DepGraph Statistics");
            eprintln!("{SEPARATOR}");
            eprintln!("[incremental]");
            eprintln!("[incremental] Total Node Count: {}", total_node_count);
            eprintln!("[incremental] Total Edge Count: {}", total_edge_count);

            if cfg!(debug_assertions) {
                let total_read_count = current.total_read_count.load(Ordering::Relaxed);
                let total_duplicate_read_count =
                    current.total_duplicate_read_count.load(Ordering::Relaxed);
                eprintln!("[incremental] Total Edge Reads: {total_read_count}");
                eprintln!("[incremental] Total Duplicate Edge Reads: {total_duplicate_read_count}");
            }

            eprintln!("[incremental]");
            eprintln!(
                "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
                "Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
            );
            eprintln!("{SEPARATOR}");

            for stat in stats {
                let node_kind_ratio =
                    (100.0 * (stat.node_counter as f64)) / (total_node_count as f64);
                let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);

                eprintln!(
                    "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
                    format!("{:?}", stat.kind),
                    node_kind_ratio,
                    stat.node_counter,
                    node_kind_avg_edges,
                );
            }

            eprintln!("{SEPARATOR}");
            eprintln!("[incremental]");
        }
    }
}

pub(crate) struct GraphEncoder {
    profiler: SelfProfilerRef,
    status: EncoderState,
    /// In-memory copy of the dep graph; only present if `-Zquery-dep-graph` is set.
    retained_graph: Option<Lock<RetainedDepGraph>>,
}

impl GraphEncoder {
    pub(crate) fn new(
        sess: &Session,
        encoder: FileEncoder,
        prev_node_count: usize,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        let retained_graph = sess
            .opts
            .unstable_opts
            .query_dep_graph
            .then(|| Lock::new(RetainedDepGraph::new(prev_node_count)));
        let status = EncoderState::new(encoder, sess.opts.unstable_opts.incremental_info, previous);
        GraphEncoder { status, retained_graph, profiler: sess.prof.clone() }
    }

    pub(crate) fn with_retained_dep_graph(&self, f: impl Fn(&RetainedDepGraph)) {
        if let Some(retained_graph) = &self.retained_graph {
            f(&retained_graph.lock())
        }
    }

    /// Encodes a node that does not exist in the previous graph.
    pub(crate) fn send_new(
        &self,
        node: DepNode,
        value_fingerprint: Fingerprint,
        edges: EdgesVec,
    ) -> DepNodeIndex {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");
        let node = NodeInfo { node, value_fingerprint, edges };
        let mut local = self.status.local.borrow_mut();
        let index = self.status.next_index(&mut *local);
        self.status.bump_index(&mut *local);
        self.status.encode_node(index, &node, &self.retained_graph, &mut *local);
        index
    }

    /// Encodes a node that exists in the previous graph, but was re-executed.
    ///
    /// This will also ensure the dep node is colored either red or green.
    pub(crate) fn send_and_color(
        &self,
        prev_index: SerializedDepNodeIndex,
        colors: &DepNodeColorMap,
        node: DepNode,
        value_fingerprint: Fingerprint,
        edges: EdgesVec,
        is_green: bool,
    ) -> DepNodeIndex {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");
        let node = NodeInfo { node, value_fingerprint, edges };

        let mut local = self.status.local.borrow_mut();

        let index = self.status.next_index(&mut *local);

        // Use `try_mark` to avoid racing when `send_promoted` is called concurrently
        // on the same index.
        match colors.try_mark(prev_index, index, is_green) {
            Ok(()) => (),
            Err(None) => panic!("dep node {:?} is unexpectedly red", prev_index),
            Err(Some(dep_node_index)) => return dep_node_index,
        }

        self.status.bump_index(&mut *local);
        self.status.encode_node(index, &node, &self.retained_graph, &mut *local);
        index
    }

    /// Encodes a node that was promoted from the previous graph. It reads the information directly
    /// from the previous dep graph and expects all edges to already have a new dep node index
    /// assigned.
    ///
    /// This will also ensure the dep node is marked green if `Some` is returned.
    #[inline]
    pub(crate) fn send_promoted(
        &self,
        prev_index: SerializedDepNodeIndex,
        colors: &DepNodeColorMap,
    ) -> Option<DepNodeIndex> {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");

        let mut local = self.status.local.borrow_mut();
        let index = self.status.next_index(&mut *local);

        // Use `try_mark` to avoid racing when `send_promoted` or `send_and_color`
        // is called concurrently on the same index.
        match colors.try_mark(prev_index, index, true) {
            Ok(()) => {
                self.status.bump_index(&mut *local);
                self.status.encode_promoted_node(
                    index,
                    prev_index,
                    &self.retained_graph,
                    colors,
                    &mut *local,
                );
                Some(index)
            }
            Err(dep_node_index) => dep_node_index,
        }
    }

    pub(crate) fn finish(&self, current: &CurrentDepGraph) -> FileEncodeResult {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph_finish");

        self.status.finish(&self.profiler, current)
    }
}