//! The data that we will serialize and deserialize.
//!
//! Notionally, the dep-graph is a sequence of NodeInfo with the dependencies
//! specified inline. The maximum node index, the node count and the edge count are stored as
//! the last 24 bytes of the file, so we can find them easily at decoding time.
//!
//! The serialisation is performed on-demand when each node is emitted. Using this
//! scheme, we do not need to keep the current graph in memory.
//!
//! The deserialization is performed manually, in order to convert from the stored
//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
//! node and edge count are stored at the end of the file, all the arrays can be
//! pre-allocated with the right length.
//!
//! The encoding of the dep-graph is generally designed around the fact that fixed-size
//! reads of encoded data are generally faster than variable-sized reads. Ergo we adopt
//! essentially the same varint encoding scheme used in the rmeta format; the edge lists
//! for each node on the graph store a 2-bit integer which is the number of bytes per edge
//! index in that node's edge list. We effectively ignore that an edge index of 0 could be
//! encoded with 0 bytes in order to not require 3 bits to store the byte width of the edges.
//! The overhead of calculating the correct byte width for each edge is mitigated by
//! building edge lists with [`EdgesVec`] which keeps a running max of the edges in a node.
//!
//! When we decode this data, we do not immediately create [`SerializedDepNodeIndex`] and
//! instead keep the data in its denser serialized form which lets us turn our on-disk size
//! efficiency directly into a peak memory reduction. When we convert these encoded-in-memory
//! values into their fully-deserialized type, we use a fixed-size read of the encoded array
//! then mask off any errant bytes we read. The array of edge index bytes is padded to permit this.
//!
//! We also encode and decode the entire rest of each node using [`SerializedNodeHeader`]
//! to let this encoding and decoding be done in one fixed-size operation. These headers contain
//! two [`Fingerprint`]s along with the serialized [`DepKind`], and the number of edge indices
//! in the node and the number of bytes used to encode the edge indices for this node. The
//! [`DepKind`], number of edges, and bytes per edge are all bit-packed together, if they fit.
//! If the number of edges in this node does not fit in the bits available in the header, we
//! store it directly after the header with leb128.
//!
//! Dep-graph indices are bulk allocated to threads inside `LocalEncoderState`. Having threads
//! own these indices helps avoid races when they are conditionally used when marking nodes green.
//! It also reduces congestion on the shared index count.
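//!
//! As an illustrative sketch (not part of the code below), decoding a single edge index from
//! the padded `edge_list_data` array amounts to one fixed-size read followed by a mask, where
//! `bytes_per_index` comes from the 2-bit width field described above:
//!
//! ```ignore (illustrative sketch)
//! // Read a full DEP_NODE_SIZE-byte word unconditionally, then keep only the bytes
//! // that belong to this edge index; the trailing padding makes the unconditional
//! // read safe for the last edge in the array.
//! let word = u32::from_le_bytes(raw[..DEP_NODE_SIZE].try_into().unwrap());
//! let index = word & mask(bytes_per_index * 8) as u32;
//! raw = &raw[bytes_per_index..];
//! ```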

use std::cell::RefCell;
use std::cmp::max;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::{iter, mem, u64};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::outline;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::{AtomicU64, Lock, WorkerLocal, broadcast};
use rustc_data_structures::unhash::UnhashMap;
use rustc_index::IndexVec;
use rustc_serialize::opaque::mem_encoder::MemEncoder;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_session::Session;
use tracing::{debug, instrument};

use super::graph::{CurrentDepGraph, DepNodeColorMap};
use super::query::DepGraphQuery;
use super::{DepKind, DepNode, DepNodeIndex, Deps};
use crate::dep_graph::edges::EdgesVec;

// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
68// unused so that we can store multiple index types in `CompressedHybridIndex`,
69// and use those bits to encode which index type it contains.
rustc_index::newtype_index! {
    #[encodable]
    #[max = 0x7FFF_FFFF]
    pub struct SerializedDepNodeIndex {}
}

const DEP_NODE_SIZE: usize = size_of::<SerializedDepNodeIndex>();
/// Amount of padding we need to add to the edge list data so that we can retrieve every
/// SerializedDepNodeIndex with a fixed-size read then mask.
const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;
/// Number of bits we need to store the number of used bytes in a SerializedDepNodeIndex.
/// Note that wherever we encode byte widths like this we actually store the number of bytes used
/// minus 1; for a 4-byte value we technically would have 5 widths to store, but using one byte to
/// store zeroes (which are relatively rare) is a decent tradeoff to save a bit in our bitfields.
const DEP_NODE_WIDTH_BITS: usize = DEP_NODE_SIZE / 2;
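// Illustrative example of the "minus 1" width encoding above (not used by the code): with
// DEP_NODE_SIZE == 4, a stored width field of 0b00 means 1 byte per edge index and 0b11 means
// 4 bytes per edge index.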

/// Data for use when recompiling the **current crate**.
///
/// There may be unused indices with DEP_KIND_NULL in this graph due to batch allocation of
/// indices to threads.
#[derive(Debug, Default)]
pub struct SerializedDepGraph {
    /// The set of all DepNodes in the graph
    nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
    /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
    /// the DepNode at the same index in the nodes vector.
    fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
    /// For each DepNode, stores the list of edges originating from that
    /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
    /// which holds the actual DepNodeIndices of the target nodes.
    edge_list_indices: IndexVec<SerializedDepNodeIndex, EdgeHeader>,
    /// A flattened list of all edge targets in the graph, stored in the same
    /// varint encoding that we use on disk. Edge sources are implicit in edge_list_indices.
    edge_list_data: Vec<u8>,
    /// Stores a map from fingerprints to nodes per dep node kind.
    /// This is the reciprocal of `nodes`.
    index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>,
    /// The number of previous compilation sessions. This is used to generate
    /// unique anon dep nodes per session.
    session_count: u64,
}

impl SerializedDepGraph {
    #[inline]
    pub fn edge_targets_from(
        &self,
        source: SerializedDepNodeIndex,
    ) -> impl Iterator<Item = SerializedDepNodeIndex> + Clone {
        let header = self.edge_list_indices[source];
        let mut raw = &self.edge_list_data[header.start()..];

        let bytes_per_index = header.bytes_per_index();

        // LLVM doesn't hoist EdgeHeader::mask so we do it ourselves.
        let mask = header.mask();
        (0..header.num_edges).map(move |_| {
            // Doing this slicing in this order ensures that the first bounds check suffices for
            // all the others.
            let index = &raw[..DEP_NODE_SIZE];
            raw = &raw[bytes_per_index..];
            let index = u32::from_le_bytes(index.try_into().unwrap()) & mask;
            SerializedDepNodeIndex::from_u32(index)
        })
    }

    #[inline]
    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
        self.nodes[dep_node_index]
    }

    #[inline]
    pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
        self.index.get(dep_node.kind.as_usize())?.get(&dep_node.hash).cloned()
    }

    #[inline]
    pub fn fingerprint_by_index(&self, dep_node_index: SerializedDepNodeIndex) -> Fingerprint {
        self.fingerprints[dep_node_index]
    }

    #[inline]
    pub fn node_count(&self) -> usize {
        self.nodes.len()
    }

    #[inline]
    pub fn session_count(&self) -> u64 {
        self.session_count
    }
}

/// A packed representation of an edge's start index and byte width.
///
/// This is packed by stealing 2 bits from the start index, which means we only accommodate edge
/// data arrays up to a quarter of our address space. Which seems fine.
#[derive(Debug, Clone, Copy)]
struct EdgeHeader {
    repr: usize,
    num_edges: u32,
}

impl EdgeHeader {
    #[inline]
    fn start(self) -> usize {
        self.repr >> DEP_NODE_WIDTH_BITS
    }

    #[inline]
    fn bytes_per_index(self) -> usize {
        (self.repr & mask(DEP_NODE_WIDTH_BITS)) + 1
    }

    #[inline]
    fn mask(self) -> u32 {
        mask(self.bytes_per_index() * 8) as u32
    }
}

#[inline]
fn mask(bits: usize) -> usize {
    usize::MAX >> ((size_of::<usize>() * 8) - bits)
}
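
// For reference (illustrative only): `mask(8)` is `0xFF` and `mask(16)` is `0xFFFF`;
// `EdgeHeader::mask` uses this to zero out the bytes of a fixed-size read that belong to the
// following edge index.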

impl SerializedDepGraph {
    #[instrument(level = "debug", skip(d, deps))]
    pub fn decode<D: Deps>(d: &mut MemDecoder<'_>, deps: &D) -> Arc<SerializedDepGraph> {
        // The last 24 bytes are the node max, node count and edge count.
        debug!("position: {:?}", d.position());

        // `node_max` is the number of indices including empty nodes while `node_count`
        // is the number of actually encoded nodes.
        let (node_max, node_count, edge_count) =
            d.with_position(d.len() - 3 * IntEncodedWithFixedSize::ENCODED_SIZE, |d| {
                debug!("position: {:?}", d.position());
                let node_max = IntEncodedWithFixedSize::decode(d).0 as usize;
                let node_count = IntEncodedWithFixedSize::decode(d).0 as usize;
                let edge_count = IntEncodedWithFixedSize::decode(d).0 as usize;
                (node_max, node_count, edge_count)
            });
        debug!("position: {:?}", d.position());

        debug!(?node_count, ?edge_count);

        let graph_bytes = d.len() - (3 * IntEncodedWithFixedSize::ENCODED_SIZE) - d.position();

        let mut nodes = IndexVec::from_elem_n(
            DepNode { kind: D::DEP_KIND_NULL, hash: PackedFingerprint::from(Fingerprint::ZERO) },
            node_max,
        );
        let mut fingerprints = IndexVec::from_elem_n(Fingerprint::ZERO, node_max);
        let mut edge_list_indices =
            IndexVec::from_elem_n(EdgeHeader { repr: 0, num_edges: 0 }, node_max);

        // This estimation assumes that all of the encoded bytes are for the edge lists or for the
        // fixed-size node headers. But that's not necessarily true; if any edge list has a length
        // that spills out of the size we can bit-pack into SerializedNodeHeader then some of the
        // total serialized size is also used by leb128-encoded edge list lengths. Neglecting that
        // contribution to graph_bytes means our estimation of the bytes needed for edge_list_data
        // slightly overshoots. But it cannot overshoot by much; consider that the worst case is
        // for a node with length 64, which means the spilled 1-byte leb128 length is 1 byte of at
        // least (38 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
        // length is about the same fractional overhead and it amortizes for yet greater lengths.
        let mut edge_list_data =
            Vec::with_capacity(graph_bytes - node_count * size_of::<SerializedNodeHeader<D>>());

        for _ in 0..node_count {
            // Decode the header for this node; the header packs together as many of the fixed-size
            // fields as possible to limit the number of times we update decoder state.
            let node_header =
                SerializedNodeHeader::<D> { bytes: d.read_array(), _marker: PhantomData };

            let index = node_header.index();

            let node = &mut nodes[index];
            // Make sure there are no duplicate indices in the dep graph.
            assert!(node_header.node().kind != D::DEP_KIND_NULL && node.kind == D::DEP_KIND_NULL);
            *node = node_header.node();

            fingerprints[index] = node_header.fingerprint();

            // If the length of this node's edge list is small, the length is stored in the header.
            // If it is not, we fall back to another decoder call.
            let num_edges = node_header.len().unwrap_or_else(|| d.read_u32());

            // The edges index list uses the same varint strategy as rmeta tables; we select the
            // number of byte elements per-array not per-element. This lets us read the whole edge
            // list for a node with one decoder call and also use the on-disk format in memory.
            let edges_len_bytes = node_header.bytes_per_index() * (num_edges as usize);
            // The in-memory structure for the edges list stores the byte width of the edges on
            // this node with the offset into the global edge data array.
            let edges_header = node_header.edges_header(&edge_list_data, num_edges);

            edge_list_data.extend(d.read_raw_bytes(edges_len_bytes));

            edge_list_indices[index] = edges_header;
        }

        // When we access the edge list data, we do a fixed-size read from the edge list data then
        // mask off the bytes that aren't for that edge index, so the last read may dangle off the
        // end of the array. This padding ensures it doesn't.
        edge_list_data.extend(&[0u8; DEP_NODE_PAD]);

        // Read the number of each dep kind and use it to create a hash map with a suitable size.
        let mut index: Vec<_> = (0..(D::DEP_KIND_MAX + 1))
            .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
            .collect();

        let session_count = d.read_u64();

        for (idx, node) in nodes.iter_enumerated() {
            if index[node.kind.as_usize()].insert(node.hash, idx).is_some() {
                // Empty nodes and side effect nodes can have duplicates
                if node.kind != D::DEP_KIND_NULL && node.kind != D::DEP_KIND_SIDE_EFFECT {
                    let name = deps.name(node.kind);
                    panic!(
                        "Error: A dep graph node ({name}) does not have a unique index. \
                         Running a clean build on a nightly compiler with `-Z incremental-verify-ich` \
                         can help narrow down the issue for reporting. A clean build may also work around the issue.\n
                         DepNode: {node:?}"
                    )
                }
            }
        }

        Arc::new(SerializedDepGraph {
            nodes,
            fingerprints,
            edge_list_indices,
            edge_list_data,
            index,
            session_count,
        })
    }
}

/// A packed representation of all the fixed-size fields in a `NodeInfo`.
///
/// This stores in one byte array:
/// * The `Fingerprint` in the `NodeInfo`
/// * The `Fingerprint` in `DepNode` that is in this `NodeInfo`
/// * The `DepKind`'s discriminant (a u16, but not all bits are used...)
/// * The byte width of the encoded edges for this node
/// * In whatever bits remain, the length of the edge list for this node, if it fits
struct SerializedNodeHeader<D> {
    // 2 bytes for the DepNode
    // 4 bytes for the index
    // 16 for Fingerprint in DepNode
    // 16 for Fingerprint in NodeInfo
    bytes: [u8; 38],
    _marker: PhantomData<D>,
}

// The fields of a `SerializedNodeHeader`. This struct is an implementation detail and exists only
// to make the implementation of `SerializedNodeHeader` simpler.
struct Unpacked {
    len: Option<u32>,
    bytes_per_index: usize,
    kind: DepKind,
    index: SerializedDepNodeIndex,
    hash: PackedFingerprint,
    fingerprint: Fingerprint,
}

// Bit fields, where
// M: bits used to store the length of a node's edge list
// N: bits used to store the byte width of elements of the edge list
// K: bits used to store the DepKind
// are laid out, starting from the least significant bit, as
// 0..K      kind
// K..K+N    bytes per index
// K+N..16   length of the edge list (with K + N + M == 16)
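//
// As a worked example with hypothetical sizes (KIND_BITS = 7, WIDTH_BITS = 2, LEN_BITS = 7):
// a node with DepKind discriminant 5, 2-byte edge indices (stored as width - 1 = 1) and
// 3 edges (stored as 3 + 1 = 4) packs into
//     head = 5 | (1 << 7) | (4 << 9)
// and the largest edge count that still fits inline is MAX_INLINE_LEN = (0xFFFF >> 9) - 1 = 126.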
impl<D: Deps> SerializedNodeHeader<D> {
    const TOTAL_BITS: usize = size_of::<DepKind>() * 8;
    const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
    const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
    const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;
    const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;

    #[inline]
    fn new(
        node: DepNode,
        index: DepNodeIndex,
        fingerprint: Fingerprint,
        edge_max_index: u32,
        edge_count: usize,
    ) -> Self {
        debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS);

        let mut head = node.kind.as_inner();

        let free_bytes = edge_max_index.leading_zeros() as usize / 8;
        let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1);
        head |= (bytes_per_index as u16) << Self::KIND_BITS;

        // Encode number of edges + 1 so that we can reserve 0 to indicate that the len doesn't fit
        // in this bitfield.
        if edge_count <= Self::MAX_INLINE_LEN {
            head |= (edge_count as u16 + 1) << (Self::KIND_BITS + Self::WIDTH_BITS);
        }

        let hash: Fingerprint = node.hash.into();

        // Using half-open ranges ensures an unconditional panic if we get the magic numbers wrong.
        let mut bytes = [0u8; 38];
        bytes[..2].copy_from_slice(&head.to_le_bytes());
        bytes[2..6].copy_from_slice(&index.as_u32().to_le_bytes());
        bytes[6..22].copy_from_slice(&hash.to_le_bytes());
        bytes[22..].copy_from_slice(&fingerprint.to_le_bytes());

        #[cfg(debug_assertions)]
        {
            let res = Self { bytes, _marker: PhantomData };
            assert_eq!(fingerprint, res.fingerprint());
            assert_eq!(node, res.node());
            if let Some(len) = res.len() {
                assert_eq!(edge_count, len as usize);
            }
        }
        Self { bytes, _marker: PhantomData }
    }

    #[inline]
    fn unpack(&self) -> Unpacked {
        let head = u16::from_le_bytes(self.bytes[..2].try_into().unwrap());
        let index = u32::from_le_bytes(self.bytes[2..6].try_into().unwrap());
        let hash = self.bytes[6..22].try_into().unwrap();
        let fingerprint = self.bytes[22..].try_into().unwrap();

        let kind = head & mask(Self::KIND_BITS) as u16;
        let bytes_per_index = (head >> Self::KIND_BITS) & mask(Self::WIDTH_BITS) as u16;
        let len = (head as u32) >> (Self::WIDTH_BITS + Self::KIND_BITS);

        Unpacked {
            len: len.checked_sub(1),
            bytes_per_index: bytes_per_index as usize + 1,
            kind: DepKind::new(kind),
            index: SerializedDepNodeIndex::from_u32(index),
            hash: Fingerprint::from_le_bytes(hash).into(),
            fingerprint: Fingerprint::from_le_bytes(fingerprint),
        }
    }

    #[inline]
    fn len(&self) -> Option<u32> {
        self.unpack().len
    }

    #[inline]
    fn bytes_per_index(&self) -> usize {
        self.unpack().bytes_per_index
    }

    #[inline]
    fn index(&self) -> SerializedDepNodeIndex {
        self.unpack().index
    }

    #[inline]
    fn fingerprint(&self) -> Fingerprint {
        self.unpack().fingerprint
    }

    #[inline]
    fn node(&self) -> DepNode {
        let Unpacked { kind, hash, .. } = self.unpack();
        DepNode { kind, hash }
    }

    #[inline]
    fn edges_header(&self, edge_list_data: &[u8], num_edges: u32) -> EdgeHeader {
        EdgeHeader {
            repr: (edge_list_data.len() << DEP_NODE_WIDTH_BITS) | (self.bytes_per_index() - 1),
            num_edges,
        }
    }
}

#[derive(Debug)]
struct NodeInfo {
    node: DepNode,
    fingerprint: Fingerprint,
    edges: EdgesVec,
}

impl NodeInfo {
    fn encode<D: Deps>(&self, e: &mut MemEncoder, index: DepNodeIndex) {
        let NodeInfo { node, fingerprint, ref edges } = *self;
        let header = SerializedNodeHeader::<D>::new(
            node,
            index,
            fingerprint,
            edges.max_index(),
            edges.len(),
        );
        e.write_array(header.bytes);

        if header.len().is_none() {
            // The edges are all unique and the number of unique indices is less than u32::MAX.
            e.emit_u32(edges.len().try_into().unwrap());
        }

        let bytes_per_index = header.bytes_per_index();
        for node_index in edges.iter() {
            e.write_with(|dest| {
                *dest = node_index.as_u32().to_le_bytes();
                bytes_per_index
            });
        }
    }

    /// Encode a node that was promoted from the previous graph. It reads the edges directly from
    /// the previous dep graph and expects all edges to already have a new dep node index assigned.
    /// This avoids the overhead of constructing `EdgesVec`, which would be needed to call `encode`.
    #[inline]
    fn encode_promoted<D: Deps>(
        e: &mut MemEncoder,
        node: DepNode,
        index: DepNodeIndex,
        fingerprint: Fingerprint,
        prev_index: SerializedDepNodeIndex,
        colors: &DepNodeColorMap,
        previous: &SerializedDepGraph,
    ) -> usize {
        let edges = previous.edge_targets_from(prev_index);
        let edge_count = edges.size_hint().0;

        // Find the highest edge in the new dep node indices
        let edge_max =
            edges.clone().map(|i| colors.current(i).unwrap().as_u32()).max().unwrap_or(0);

        let header = SerializedNodeHeader::<D>::new(node, index, fingerprint, edge_max, edge_count);
        e.write_array(header.bytes);

        if header.len().is_none() {
            // The edges are all unique and the number of unique indices is less than u32::MAX.
            e.emit_u32(edge_count.try_into().unwrap());
        }

        let bytes_per_index = header.bytes_per_index();
        for node_index in edges {
            let node_index = colors.current(node_index).unwrap();
            e.write_with(|dest| {
                *dest = node_index.as_u32().to_le_bytes();
                bytes_per_index
            });
        }

        edge_count
    }
}

struct Stat {
    kind: DepKind,
    node_counter: u64,
    edge_counter: u64,
}

struct LocalEncoderState {
    next_node_index: u32,
    remaining_node_index: u32,
    encoder: MemEncoder,
    node_count: usize,
    edge_count: usize,

    /// Stores the number of times we've encoded each dep kind.
    kind_stats: Vec<u32>,
}

struct LocalEncoderResult {
    node_max: u32,
    node_count: usize,
    edge_count: usize,

    /// Stores the number of times we've encoded each dep kind.
    kind_stats: Vec<u32>,
}

struct EncoderState<D: Deps> {
    next_node_index: AtomicU64,
    previous: Arc<SerializedDepGraph>,
    file: Lock<Option<FileEncoder>>,
    local: WorkerLocal<RefCell<LocalEncoderState>>,
    stats: Option<Lock<FxHashMap<DepKind, Stat>>>,
    marker: PhantomData<D>,
}

impl<D: Deps> EncoderState<D> {
    fn new(encoder: FileEncoder, record_stats: bool, previous: Arc<SerializedDepGraph>) -> Self {
        Self {
            previous,
            next_node_index: AtomicU64::new(0),
            stats: record_stats.then(|| Lock::new(FxHashMap::default())),
            file: Lock::new(Some(encoder)),
            local: WorkerLocal::new(|_| {
                RefCell::new(LocalEncoderState {
                    next_node_index: 0,
                    remaining_node_index: 0,
                    edge_count: 0,
                    node_count: 0,
                    encoder: MemEncoder::new(),
                    kind_stats: iter::repeat_n(0, D::DEP_KIND_MAX as usize + 1).collect(),
                })
            }),
            marker: PhantomData,
        }
    }

    #[inline]
    fn next_index(&self, local: &mut LocalEncoderState) -> DepNodeIndex {
        if local.remaining_node_index == 0 {
            const COUNT: u32 = 256;

            // We assume that there won't be enough active threads to overflow `u64` from `u32::MAX` here.
            // This can exceed u32::MAX by at most `N` * `COUNT` where `N` is the thread pool count since
            // `try_into().unwrap()` will make threads panic when `self.next_node_index` exceeds u32::MAX.
            local.next_node_index =
                self.next_node_index.fetch_add(COUNT as u64, Ordering::Relaxed).try_into().unwrap();

            // Check that we'll stay within `u32`
            local.next_node_index.checked_add(COUNT).unwrap();

            local.remaining_node_index = COUNT;
        }

        DepNodeIndex::from_u32(local.next_node_index)
    }

    /// Marks the index previously returned by `next_index` as used.
    #[inline]
    fn bump_index(&self, local: &mut LocalEncoderState) {
        local.remaining_node_index -= 1;
        local.next_node_index += 1;
        local.node_count += 1;
    }

    #[inline]
    fn record(
        &self,
        node: DepNode,
        index: DepNodeIndex,
        edge_count: usize,
        edges: impl FnOnce(&Self) -> Vec<DepNodeIndex>,
        record_graph: &Option<Lock<DepGraphQuery>>,
        local: &mut LocalEncoderState,
    ) {
        local.kind_stats[node.kind.as_usize()] += 1;
        local.edge_count += edge_count;

        if let Some(record_graph) = &record_graph {
            // Call `edges` before the outlined code to allow the closure to be optimized out.
            let edges = edges(self);

            // Outline the build of the full dep graph as it's typically disabled and cold.
            outline(move || {
                // Do not ICE when a query is called from within `with_query`.
                if let Some(record_graph) = &mut record_graph.try_lock() {
                    record_graph.push(index, node, &edges);
                }
            });
        }

        if let Some(stats) = &self.stats {
            let kind = node.kind;

            // Outline the stats code as it's typically disabled and cold.
            outline(move || {
                let mut stats = stats.lock();
                let stat =
                    stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
                stat.node_counter += 1;
                stat.edge_counter += edge_count as u64;
            });
        }
    }

    #[inline]
    fn flush_mem_encoder(&self, local: &mut LocalEncoderState) {
        let data = &mut local.encoder.data;
        if data.len() > 64 * 1024 {
            self.file.lock().as_mut().unwrap().emit_raw_bytes(&data[..]);
            data.clear();
        }
    }

    /// Encodes a node to the current graph.
    fn encode_node(
        &self,
        index: DepNodeIndex,
        node: &NodeInfo,
        record_graph: &Option<Lock<DepGraphQuery>>,
        local: &mut LocalEncoderState,
    ) {
        node.encode::<D>(&mut local.encoder, index);
        self.flush_mem_encoder(&mut *local);
        self.record(
            node.node,
            index,
            node.edges.len(),
            |_| node.edges[..].to_vec(),
            record_graph,
            &mut *local,
        );
    }

    /// Encodes a node that was promoted from the previous graph. It reads the information directly from
    /// the previous dep graph for performance reasons.
    ///
    /// This differs from `encode_node`, where you have to explicitly provide the relevant `NodeInfo`.
    ///
    /// It expects all edges to already have a new dep node index assigned.
    #[inline]
    fn encode_promoted_node(
        &self,
        index: DepNodeIndex,
        prev_index: SerializedDepNodeIndex,
        record_graph: &Option<Lock<DepGraphQuery>>,
        colors: &DepNodeColorMap,
        local: &mut LocalEncoderState,
    ) {
        let node = self.previous.index_to_node(prev_index);
        let fingerprint = self.previous.fingerprint_by_index(prev_index);
        let edge_count = NodeInfo::encode_promoted::<D>(
            &mut local.encoder,
            node,
            index,
            fingerprint,
            prev_index,
            colors,
            &self.previous,
        );
        self.flush_mem_encoder(&mut *local);
        self.record(
            node,
            index,
            edge_count,
            |this| {
                this.previous
                    .edge_targets_from(prev_index)
                    .map(|i| colors.current(i).unwrap())
                    .collect()
            },
            record_graph,
            &mut *local,
        );
    }

    fn finish(&self, profiler: &SelfProfilerRef, current: &CurrentDepGraph<D>) -> FileEncodeResult {
        // Prevent more indices from being allocated.
        self.next_node_index.store(u32::MAX as u64 + 1, Ordering::SeqCst);

        let results = broadcast(|_| {
            let mut local = self.local.borrow_mut();

            // Prevent more indices from being allocated on this thread.
            local.remaining_node_index = 0;

            let data = mem::replace(&mut local.encoder.data, Vec::new());
            self.file.lock().as_mut().unwrap().emit_raw_bytes(&data);

            LocalEncoderResult {
                kind_stats: local.kind_stats.clone(),
                node_max: local.next_node_index,
                node_count: local.node_count,
                edge_count: local.edge_count,
            }
        });

        let mut encoder = self.file.lock().take().unwrap();

        let mut kind_stats: Vec<u32> = iter::repeat_n(0, D::DEP_KIND_MAX as usize + 1).collect();

        let mut node_max = 0;
        let mut node_count = 0;
        let mut edge_count = 0;

        for result in results {
            node_max = max(node_max, result.node_max);
            node_count += result.node_count;
            edge_count += result.edge_count;
            for (i, stat) in result.kind_stats.iter().enumerate() {
                kind_stats[i] += stat;
            }
        }

        // Encode the number of each dep kind encountered
        for count in kind_stats.iter() {
            count.encode(&mut encoder);
        }

        self.previous.session_count.checked_add(1).unwrap().encode(&mut encoder);

        debug!(?node_max, ?node_count, ?edge_count);
        debug!("position: {:?}", encoder.position());
        IntEncodedWithFixedSize(node_max.try_into().unwrap()).encode(&mut encoder);
        IntEncodedWithFixedSize(node_count.try_into().unwrap()).encode(&mut encoder);
        IntEncodedWithFixedSize(edge_count.try_into().unwrap()).encode(&mut encoder);
        debug!("position: {:?}", encoder.position());
        // Drop the encoder so that nothing is written after the counts.
        let result = encoder.finish();
        if let Ok(position) = result {
            // FIXME(rylev): we hardcode the dep graph file name so we
            // don't need a dependency on rustc_incremental just for that.
            profiler.artifact_size("dep_graph", "dep-graph.bin", position as u64);
        }

        self.print_incremental_info(current, node_count, edge_count);

        result
    }

    fn print_incremental_info(
        &self,
        current: &CurrentDepGraph<D>,
        total_node_count: usize,
        total_edge_count: usize,
    ) {
        if let Some(record_stats) = &self.stats {
            let record_stats = record_stats.lock();
            // `stats` is sorted below so we can allow this lint here.
            #[allow(rustc::potential_query_instability)]
            let mut stats: Vec<_> = record_stats.values().collect();
            stats.sort_by_key(|s| -(s.node_counter as i64));

            const SEPARATOR: &str = "[incremental] --------------------------------\
                                     ----------------------------------------------\
                                     ------------";

            eprintln!("[incremental]");
            eprintln!("[incremental] DepGraph Statistics");
            eprintln!("{SEPARATOR}");
            eprintln!("[incremental]");
            eprintln!("[incremental] Total Node Count: {}", total_node_count);
            eprintln!("[incremental] Total Edge Count: {}", total_edge_count);

            if cfg!(debug_assertions) {
                let total_read_count = current.total_read_count.load(Ordering::Relaxed);
                let total_duplicate_read_count =
                    current.total_duplicate_read_count.load(Ordering::Relaxed);
                eprintln!("[incremental] Total Edge Reads: {total_read_count}");
                eprintln!("[incremental] Total Duplicate Edge Reads: {total_duplicate_read_count}");
            }

            eprintln!("[incremental]");
            eprintln!(
                "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
                "Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
            );
            eprintln!("{SEPARATOR}");

            for stat in stats {
                let node_kind_ratio =
                    (100.0 * (stat.node_counter as f64)) / (total_node_count as f64);
                let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);

                eprintln!(
                    "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
                    format!("{:?}", stat.kind),
                    node_kind_ratio,
                    stat.node_counter,
                    node_kind_avg_edges,
                );
            }

            eprintln!("{SEPARATOR}");
            eprintln!("[incremental]");
        }
    }
}

pub(crate) struct GraphEncoder<D: Deps> {
    profiler: SelfProfilerRef,
    status: EncoderState<D>,
    record_graph: Option<Lock<DepGraphQuery>>,
}

impl<D: Deps> GraphEncoder<D> {
    pub(crate) fn new(
        sess: &Session,
        encoder: FileEncoder,
        prev_node_count: usize,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        let record_graph = sess
            .opts
            .unstable_opts
            .query_dep_graph
            .then(|| Lock::new(DepGraphQuery::new(prev_node_count)));
        let status = EncoderState::new(encoder, sess.opts.unstable_opts.incremental_info, previous);
        GraphEncoder { status, record_graph, profiler: sess.prof.clone() }
    }

    pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(record_graph) = &self.record_graph {
            f(&record_graph.lock())
        }
    }

    /// Encodes a node that does not exist in the previous graph.
    pub(crate) fn send_new(
        &self,
        node: DepNode,
        fingerprint: Fingerprint,
        edges: EdgesVec,
    ) -> DepNodeIndex {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");
        let node = NodeInfo { node, fingerprint, edges };
        let mut local = self.status.local.borrow_mut();
        let index = self.status.next_index(&mut *local);
        self.status.bump_index(&mut *local);
        self.status.encode_node(index, &node, &self.record_graph, &mut *local);
        index
    }

    /// Encodes a node that exists in the previous graph, but was re-executed.
    ///
    /// This will also ensure the dep node is colored either red or green.
    pub(crate) fn send_and_color(
        &self,
        prev_index: SerializedDepNodeIndex,
        colors: &DepNodeColorMap,
        node: DepNode,
        fingerprint: Fingerprint,
        edges: EdgesVec,
        is_green: bool,
    ) -> DepNodeIndex {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");
        let node = NodeInfo { node, fingerprint, edges };

        let mut local = self.status.local.borrow_mut();

        let index = self.status.next_index(&mut *local);

        if is_green {
            // Use `try_mark_green` to avoid racing when `send_promoted` is called concurrently
            // on the same index.
            match colors.try_mark_green(prev_index, index) {
                Ok(()) => (),
                Err(dep_node_index) => return dep_node_index,
            }
        } else {
            colors.insert_red(prev_index);
        }

        self.status.bump_index(&mut *local);
        self.status.encode_node(index, &node, &self.record_graph, &mut *local);
        index
    }

    /// Encodes a node that was promoted from the previous graph. It reads the information directly
    /// from the previous dep graph and expects all edges to already have a new dep node index
    /// assigned.
    ///
    /// This will also ensure the dep node is marked green.
    #[inline]
    pub(crate) fn send_promoted(
        &self,
        prev_index: SerializedDepNodeIndex,
        colors: &DepNodeColorMap,
    ) -> DepNodeIndex {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");

        let mut local = self.status.local.borrow_mut();
        let index = self.status.next_index(&mut *local);

        // Use `try_mark_green` to avoid racing when `send_promoted` or `send_and_color`
        // is called concurrently on the same index.
        match colors.try_mark_green(prev_index, index) {
            Ok(()) => {
                self.status.bump_index(&mut *local);
                self.status.encode_promoted_node(
                    index,
                    prev_index,
                    &self.record_graph,
                    colors,
                    &mut *local,
                );
                index
            }
            Err(dep_node_index) => dep_node_index,
        }
    }

    pub(crate) fn finish(&self, current: &CurrentDepGraph<D>) -> FileEncodeResult {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph_finish");

        self.status.finish(&self.profiler, current)
    }
}