rustc_query_system/dep_graph/serialized.rs

//! The data that we will serialize and deserialize.
//!
//! Notionally, the dep-graph is a sequence of NodeInfo with the dependencies
//! specified inline. The node and edge counts are stored as the last 16 bytes
//! of the file, so we can find them easily at decoding time.
//!
//! The serialization is performed on-demand when each node is emitted. Using this
//! scheme, we do not need to keep the current graph in memory.
//!
//! The deserialization is performed manually, in order to convert from the stored
//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
//! node and edge counts are stored at the end of the file, all the arrays can be
//! pre-allocated with the right length.
//!
//! The encoding of the dep-graph is designed around the fact that fixed-size
//! reads of encoded data are generally faster than variable-sized reads. Ergo we adopt
//! essentially the same varint encoding scheme used in the rmeta format; the edge lists
//! for each node on the graph store a 2-bit integer which is the number of bytes per edge
//! index in that node's edge list. We effectively ignore that an edge index of 0 could be
//! encoded with 0 bytes in order to not require 3 bits to store the byte width of the edges.
//! The overhead of calculating the correct byte width for each edge is mitigated by
//! building edge lists with [`EdgesVec`] which keeps a running max of the edges in a node.
//!
//! When we decode this data, we do not immediately create [`SerializedDepNodeIndex`] and
//! instead keep the data in its denser serialized form which lets us turn our on-disk size
//! efficiency directly into a peak memory reduction. When we convert these encoded-in-memory
//! values into their fully-deserialized type, we use a fixed-size read of the encoded array
//! then mask off any errant bytes we read. The array of edge index bytes is padded to permit this.
//!
//! We also encode and decode the entire rest of each node using [`SerializedNodeHeader`]
//! to let this encoding and decoding be done in one fixed-size operation. These headers contain
//! two [`Fingerprint`]s along with the serialized [`DepKind`], and the number of edge indices
//! in the node and the number of bytes used to encode the edge indices for this node. The
//! [`DepKind`], number of edges, and bytes per edge are all bit-packed together, if they fit.
//! If the number of edges in this node does not fit in the bits available in the header, we
//! store it directly after the header with leb128.
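//!
//! For example (an illustrative sketch, not code from this module): decoding one edge
//! index that was encoded with 2 bytes per index performs a fixed 4-byte read and then
//! masks off the two bytes that belong to the following index:
//!
//! ```ignore (illustrative)
//! let raw: [u8; 4] = [0x34, 0x12, 0xAA, 0xBB]; // 0xAA, 0xBB belong to the next edge
//! let mask = 0x0000_FFFF_u32; // 2 bytes per index: keep the low 16 bits
//! let index = u32::from_le_bytes(raw) & mask;
//! assert_eq!(index, 0x1234);
//! ```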

use std::iter;
use std::marker::PhantomData;
use std::sync::Arc;

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::outline;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::unhash::UnhashMap;
use rustc_index::{Idx, IndexVec};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use tracing::{debug, instrument};

use super::graph::{DepNodeColor, DepNodeColorMap};
use super::query::DepGraphQuery;
use super::{DepKind, DepNode, DepNodeIndex, Deps};
use crate::dep_graph::edges::EdgesVec;

// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
// unused so that we can store multiple index types in `CompressedHybridIndex`,
// and use those bits to encode which index type it contains.
rustc_index::newtype_index! {
    #[encodable]
    #[max = 0x7FFF_FFFF]
    pub struct SerializedDepNodeIndex {}
}

const DEP_NODE_SIZE: usize = size_of::<SerializedDepNodeIndex>();
/// Amount of padding we need to add to the edge list data so that we can retrieve every
/// SerializedDepNodeIndex with a fixed-size read then mask.
const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;
/// Number of bits we need to store the number of used bytes in a SerializedDepNodeIndex.
/// Note that wherever we encode byte widths like this we actually store the number of bytes used
/// minus 1; for a 4-byte value we technically would have 5 widths to store, but using one byte to
/// store zeroes (which are relatively rare) is a decent tradeoff to save a bit in our bitfields.
const DEP_NODE_WIDTH_BITS: usize = DEP_NODE_SIZE / 2;
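// As a worked example of the minus-1 convention above (illustrative): a node whose
// largest edge index is 300 (`0x0000_012C`) needs 2 bytes per index, so the 2-bit
// width field stores `2 - 1 == 1`, and decoding adds the 1 back.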

/// Data for use when recompiling the **current crate**.
#[derive(Debug, Default)]
pub struct SerializedDepGraph {
    /// The set of all DepNodes in the graph.
    nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
    /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
    /// the DepNode at the same index in the nodes vector.
    fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
    /// For each DepNode, stores the list of edges originating from that
    /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
    /// which holds the actual DepNodeIndices of the target nodes.
    edge_list_indices: IndexVec<SerializedDepNodeIndex, EdgeHeader>,
    /// A flattened list of all edge targets in the graph, stored in the same
    /// varint encoding that we use on disk. Edge sources are implicit in edge_list_indices.
    edge_list_data: Vec<u8>,
    /// Stores a map from fingerprints to nodes per dep node kind.
    /// This is the reciprocal of `nodes`.
    index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>,
}

impl SerializedDepGraph {
    #[inline]
    pub fn edge_targets_from(
        &self,
        source: SerializedDepNodeIndex,
    ) -> impl Iterator<Item = SerializedDepNodeIndex> + Clone {
        let header = self.edge_list_indices[source];
        let mut raw = &self.edge_list_data[header.start()..];
        // Figure out where the edge list for `source` ends by getting the start index of the next
        // edge list, or the end of the array if this is the last edge list.
        let end = self
            .edge_list_indices
            .get(source + 1)
            .map(|h| h.start())
            .unwrap_or_else(|| self.edge_list_data.len() - DEP_NODE_PAD);

        // The number of edges for this node is implicitly stored in the combination of the byte
        // width and the length.
        let bytes_per_index = header.bytes_per_index();
        let len = (end - header.start()) / bytes_per_index;

        // LLVM doesn't hoist EdgeHeader::mask so we do it ourselves.
        let mask = header.mask();
        (0..len).map(move |_| {
            // Doing this slicing in this order ensures that the first bounds check suffices for
            // all the others.
            let index = &raw[..DEP_NODE_SIZE];
            raw = &raw[bytes_per_index..];
            let index = u32::from_le_bytes(index.try_into().unwrap()) & mask;
            SerializedDepNodeIndex::from_u32(index)
        })
    }
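
    // For example (illustrative): if a node's edge region spans 8 bytes and its header
    // says 2 bytes per index, that node has 8 / 2 == 4 edges; no explicit per-node edge
    // count is stored anywhere in `SerializedDepGraph`.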

    #[inline]
    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
        self.nodes[dep_node_index]
    }

    #[inline]
    pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
        self.index.get(dep_node.kind.as_usize())?.get(&dep_node.hash).cloned()
    }

    #[inline]
    pub fn fingerprint_by_index(&self, dep_node_index: SerializedDepNodeIndex) -> Fingerprint {
        self.fingerprints[dep_node_index]
    }

    #[inline]
    pub fn node_count(&self) -> usize {
        self.nodes.len()
    }
}

/// A packed representation of an edge's start index and byte width.
///
/// This is packed by stealing 2 bits from the start index, which means we only accommodate edge
/// data arrays up to a quarter of our address space. Which seems fine.
#[derive(Debug, Clone, Copy)]
struct EdgeHeader {
    repr: usize,
}

impl EdgeHeader {
    #[inline]
    fn start(self) -> usize {
        self.repr >> DEP_NODE_WIDTH_BITS
    }

    #[inline]
    fn bytes_per_index(self) -> usize {
        (self.repr & mask(DEP_NODE_WIDTH_BITS)) + 1
    }

    #[inline]
    fn mask(self) -> u32 {
        mask(self.bytes_per_index() * 8) as u32
    }
}
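
// Illustrative example of the packing above: an edge list that starts at byte 0x1000
// of `edge_list_data` and uses 2 bytes per index is represented as
// `repr = (0x1000 << DEP_NODE_WIDTH_BITS) | (2 - 1)`, so `start() == 0x1000`,
// `bytes_per_index() == 2`, and `mask() == 0x0000_FFFF`.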

#[inline]
fn mask(bits: usize) -> usize {
    usize::MAX >> ((size_of::<usize>() * 8) - bits)
}

impl SerializedDepGraph {
    #[instrument(level = "debug", skip(d, deps))]
    pub fn decode<D: Deps>(d: &mut MemDecoder<'_>, deps: &D) -> Arc<SerializedDepGraph> {
        // The last 16 bytes are the node count and edge count.
        debug!("position: {:?}", d.position());
        let (node_count, edge_count) =
            d.with_position(d.len() - 2 * IntEncodedWithFixedSize::ENCODED_SIZE, |d| {
                debug!("position: {:?}", d.position());
                let node_count = IntEncodedWithFixedSize::decode(d).0 as usize;
                let edge_count = IntEncodedWithFixedSize::decode(d).0 as usize;
                (node_count, edge_count)
            });
        debug!("position: {:?}", d.position());

        debug!(?node_count, ?edge_count);

        let graph_bytes = d.len() - (2 * IntEncodedWithFixedSize::ENCODED_SIZE) - d.position();

        let mut nodes = IndexVec::with_capacity(node_count);
        let mut fingerprints = IndexVec::with_capacity(node_count);
        let mut edge_list_indices = IndexVec::with_capacity(node_count);
        // This estimation assumes that all of the encoded bytes are for the edge lists or for the
        // fixed-size node headers. But that's not necessarily true; if any edge list has a length
        // that spills out of the size we can bit-pack into SerializedNodeHeader then some of the
        // total serialized size is also used by leb128-encoded edge list lengths. Neglecting that
        // contribution to graph_bytes means our estimation of the bytes needed for edge_list_data
        // slightly overshoots. But it cannot overshoot by much; consider that the worst case is
        // for a node with length 64, which means the spilled 1-byte leb128 length is 1 byte of at
        // least (34-byte header + 1-byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
        // length is about the same fractional overhead and it amortizes for yet greater lengths.
        let mut edge_list_data =
            Vec::with_capacity(graph_bytes - node_count * size_of::<SerializedNodeHeader<D>>());

        for _index in 0..node_count {
            // Decode the header for this node; the header packs together as many of the fixed-size
            // fields as possible to limit the number of times we update decoder state.
            let node_header =
                SerializedNodeHeader::<D> { bytes: d.read_array(), _marker: PhantomData };

            let _i: SerializedDepNodeIndex = nodes.push(node_header.node());
            debug_assert_eq!(_i.index(), _index);

            let _i: SerializedDepNodeIndex = fingerprints.push(node_header.fingerprint());
            debug_assert_eq!(_i.index(), _index);

            // If the length of this node's edge list is small, the length is stored in the header.
            // If it is not, we fall back to another decoder call.
            let num_edges = node_header.len().unwrap_or_else(|| d.read_usize());

            // The edges index list uses the same varint strategy as rmeta tables; we select the
            // number of byte elements per-array not per-element. This lets us read the whole edge
            // list for a node with one decoder call and also use the on-disk format in memory.
            let edges_len_bytes = node_header.bytes_per_index() * num_edges;
            // The in-memory structure for the edges list stores the byte width of the edges on
            // this node with the offset into the global edge data array.
            let edges_header = node_header.edges_header(&edge_list_data);

            edge_list_data.extend(d.read_raw_bytes(edges_len_bytes));

            let _i: SerializedDepNodeIndex = edge_list_indices.push(edges_header);
            debug_assert_eq!(_i.index(), _index);
        }

        // When we access the edge list data, we do a fixed-size read from the edge list data then
        // mask off the bytes that aren't for that edge index, so the last read may dangle off the
        // end of the array. This padding ensures it doesn't.
        edge_list_data.extend(&[0u8; DEP_NODE_PAD]);

        // Read the count of each dep kind and use the counts to create hash maps with suitable
        // sizes.
        let mut index: Vec<_> = (0..(D::DEP_KIND_MAX + 1))
            .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
            .collect();

        for (idx, node) in nodes.iter_enumerated() {
            if index[node.kind.as_usize()].insert(node.hash, idx).is_some() {
                // Side effect nodes can have duplicates
                if node.kind != D::DEP_KIND_SIDE_EFFECT {
                    let name = deps.name(node.kind);
                    panic!(
                        "Error: A dep graph node ({name}) does not have a unique index. \
                         Running a clean build on a nightly compiler with `-Z incremental-verify-ich` \
                         can help narrow down the issue for reporting. A clean build may also work \
                         around the issue.\nDepNode: {node:?}"
                    )
                }
            }
        }

        Arc::new(SerializedDepGraph {
            nodes,
            fingerprints,
            edge_list_indices,
            edge_list_data,
            index,
        })
    }
}

/// A packed representation of all the fixed-size fields in a `NodeInfo`.
///
/// This stores in one byte array:
/// * The `Fingerprint` in the `NodeInfo`
/// * The `Fingerprint` in the `DepNode` that is in this `NodeInfo`
/// * The `DepKind`'s discriminant (a u16, but not all bits are used...)
/// * The byte width of the encoded edges for this node
/// * In whatever bits remain, the length of the edge list for this node, if it fits
struct SerializedNodeHeader<D> {
    // 2 bytes for the packed kind, byte width, and inline length
    // 16 for Fingerprint in DepNode
    // 16 for Fingerprint in NodeInfo
    bytes: [u8; 34],
    _marker: PhantomData<D>,
}

// The fields of a `SerializedNodeHeader`. This struct is an implementation detail and exists only
// to make the implementation of `SerializedNodeHeader` simpler.
struct Unpacked {
    len: Option<usize>,
    bytes_per_index: usize,
    kind: DepKind,
    hash: PackedFingerprint,
    fingerprint: Fingerprint,
}

// Bit fields of the 16-bit header word, where
// K: bits used to store the kind's discriminant
// N: bits used to store the byte width of elements of the edge list
// are laid out from least to most significant:
// 0..K      kind
// K..K+N    bytes per index
// K+N..16   length of the edge list, if it fits
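//
// As a worked example (with a hypothetical `DEP_KIND_MAX` of 300, so KIND_BITS == 9):
// WIDTH_BITS == 2 and LEN_BITS == 16 - 9 - 2 == 5, making MAX_INLINE_LEN
// (0xFFFF >> 11) - 1 == 30. A node with 12 edges stores 12 + 1 == 13 in the length
// field; a node with 40 edges stores 0 there and spills its length to leb128.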
impl<D: Deps> SerializedNodeHeader<D> {
    const TOTAL_BITS: usize = size_of::<DepKind>() * 8;
    const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
    const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
    const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;
    const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;

    #[inline]
    fn new(
        node: DepNode,
        fingerprint: Fingerprint,
        edge_max_index: u32,
        edge_count: usize,
    ) -> Self {
        debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS);

        let mut head = node.kind.as_inner();

        let free_bytes = edge_max_index.leading_zeros() as usize / 8;
        let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1);
        head |= (bytes_per_index as u16) << Self::KIND_BITS;

        // Encode number of edges + 1 so that we can reserve 0 to indicate that the len doesn't fit
        // in this bitfield.
        if edge_count <= Self::MAX_INLINE_LEN {
            head |= (edge_count as u16 + 1) << (Self::KIND_BITS + Self::WIDTH_BITS);
        }

        let hash: Fingerprint = node.hash.into();

        // Using half-open ranges ensures an unconditional panic if we get the magic numbers wrong.
        let mut bytes = [0u8; 34];
        bytes[..2].copy_from_slice(&head.to_le_bytes());
        bytes[2..18].copy_from_slice(&hash.to_le_bytes());
        bytes[18..].copy_from_slice(&fingerprint.to_le_bytes());

        #[cfg(debug_assertions)]
        {
            let res = Self { bytes, _marker: PhantomData };
            assert_eq!(fingerprint, res.fingerprint());
            assert_eq!(node, res.node());
            if let Some(len) = res.len() {
                assert_eq!(edge_count, len);
            }
        }
        Self { bytes, _marker: PhantomData }
    }

    #[inline]
    fn unpack(&self) -> Unpacked {
        let head = u16::from_le_bytes(self.bytes[..2].try_into().unwrap());
        let hash = self.bytes[2..18].try_into().unwrap();
        let fingerprint = self.bytes[18..].try_into().unwrap();

        let kind = head & mask(Self::KIND_BITS) as u16;
        let bytes_per_index = (head >> Self::KIND_BITS) & mask(Self::WIDTH_BITS) as u16;
        let len = (head as usize) >> (Self::WIDTH_BITS + Self::KIND_BITS);

        Unpacked {
            len: len.checked_sub(1),
            bytes_per_index: bytes_per_index as usize + 1,
            kind: DepKind::new(kind),
            hash: Fingerprint::from_le_bytes(hash).into(),
            fingerprint: Fingerprint::from_le_bytes(fingerprint),
        }
    }

    #[inline]
    fn len(&self) -> Option<usize> {
        self.unpack().len
    }

    #[inline]
    fn bytes_per_index(&self) -> usize {
        self.unpack().bytes_per_index
    }

    #[inline]
    fn fingerprint(&self) -> Fingerprint {
        self.unpack().fingerprint
    }

    #[inline]
    fn node(&self) -> DepNode {
        let Unpacked { kind, hash, .. } = self.unpack();
        DepNode { kind, hash }
    }

    #[inline]
    fn edges_header(&self, edge_list_data: &[u8]) -> EdgeHeader {
        EdgeHeader {
            repr: (edge_list_data.len() << DEP_NODE_WIDTH_BITS) | (self.bytes_per_index() - 1),
        }
    }
}

#[derive(Debug)]
struct NodeInfo {
    node: DepNode,
    fingerprint: Fingerprint,
    edges: EdgesVec,
}

impl NodeInfo {
    fn encode<D: Deps>(&self, e: &mut FileEncoder) {
        let NodeInfo { node, fingerprint, ref edges } = *self;
        let header =
            SerializedNodeHeader::<D>::new(node, fingerprint, edges.max_index(), edges.len());
        e.write_array(header.bytes);

        if header.len().is_none() {
            e.emit_usize(edges.len());
        }

        let bytes_per_index = header.bytes_per_index();
        for node_index in edges.iter() {
            e.write_with(|dest| {
                *dest = node_index.as_u32().to_le_bytes();
                bytes_per_index
            });
        }
    }
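
    // Illustrative byte layout of one node as written by `encode` above, assuming
    // 2-byte edge indices and an edge count that fits in the header:
    //
    //   [ 34-byte SerializedNodeHeader | edge 0 (2 bytes) | edge 1 (2 bytes) | ... ]
    //
    // If the edge count spills, its leb128 encoding sits between the header and the edges.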

    /// Encode a node that was promoted from the previous graph. It reads the edges directly from
    /// the previous dep graph and expects all edges to already have a new dep node index assigned.
    /// This avoids the overhead of constructing `EdgesVec`, which would be needed to call `encode`.
    #[inline]
    fn encode_promoted<D: Deps>(
        e: &mut FileEncoder,
        node: DepNode,
        fingerprint: Fingerprint,
        prev_index: SerializedDepNodeIndex,
        colors: &DepNodeColorMap,
        previous: &SerializedDepGraph,
    ) -> usize {
        let edges = previous.edge_targets_from(prev_index);
        let edge_count = edges.size_hint().0;

        // Find the highest edge in the new dep node indices
        let edge_max =
            edges.clone().map(|i| colors.current(i).unwrap().as_u32()).max().unwrap_or(0);

        let header = SerializedNodeHeader::<D>::new(node, fingerprint, edge_max, edge_count);
        e.write_array(header.bytes);

        if header.len().is_none() {
            e.emit_usize(edge_count);
        }

        let bytes_per_index = header.bytes_per_index();
        for node_index in edges {
            let node_index = colors.current(node_index).unwrap();
            e.write_with(|dest| {
                *dest = node_index.as_u32().to_le_bytes();
                bytes_per_index
            });
        }

        edge_count
    }
}

struct Stat {
    kind: DepKind,
    node_counter: u64,
    edge_counter: u64,
}

struct EncoderState<D: Deps> {
    previous: Arc<SerializedDepGraph>,
    encoder: FileEncoder,
    total_node_count: usize,
    total_edge_count: usize,
    stats: Option<FxHashMap<DepKind, Stat>>,

    /// Stores the number of times we've encoded each dep kind.
    kind_stats: Vec<u32>,
    marker: PhantomData<D>,
}

impl<D: Deps> EncoderState<D> {
    fn new(encoder: FileEncoder, record_stats: bool, previous: Arc<SerializedDepGraph>) -> Self {
        Self {
            previous,
            encoder,
            total_edge_count: 0,
            total_node_count: 0,
            stats: record_stats.then(FxHashMap::default),
            kind_stats: iter::repeat(0).take(D::DEP_KIND_MAX as usize + 1).collect(),
            marker: PhantomData,
        }
    }

    #[inline]
    fn record(
        &mut self,
        node: DepNode,
        edge_count: usize,
        edges: impl FnOnce(&mut Self) -> Vec<DepNodeIndex>,
        record_graph: &Option<Lock<DepGraphQuery>>,
    ) -> DepNodeIndex {
        let index = DepNodeIndex::new(self.total_node_count);

        self.total_node_count += 1;
        self.kind_stats[node.kind.as_usize()] += 1;
        self.total_edge_count += edge_count;

        if let Some(record_graph) = &record_graph {
            // Call `edges` before the outlined code to allow the closure to be optimized out.
            let edges = edges(self);

            // Outline the build of the full dep graph as it's typically disabled and cold.
            outline(move || {
                // Do not ICE when a query is called from within `with_query`.
                if let Some(record_graph) = &mut record_graph.try_lock() {
                    record_graph.push(index, node, &edges);
                }
            });
        }

        if let Some(stats) = &mut self.stats {
            let kind = node.kind;

            // Outline the stats code as it's typically disabled and cold.
            outline(move || {
                let stat =
                    stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
                stat.node_counter += 1;
                stat.edge_counter += edge_count as u64;
            });
        }

        index
    }

    /// Encodes a node to the current graph.
    fn encode_node(
        &mut self,
        node: &NodeInfo,
        record_graph: &Option<Lock<DepGraphQuery>>,
    ) -> DepNodeIndex {
        node.encode::<D>(&mut self.encoder);
        self.record(node.node, node.edges.len(), |_| node.edges[..].to_vec(), record_graph)
    }

    /// Encodes a node that was promoted from the previous graph. It reads the information directly
    /// from the previous dep graph for performance reasons.
    ///
    /// This differs from `encode_node`, where you have to explicitly provide the relevant
    /// `NodeInfo`.
    ///
    /// It expects all edges to already have a new dep node index assigned.
    #[inline]
    fn encode_promoted_node(
        &mut self,
        prev_index: SerializedDepNodeIndex,
        record_graph: &Option<Lock<DepGraphQuery>>,
        colors: &DepNodeColorMap,
    ) -> DepNodeIndex {
        let node = self.previous.index_to_node(prev_index);

        let fingerprint = self.previous.fingerprint_by_index(prev_index);
        let edge_count = NodeInfo::encode_promoted::<D>(
            &mut self.encoder,
            node,
            fingerprint,
            prev_index,
            colors,
            &self.previous,
        );

        self.record(
            node,
            edge_count,
            |this| {
                this.previous
                    .edge_targets_from(prev_index)
                    .map(|i| colors.current(i).unwrap())
                    .collect()
            },
            record_graph,
        )
    }

    fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        let Self {
            mut encoder,
            total_node_count,
            total_edge_count,
            stats: _,
            kind_stats,
            marker: _,
            previous: _,
        } = self;

        let node_count = total_node_count.try_into().unwrap();
        let edge_count = total_edge_count.try_into().unwrap();

        // Encode the number of nodes of each dep kind encountered.
        for count in kind_stats.iter() {
            count.encode(&mut encoder);
        }

        debug!(?node_count, ?edge_count);
        debug!("position: {:?}", encoder.position());
        IntEncodedWithFixedSize(node_count).encode(&mut encoder);
        IntEncodedWithFixedSize(edge_count).encode(&mut encoder);
        debug!("position: {:?}", encoder.position());
        // Drop the encoder so that nothing is written after the counts.
        let result = encoder.finish();
        if let Ok(position) = result {
            // FIXME(rylev): we hardcode the dep graph file name so we
            // don't need a dependency on rustc_incremental just for that.
            profiler.artifact_size("dep_graph", "dep-graph.bin", position as u64);
        }
        result
    }
}
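
// Illustrative overall layout of the dep graph section produced above (assuming the
// section is decoded in isolation, as `SerializedDepGraph::decode` does):
//
//   [ node 0 | node 1 | ... | one u32 per DepKind (node counts) | node count: u64 | edge count: u64 ]
//
// The two trailing fixed-size integers are the "last 16 bytes" that `decode` reads
// first, by seeking to `len - 16`.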

pub(crate) struct GraphEncoder<D: Deps> {
    profiler: SelfProfilerRef,
    status: Lock<Option<EncoderState<D>>>,
    record_graph: Option<Lock<DepGraphQuery>>,
}

impl<D: Deps> GraphEncoder<D> {
    pub(crate) fn new(
        encoder: FileEncoder,
        prev_node_count: usize,
        record_graph: bool,
        record_stats: bool,
        profiler: &SelfProfilerRef,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        let record_graph = record_graph.then(|| Lock::new(DepGraphQuery::new(prev_node_count)));
        let status = Lock::new(Some(EncoderState::new(encoder, record_stats, previous)));
        GraphEncoder { status, record_graph, profiler: profiler.clone() }
    }

    pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(record_graph) = &self.record_graph {
            f(&record_graph.lock())
        }
    }

    pub(crate) fn print_incremental_info(
        &self,
        total_read_count: u64,
        total_duplicate_read_count: u64,
    ) {
        let mut status = self.status.lock();
        let status = status.as_mut().unwrap();
        if let Some(record_stats) = &status.stats {
            let mut stats: Vec<_> = record_stats.values().collect();
            stats.sort_by_key(|s| -(s.node_counter as i64));

            const SEPARATOR: &str = "[incremental] --------------------------------\
                                     ----------------------------------------------\
                                     ------------";

            eprintln!("[incremental]");
            eprintln!("[incremental] DepGraph Statistics");
            eprintln!("{SEPARATOR}");
            eprintln!("[incremental]");
            eprintln!("[incremental] Total Node Count: {}", status.total_node_count);
            eprintln!("[incremental] Total Edge Count: {}", status.total_edge_count);

            if cfg!(debug_assertions) {
                eprintln!("[incremental] Total Edge Reads: {total_read_count}");
                eprintln!("[incremental] Total Duplicate Edge Reads: {total_duplicate_read_count}");
            }

            eprintln!("[incremental]");
            eprintln!(
                "[incremental]  {:<36}| {:<17}| {:<12}| {:<17}|",
                "Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
            );
            eprintln!("{SEPARATOR}");

            for stat in stats {
                let node_kind_ratio =
                    (100.0 * (stat.node_counter as f64)) / (status.total_node_count as f64);
                let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);

                eprintln!(
                    "[incremental]  {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
                    format!("{:?}", stat.kind),
                    node_kind_ratio,
                    stat.node_counter,
                    node_kind_avg_edges,
                );
            }

            eprintln!("{SEPARATOR}");
            eprintln!("[incremental]");
        }
    }

    pub(crate) fn send(
        &self,
        node: DepNode,
        fingerprint: Fingerprint,
        edges: EdgesVec,
    ) -> DepNodeIndex {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");
        let node = NodeInfo { node, fingerprint, edges };
        self.status.lock().as_mut().unwrap().encode_node(&node, &self.record_graph)
    }

    /// Encodes a node that was promoted from the previous graph. It reads the information directly
    /// from the previous dep graph and expects all edges to already have a new dep node index
    /// assigned.
    ///
    /// This will also ensure the dep node is marked green.
    #[inline]
    pub(crate) fn send_promoted(
        &self,
        prev_index: SerializedDepNodeIndex,
        colors: &DepNodeColorMap,
    ) -> DepNodeIndex {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph");

        let mut status = self.status.lock();
        let status = status.as_mut().unwrap();

        // Check colors inside the lock to avoid racing when `send_promoted` is called concurrently
        // on the same index.
        match colors.get(prev_index) {
            None => {
                let dep_node_index =
                    status.encode_promoted_node(prev_index, &self.record_graph, colors);
                colors.insert(prev_index, DepNodeColor::Green(dep_node_index));
                dep_node_index
            }
            Some(DepNodeColor::Green(dep_node_index)) => dep_node_index,
            Some(DepNodeColor::Red) => panic!(),
        }
    }

    pub(crate) fn finish(&self) -> FileEncodeResult {
        let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph_finish");

        self.status.lock().take().unwrap().finish(&self.profiler)
    }
}