use std::fs;
use std::sync::Arc;

use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::sync::par_join;
use rustc_middle::dep_graph::{
    DepGraph, SerializedDepGraph, WorkProduct, WorkProductId, WorkProductMap,
};
use rustc_middle::ty::TyCtxt;
use rustc_serialize::Encodable as RustcEncodable;
use rustc_serialize::opaque::FileEncoder;
use rustc_session::Session;
use tracing::debug;

use super::data::*;
use super::fs::*;
use super::{clean, file_format, work_product};
use crate::assert_dep_graph::assert_dep_graph;
use crate::errors;
2021/// Saves and writes the [`DepGraph`] to the file system.
22///
23/// This function saves both the dep-graph and the query result cache,
24/// and drops the result cache.
25///
26/// This function should only run after all queries have completed.
27/// Trying to execute a query afterwards would attempt to read the result cache we just dropped.
28pub(crate) fn save_dep_graph(tcx: TyCtxt<'_>) {
29{
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_incremental/src/persist/save.rs:29",
"rustc_incremental::persist::save", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_incremental/src/persist/save.rs"),
::tracing_core::__macro_support::Option::Some(29u32),
::tracing_core::__macro_support::Option::Some("rustc_incremental::persist::save"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("save_dep_graph()")
as &dyn Value))])
});
} else { ; }
};debug!("save_dep_graph()");
30tcx.dep_graph.with_ignore(|| {
31let sess = tcx.sess;
32if sess.opts.incremental.is_none() {
33return;
34 }
35// This is going to be deleted in finalize_session_directory, so let's not create it.
36if sess.dcx().has_errors_or_delayed_bugs().is_some() {
37return;
38 }
3940let query_cache_path = query_cache_path(sess);
41let dep_graph_path = dep_graph_path(sess);
42let staging_dep_graph_path = staging_dep_graph_path(sess);
4344sess.time("assert_dep_graph", || assert_dep_graph(tcx));
45sess.time("check_clean", || clean::check_clean_annotations(tcx));
4647par_join(
48move || {
49sess.time("incr_comp_persist_dep_graph", || {
50if let Err(err) = fs::rename(&staging_dep_graph_path, &dep_graph_path) {
51sess.dcx().emit_err(errors::MoveDepGraph {
52 from: &staging_dep_graph_path,
53 to: &dep_graph_path,
54err,
55 });
56 }
57 });
58 },
59move || {
60// We execute this after `incr_comp_persist_dep_graph` for the serial compiler
61 // to catch any potential query execution writing to the dep graph.
62sess.time("incr_comp_persist_result_cache", || {
63// The on-disk cache struct is always present in incremental mode,
64 // even if there was no previous session.
65let on_disk_cache = tcx.query_system.on_disk_cache.as_ref().unwrap();
6667// For every green dep node that has a disk-cached value from the
68 // previous session, make sure the value is loaded into the memory
69 // cache, so that it will be serialized as part of this session.
70 //
71 // This reads data from the previous session, so it needs to happen
72 // before dropping the mmap.
73 //
74 // FIXME(Zalathar): This step is intended to be cheap, but still does
75 // quite a lot of work, especially in builds with few or no changes.
76 // Can we be smarter about how we identify values that need promotion?
77 // Can we promote values without decoding them into the memory cache?
78tcx.dep_graph.exec_cache_promotions(tcx);
7980// Drop the memory map so that we can remove the file and write to it.
81on_disk_cache.close_serialized_data_mmap();
8283 file_format::save_in(sess, query_cache_path, "query cache", |encoder| {
84tcx.sess.time("incr_comp_serialize_result_cache", || {
85on_disk_cache.serialize(tcx, encoder)
86 })
87 });
88 });
89 },
90 );
91 })
92}
9394/// Saves the work product index.
95pub fn save_work_product_index(
96 sess: &Session,
97 dep_graph: &DepGraph,
98 new_work_products: FxIndexMap<WorkProductId, WorkProduct>,
99) {
100if sess.opts.incremental.is_none() {
101return;
102 }
103// This is going to be deleted in finalize_session_directory, so let's not create it
104if sess.dcx().has_errors().is_some() {
105return;
106 }
107108{
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_incremental/src/persist/save.rs:108",
"rustc_incremental::persist::save", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_incremental/src/persist/save.rs"),
::tracing_core::__macro_support::Option::Some(108u32),
::tracing_core::__macro_support::Option::Some("rustc_incremental::persist::save"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("save_work_product_index()")
as &dyn Value))])
});
} else { ; }
};debug!("save_work_product_index()");
109dep_graph.assert_ignored();
110let path = work_products_path(sess);
111 file_format::save_in(sess, path, "work product index", |mut e| {
112encode_work_product_index(&new_work_products, &mut e);
113e.finish()
114 });
115116// We also need to clean out old work-products, as not all of them are
117 // deleted during invalidation. Some object files don't change their
118 // content, they are just not needed anymore.
119let previous_work_products = dep_graph.previous_work_products();
120for (id, wp) in previous_work_products.to_sorted_stable_ord() {
121if !new_work_products.contains_key(id) {
122 work_product::delete_workproduct_files(sess, wp);
123if true {
if !!wp.saved_files.items().all(|(_, path)|
in_incr_comp_dir_sess(sess, path).exists()) {
::core::panicking::panic("assertion failed: !wp.saved_files.items().all(|(_, path)|\n in_incr_comp_dir_sess(sess, path).exists())")
};
};debug_assert!(
124 !wp.saved_files.items().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
125 );
126 }
127 }
128129// Check that we did not delete one of the current work-products:
130if true {
if !{
new_work_products.iter().all(|(_, wp)|
{
wp.saved_files.items().all(|(_, path)|
in_incr_comp_dir_sess(sess, path).exists())
})
} {
::core::panicking::panic("assertion failed: {\n new_work_products.iter().all(|(_, wp)|\n {\n wp.saved_files.items().all(|(_, path)|\n in_incr_comp_dir_sess(sess, path).exists())\n })\n}")
};
};debug_assert!({
131 new_work_products.iter().all(|(_, wp)| {
132 wp.saved_files.items().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
133 })
134 });
135}
136137fn encode_work_product_index(
138 work_products: &FxIndexMap<WorkProductId, WorkProduct>,
139 encoder: &mut FileEncoder,
140) {
141let serialized_products: Vec<_> = work_products142 .iter()
143 .map(|(id, work_product)| SerializedWorkProduct {
144 id: *id,
145 work_product: work_product.clone(),
146 })
147 .collect();
148149serialized_products.encode(encoder)
150}
151152/// Builds the dependency graph.
153///
154/// This function creates the *staging dep-graph*. When the dep-graph is modified by a query
155/// execution, the new dependency information is not kept in memory but directly
156/// output to this file. `save_dep_graph` then finalizes the staging dep-graph
157/// and moves it to the permanent dep-graph path
158pub(crate) fn build_dep_graph(
159 sess: &Session,
160 prev_graph: Arc<SerializedDepGraph>,
161 prev_work_products: WorkProductMap,
162) -> Option<DepGraph> {
163if sess.opts.incremental.is_none() {
164// No incremental compilation.
165return None;
166 }
167168// Stream the dep-graph to an alternate file, to avoid overwriting anything in case of errors.
169let path_buf = staging_dep_graph_path(sess);
170171let mut encoder = match FileEncoder::new(&path_buf) {
172Ok(encoder) => encoder,
173Err(err) => {
174sess.dcx().emit_err(errors::CreateDepGraph { path: &path_buf, err });
175return None;
176 }
177 };
178179 file_format::write_file_header(&mut encoder, sess);
180181// First encode the commandline arguments hash
182sess.opts.dep_tracking_hash(false).encode(&mut encoder);
183184Some(DepGraph::new(sess, prev_graph, prev_work_products, encoder))
185}