Commit b27266f

Use a QueryContext for try_mark_green.
1 parent 3bd14c7 commit b27266f

10 files changed, 133 additions and 143 deletions

compiler/rustc_codegen_cranelift/src/driver/aot.rs

Lines changed: 1 addition & 5 deletions
@@ -465,9 +465,5 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
         cgu.name()
     );
 
-    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
-        CguReuse::PreLto
-    } else {
-        CguReuse::No
-    }
+    if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
 }

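The call-site change above (and the analogous one in rustc_codegen_ssa below) collapses the reuse check to a single boolean test. As a rough, self-contained illustration of that caller-facing shape — `MockTcx`, `CguReuse`, and the string dep node here are invented stand-ins, not rustc's real `TyCtxt`/`DepNode`:

// Mock of the call-site pattern; only the shape of the API matters.
#[derive(Debug, PartialEq)]
enum CguReuse {
    No,
    PreLto,
}

struct MockTcx {
    node_is_green: bool,
}

impl MockTcx {
    // Mirrors the new caller-facing signature: a plain bool, no Option to unwrap.
    fn try_mark_green(&self, _dep_node: &str) -> bool {
        self.node_is_green
    }
}

fn determine_cgu_reuse(tcx: &MockTcx, dep_node: &str) -> CguReuse {
    if tcx.try_mark_green(dep_node) { CguReuse::PreLto } else { CguReuse::No }
}

fn main() {
    let tcx = MockTcx { node_is_green: true };
    assert_eq!(determine_cgu_reuse(&tcx, "codegen_unit(foo)"), CguReuse::PreLto);
}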
compiler/rustc_codegen_ssa/src/base.rs

Lines changed: 1 addition & 1 deletion
@@ -867,7 +867,7 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
         cgu.name()
     );
 
-    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
+    if tcx.try_mark_green(&dep_node) {
         // We can re-use either the pre- or the post-thinlto state. If no LTO is
         // being performed then we can use post-LTO artifacts, otherwise we must
         // reuse pre-LTO artifacts

compiler/rustc_middle/src/dep_graph/dep_node.rs

Lines changed: 1 addition & 1 deletion
@@ -135,7 +135,7 @@ pub struct DepKindStruct {
     /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
     /// add it to the "We don't have enough information to reconstruct..." group in
     /// the match below.
-    pub(super) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
+    pub(crate) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
 
     /// Invoke a query to put the on-disk cached value in memory.
     pub(crate) try_load_from_on_disk_cache: fn(QueryCtxt<'_>, &DepNode),

compiler/rustc_middle/src/dep_graph/mod.rs

Lines changed: 0 additions & 95 deletions
@@ -2,9 +2,6 @@ use crate::ich::StableHashingContext
 use crate::ty::{self, TyCtxt};
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;
-use rustc_hir::def_id::LocalDefId;
 
 mod dep_node;
 
@@ -116,99 +113,7 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
         &self.dep_graph
     }
 
-    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
-        // FIXME: This match is just a workaround for incremental bugs and should
-        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
-        // bug that must be fixed before removing this.
-        match dep_node.kind {
-            DepKind::hir_owner | DepKind::hir_owner_nodes => {
-                if let Some(def_id) = dep_node.extract_def_id(*self) {
-                    if !def_id_corresponds_to_hir_dep_node(*self, def_id.expect_local()) {
-                        // This `DefPath` does not have a
-                        // corresponding `DepNode` (e.g. a
-                        // struct field), and the ` DefPath`
-                        // collided with the `DefPath` of a
-                        // proper item that existed in the
-                        // previous compilation session.
-                        //
-                        // Since the given `DefPath` does not
-                        // denote the item that previously
-                        // existed, we just fail to mark green.
-                        return false;
-                    }
-                } else {
-                    // If the node does not exist anymore, we
-                    // just fail to mark green.
-                    return false;
-                }
-            }
-            _ => {
-                // For other kinds of nodes it's OK to be
-                // forced.
-            }
-        }
-
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
-        // We must avoid ever having to call `force_from_dep_node()` for a
-        // `DepNode::codegen_unit`:
-        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
-        // would always end up having to evaluate the first caller of the
-        // `codegen_unit` query that *is* reconstructible. This might very well be
-        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
-        // to re-trigger calling the `codegen_unit` query with the right key. At
-        // that point we would already have re-done all the work we are trying to
-        // avoid doing in the first place.
-        // The solution is simple: Just explicitly call the `codegen_unit` query for
-        // each CGU, right after partitioning. This way `try_mark_green` will always
-        // hit the cache instead of having to go through `force_from_dep_node`.
-        // This assertion makes sure, we actually keep applying the solution above.
-        debug_assert!(
-            dep_node.kind != DepKind::codegen_unit,
-            "calling force_from_dep_node() on DepKind::codegen_unit"
-        );
-
-        (dep_node.kind.force_from_dep_node)(*self, dep_node)
-    }
-
-    fn has_errors_or_delayed_span_bugs(&self) -> bool {
-        self.sess.has_errors_or_delayed_span_bugs()
-    }
-
-    fn diagnostic(&self) -> &rustc_errors::Handler {
-        self.sess.diagnostic()
-    }
-
-    // Interactions with on_disk_cache
-    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
-        self.on_disk_cache
-            .as_ref()
-            .map(|c| c.load_diagnostics(*self, prev_dep_node_index))
-            .unwrap_or_default()
-    }
-
-    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
-        if let Some(c) = self.on_disk_cache.as_ref() {
-            c.store_diagnostics(dep_node_index, diagnostics)
-        }
-    }
-
-    fn store_diagnostics_for_anon_node(
-        &self,
-        dep_node_index: DepNodeIndex,
-        diagnostics: ThinVec<Diagnostic>,
-    ) {
-        if let Some(c) = self.on_disk_cache.as_ref() {
-            c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
-        }
-    }
-
     fn profiler(&self) -> &SelfProfilerRef {
         &self.prof
     }
 }
-
-fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-    def_id == hir_id.owner
-}

compiler/rustc_middle/src/ty/query/mod.rs

Lines changed: 5 additions & 0 deletions
@@ -119,6 +119,11 @@ impl TyCtxt<'tcx> {
     pub fn at(self, span: Span) -> TyCtxtAt<'tcx> {
         TyCtxtAt { tcx: self, span }
     }
+
+    pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
+        let qcx = QueryCtxt { tcx: self, queries: self.queries };
+        self.dep_graph.try_mark_green(qcx, dep_node).is_some()
+    }
 }
 
 macro_rules! define_callbacks {

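This hunk is the core of the commit: `TyCtxt` gains a small convenience method that builds the richer `QueryCtxt` on the fly, hands it to the dep graph, and flattens the returned `Option<DepNodeIndex>` into a `bool`. Below is a self-contained sketch of that delegation pattern under heavily simplified, stand-in types (the real definitions live in rustc_middle and rustc_query_system):

#![allow(dead_code)]

// Stand-ins for the real rustc types; only the shape of the delegation matters.
#[derive(Clone, Copy)]
struct DepNode(u64);

struct DepNodeIndex(usize);

struct Queries;

struct DepGraph;

impl DepGraph {
    // The real method is generic over a QueryContext; this stub just reports
    // whether the node could be marked green.
    fn try_mark_green(&self, _qcx: QueryCtxt<'_>, dep_node: &DepNode) -> Option<DepNodeIndex> {
        if dep_node.0 % 2 == 0 { Some(DepNodeIndex(0)) } else { None }
    }
}

#[derive(Clone, Copy)]
struct TyCtxt<'tcx> {
    dep_graph: &'tcx DepGraph,
    queries: &'tcx Queries,
}

// The richer context: what a query-aware green-marking pass needs.
#[derive(Clone, Copy)]
struct QueryCtxt<'tcx> {
    tcx: TyCtxt<'tcx>,
    queries: &'tcx Queries,
}

impl<'tcx> TyCtxt<'tcx> {
    // Mirrors the added method: build a QueryCtxt, delegate, flatten to bool.
    fn try_mark_green(self, dep_node: &DepNode) -> bool {
        let qcx = QueryCtxt { tcx: self, queries: self.queries };
        self.dep_graph.try_mark_green(qcx, dep_node).is_some()
    }
}

fn main() {
    let (dep_graph, queries) = (DepGraph, Queries);
    let tcx = TyCtxt { dep_graph: &dep_graph, queries: &queries };
    assert!(tcx.try_mark_green(&DepNode(2)));
    assert!(!tcx.try_mark_green(&DepNode(3)));
}

The point of the wrapper is that callers such as the codegen backends no longer need to know about `QueryCtxt` or the dep graph at all.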
compiler/rustc_middle/src/ty/query/plumbing.rs

Lines changed: 90 additions & 1 deletion
@@ -2,7 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph;
+use crate::dep_graph::{self, DepKind, DepNode, DepNodeExt, DepNodeIndex, SerializedDepNodeIndex};
 use crate::ty::query::{on_disk_cache, Queries, Query};
 use crate::ty::tls::{self, ImplicitCtxt};
 use crate::ty::{self, TyCtxt};
@@ -72,6 +72,95 @@ impl QueryContext for QueryCtxt<'tcx> {
         (dep_node.kind.try_load_from_on_disk_cache)(*self, dep_node)
     }
 
+    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
+        // FIXME: This match is just a workaround for incremental bugs and should
+        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
+        // bug that must be fixed before removing this.
+        match dep_node.kind {
+            DepKind::hir_owner | DepKind::hir_owner_nodes => {
+                if let Some(def_id) = dep_node.extract_def_id(**self) {
+                    let def_id = def_id.expect_local();
+                    let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+                    if def_id != hir_id.owner {
+                        // This `DefPath` does not have a
+                        // corresponding `DepNode` (e.g. a
+                        // struct field), and the ` DefPath`
+                        // collided with the `DefPath` of a
+                        // proper item that existed in the
+                        // previous compilation session.
+                        //
+                        // Since the given `DefPath` does not
+                        // denote the item that previously
+                        // existed, we just fail to mark green.
+                        return false;
+                    }
+                } else {
+                    // If the node does not exist anymore, we
+                    // just fail to mark green.
+                    return false;
+                }
+            }
+            _ => {
+                // For other kinds of nodes it's OK to be
+                // forced.
+            }
+        }
+
+        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+        // We must avoid ever having to call `force_from_dep_node()` for a
+        // `DepNode::codegen_unit`:
+        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+        // would always end up having to evaluate the first caller of the
+        // `codegen_unit` query that *is* reconstructible. This might very well be
+        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+        // to re-trigger calling the `codegen_unit` query with the right key. At
+        // that point we would already have re-done all the work we are trying to
+        // avoid doing in the first place.
+        // The solution is simple: Just explicitly call the `codegen_unit` query for
+        // each CGU, right after partitioning. This way `try_mark_green` will always
+        // hit the cache instead of having to go through `force_from_dep_node`.
+        // This assertion makes sure, we actually keep applying the solution above.
+        debug_assert!(
+            dep_node.kind != DepKind::codegen_unit,
+            "calling force_from_dep_node() on DepKind::codegen_unit"
+        );
+
+        (dep_node.kind.force_from_dep_node)(**self, dep_node)
+    }
+
+    fn has_errors_or_delayed_span_bugs(&self) -> bool {
+        self.sess.has_errors_or_delayed_span_bugs()
+    }
+
+    fn diagnostic(&self) -> &rustc_errors::Handler {
+        self.sess.diagnostic()
+    }
+
+    // Interactions with on_disk_cache
+    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
+        self.on_disk_cache
+            .as_ref()
+            .map(|c| c.load_diagnostics(**self, prev_dep_node_index))
+            .unwrap_or_default()
+    }
+
+    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
+        if let Some(c) = self.on_disk_cache.as_ref() {
+            c.store_diagnostics(dep_node_index, diagnostics)
+        }
+    }
+
+    fn store_diagnostics_for_anon_node(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    ) {
+        if let Some(c) = self.on_disk_cache.as_ref() {
+            c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
+        }
+    }
+
     /// Executes a job by changing the `ImplicitCtxt` to point to the
     /// new query job while it executes. It returns the diagnostics
     /// captured during execution and the actual result.
compiler/rustc_query_system/src/dep_graph/graph.rs

Lines changed: 4 additions & 4 deletions
@@ -587,7 +587,7 @@ impl<K: DepKind> DepGraph<K> {
     /// A node will have an index, when it's already been marked green, or when we can mark it
     /// green. This function will mark the current task as a reader of the specified node, when
     /// a node index can be found for that node.
-    pub fn try_mark_green_and_read<Ctxt: DepContext<DepKind = K>>(
+    pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         dep_node: &DepNode<K>,
@@ -599,7 +599,7 @@ impl<K: DepKind> DepGraph<K> {
         })
     }
 
-    pub fn try_mark_green<Ctxt: DepContext<DepKind = K>>(
+    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         dep_node: &DepNode<K>,
@@ -627,7 +627,7 @@ impl<K: DepKind> DepGraph<K> {
     }
 
     /// Try to mark a dep-node which existed in the previous compilation session as green.
-    fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>(
+    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
@@ -811,7 +811,7 @@ impl<K: DepKind> DepGraph<K> {
     /// This may be called concurrently on multiple threads for the same dep node.
     #[cold]
     #[inline(never)]
-    fn emit_diagnostics<Ctxt: DepContext<DepKind = K>>(
+    fn emit_diagnostics<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         data: &DepGraphData<K>,

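The signature changes above tighten the bound from `DepContext` to `QueryContext` because marking a node green may need to force queries and replay diagnostics stored in a previous session, and after this commit those hooks are declared on `QueryContext` (see the trait moves below). The following is an invented miniature of that trait split — the traits and the `u64`/`String` payloads are simplified stand-ins, not the real rustc_query_system definitions:

/// Graph-level context; in the real compiler this exposes the dep graph,
/// profiler, etc. (elided in this sketch).
trait DepContext: Copy {}

/// Query-level context; after this commit the forcing and diagnostics hooks
/// live here, one level above DepContext.
trait QueryContext: DepContext {
    fn try_force_from_dep_node(&self, dep_node: u64) -> bool;
    fn load_diagnostics(&self, prev_dep_node_index: usize) -> Vec<String>;
}

// Analogue of DepGraph::try_mark_previous_green: it must be generic over the
// query-level trait because it calls the hooks declared above.
fn try_mark_green<Ctxt: QueryContext>(ctxt: Ctxt, dep_node: u64) -> bool {
    if !ctxt.try_force_from_dep_node(dep_node) {
        return false;
    }
    // Replay diagnostics recorded for the now-green node (simplified).
    for diag in ctxt.load_diagnostics(0) {
        eprintln!("replayed diagnostic: {}", diag);
    }
    true
}

#[derive(Clone, Copy)]
struct DummyCtxt;

impl DepContext for DummyCtxt {}

impl QueryContext for DummyCtxt {
    fn try_force_from_dep_node(&self, _dep_node: u64) -> bool { true }
    fn load_diagnostics(&self, _prev_dep_node_index: usize) -> Vec<String> { Vec::new() }
}

fn main() {
    assert!(try_mark_green(DummyCtxt, 7));
}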
compiler/rustc_query_system/src/dep_graph/mod.rs

Lines changed: 0 additions & 24 deletions
@@ -13,8 +13,6 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
 
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;
 
 use std::fmt;
 use std::hash::Hash;
@@ -32,30 +30,8 @@ pub trait DepContext: Copy {
     /// Access the DepGraph.
     fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
 
-    /// Try to force a dep node to execute and see if it's green.
-    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
-
     fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
 
-    /// Return whether the current session is tainted by errors.
-    fn has_errors_or_delayed_span_bugs(&self) -> bool;
-
-    /// Return the diagnostic handler.
-    fn diagnostic(&self) -> &rustc_errors::Handler;
-
-    /// Load diagnostics associated to the node in the previous session.
-    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
-
-    /// Register diagnostics for the given node, for use in next session.
-    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
-
-    /// Register diagnostics for the given node, for use in next session.
-    fn store_diagnostics_for_anon_node(
-        &self,
-        dep_node_index: DepNodeIndex,
-        diagnostics: ThinVec<Diagnostic>,
-    );
-
     /// Access the profiler.
     fn profiler(&self) -> &SelfProfilerRef;
 }

compiler/rustc_query_system/src/query/mod.rs

Lines changed: 23 additions & 1 deletion
@@ -14,7 +14,7 @@ pub use self::caches::{
 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
 
-use crate::dep_graph::{DepNode, HasDepContext};
+use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
 use crate::query::job::QueryMap;
 
 use rustc_data_structures::stable_hasher::HashStable;
@@ -40,6 +40,28 @@ pub trait QueryContext: HasDepContext {
     /// Load data from the on-disk cache.
     fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
 
+    /// Try to force a dep node to execute and see if it's green.
+    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
+
+    /// Return whether the current session is tainted by errors.
+    fn has_errors_or_delayed_span_bugs(&self) -> bool;
+
+    /// Return the diagnostic handler.
+    fn diagnostic(&self) -> &rustc_errors::Handler;
+
+    /// Load diagnostics associated to the node in the previous session.
+    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
+
+    /// Register diagnostics for the given node, for use in next session.
+    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
+
+    /// Register diagnostics for the given node, for use in next session.
+    fn store_diagnostics_for_anon_node(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    );
+
     /// Executes a job by changing the `ImplicitCtxt` to point to the
     /// new query job while it executes. It returns the diagnostics
     /// captured during execution and the actual result.

0 commit comments
