
Commit 6fecd78

Verify dep-graph consistency when loading it.
Always recover from duplicate DepNode.
1 parent b2807b2

1 file changed (+27 −7 lines)


compiler/rustc_query_system/src/dep_graph/serialized.rs

Lines changed: 27 additions & 7 deletions
@@ -85,15 +85,13 @@ impl<K: DepKind> SerializedDepGraph<K> {
     }
 
     pub fn node_count(&self) -> usize {
-        self.index.len()
+        self.nodes.len()
     }
 }
 
-impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
-    for SerializedDepGraph<K>
-{
+impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> SerializedDepGraph<K> {
     #[instrument(level = "debug", skip(d))]
-    fn decode(d: &mut MemDecoder<'a>) -> SerializedDepGraph<K> {
+    pub fn decode(d: &mut MemDecoder<'a>) -> SerializedDepGraph<K> {
         // The last 16 bytes are the node count and edge count.
         debug!("position: {:?}", d.position());
         let (node_count, edge_count) =
@@ -133,8 +131,30 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
             debug_assert_eq!(_i.index(), _index);
         }
 
-        let index: FxHashMap<_, _> =
-            nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect();
+        let mut duplicates = Vec::new();
+        let mut index: FxHashMap<DepNode<K>, SerializedDepNodeIndex> = Default::default();
+        index.reserve(nodes.len());
+        for (idx, &dep_node) in nodes.iter_enumerated() {
+            if index.insert(dep_node, idx).is_some() {
+                duplicates.push(dep_node);
+            }
+        }
+
+        // Creating the index detected a duplicated DepNode.
+        //
+        // If the new session presents us with a DepNode among those, we have no
+        // way to know which SerializedDepNodeIndex it corresponds to. To avoid
+        // making the wrong connection between a DepNodeIndex and a SerializedDepNodeIndex,
+        // we remove all the duplicates from the index.
+        //
+        // This way, when the new session presents us with a DepNode among the duplicates,
+        // we just create a new node with no counterpart in the previous graph.
+        //
+        // Red/green marking still works for those nodes, as that algorithm does not
+        // need to know about DepNode at all.
+        for dep_node in duplicates {
+            index.remove(&dep_node);
+        }
 
         SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }
     }
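The recovery strategy in the patch can be illustrated outside the compiler. The sketch below is a minimal, self-contained version of the same two-pass idea, using std's HashMap with a string key and a u32 index as hypothetical stand-ins for FxHashMap, DepNode<K>, and SerializedDepNodeIndex: build the index while recording every key that collides, then evict the collided keys so that an ambiguous key maps to nothing rather than to the wrong slot.

use std::collections::HashMap;

/// Build a key -> index map, dropping any key that appears more than once.
/// Mirrors the recovery logic above: an ambiguous key is better left
/// unmapped than mapped to an arbitrary one of its occurrences.
fn build_index(nodes: &[&str]) -> HashMap<String, u32> {
    let mut duplicates = Vec::new();
    let mut index: HashMap<String, u32> = HashMap::with_capacity(nodes.len());

    for (idx, &node) in nodes.iter().enumerate() {
        // `insert` returns the previous value when the key was already present,
        // which is how the patch detects a duplicated DepNode.
        if index.insert(node.to_owned(), idx as u32).is_some() {
            duplicates.push(node.to_owned());
        }
    }

    // Remove every duplicated key: a later lookup for it simply misses,
    // and the caller creates a fresh node with no previous-session counterpart.
    for node in duplicates {
        index.remove(&node);
    }

    index
}

fn main() {
    let nodes = ["a", "b", "a", "c"];
    let index = build_index(&nodes);
    assert_eq!(index.get("b"), Some(&1));
    assert_eq!(index.get("a"), None); // duplicated, so deliberately unmapped
    println!("{index:?}");
}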

0 commit comments

Comments
 (0)