@@ -16,7 +16,9 @@ use std::sync::atomic::Ordering::Relaxed;
 
 use super::debug::EdgeFilter;
 use super::query::DepGraphQuery;
-use super::serialized::{DepNodeColor, DepNodeIndex, SerializedDepGraph, SerializedDepNodeIndex};
+use super::serialized::{
+    CurrentDepGraph, DepNodeColor, DepNodeIndex, SerializedDepGraph, SerializedDepNodeIndex,
+};
 use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
 use crate::query::QueryContext;
 
@@ -45,7 +47,7 @@ impl std::convert::From<DepNodeIndex> for QueryInvocationId {
 struct DepGraphData<K: DepKind> {
     /// The dep-graph from the previous compilation session. It contains all
     /// nodes and edges as well as all fingerprints of nodes that have them.
-    previous: RwLock<SerializedDepGraph<K>>,
+    previous: RwLock<CurrentDepGraph<K>>,
 
     /// Used to trap when a specific edge is added to the graph.
     /// This is used for debug purposes and is only active with `debug_assertions`.
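Note: `CurrentDepGraph` is not defined in this diff; it is imported from `super::serialized` above. A minimal sketch of what the wrapper might look like, inferred purely from how it is used in these hunks; the field names, the `IndexVec` layout, and the constructor body are assumptions, not the PR's actual definition:

```rust
use rustc_index::vec::IndexVec;

/// Hypothetical sketch: wraps the previous session's graph and records the
/// color each serialized node acquires during the current session.
struct CurrentDepGraph<K: DepKind> {
    serialized: SerializedDepGraph<K>,
    colors: IndexVec<SerializedDepNodeIndex, Option<DepNodeColor>>,
}

impl<K: DepKind> CurrentDepGraph<K> {
    fn new(serialized: SerializedDepGraph<K>) -> Self {
        // One color slot per previous-session node, all undecided at first.
        let n = serialized.serialized_node_count();
        CurrentDepGraph { serialized, colors: IndexVec::from_elem_n(None, n) }
    }
}
```

Under this reading, replacing `RwLock<SerializedDepGraph<K>>` with `RwLock<CurrentDepGraph<K>>` moves node-coloring state into the same structure as the serialized graph, which is consistent with the `color_or_edges` call introduced further down.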
@@ -97,8 +99,6 @@ impl<K: DepKind> DepGraph<K> {
         prev_graph: SerializedDepGraph<K>,
         prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
     ) -> DepGraph<K> {
-        let _prev_graph_node_count = prev_graph.serialized_node_count();
-
         use std::time::{SystemTime, UNIX_EPOCH};
 
         let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
@@ -118,41 +118,6 @@ impl<K: DepKind> DepGraph<K> {
             None
         };
 
-        /*
-        // Pre-allocate the dep node structures. We over-allocate a little so
-        // that we hopefully don't have to re-allocate during this compilation
-        // session. The over-allocation for new nodes is 2% plus a small
-        // constant to account for the fact that in very small crates 2% might
-        // not be enough. The allocation for red and green node data doesn't
-        // include a constant, as we don't want to allocate anything for these
-        // structures during full incremental builds, where they aren't used.
-        //
-        // These estimates are based on the distribution of node and edge counts
-        // seen in rustc-perf benchmarks, adjusted somewhat to account for the
-        // fact that these benchmarks aren't perfectly representative.
-        //
-        // FIXME Use a collection type that doesn't copy node and edge data and
-        // grow multiplicatively on reallocation. Without such a collection or
-        // solution having the same effect, there is a performance hazard here
-        // in both time and space, as growing these collections means copying a
-        // large amount of data and doubling already large buffer capacities. A
-        // solution for this will also mean that it's less important to get
-        // these estimates right.
-        let new_node_count_estimate = (prev_graph_node_count * 2) / 100 + 200;
-        let red_node_count_estimate = (prev_graph_node_count * 3) / 100;
-        let light_green_node_count_estimate = (prev_graph_node_count * 25) / 100;
-        let total_node_count_estimate = prev_graph_node_count + new_node_count_estimate;
-
-        let average_edges_per_node_estimate = 6;
-        let unshared_edge_count_estimate = average_edges_per_node_estimate
-            * (new_node_count_estimate + red_node_count_estimate + light_green_node_count_estimate);
-        */
-
-        // We store a large collection of these in `prev_index_to_index` during
-        // non-full incremental builds, and want to ensure that the element size
-        // doesn't inadvertently increase.
-        static_assert_size!(Option<DepNodeIndex>, 4);
-
         DepGraph {
             data: Some(Lrc::new(DepGraphData {
                 previous_work_products: prev_work_products,
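For the record, the deleted (and already commented-out) pre-allocation heuristics are easy to evaluate concretely. A runnable transcription of the same formulas, applied to a hypothetical previous-session graph of 1,000,000 nodes:

```rust
// Worked example of the deleted heuristics; the input size is made up.
fn main() {
    let prev_graph_node_count: usize = 1_000_000; // hypothetical input

    // 2% of the previous nodes, plus a constant for very small crates.
    let new_node_count_estimate = (prev_graph_node_count * 2) / 100 + 200;
    // 3% red and 25% "light green" nodes, with no added constant.
    let red_node_count_estimate = (prev_graph_node_count * 3) / 100;
    let light_green_node_count_estimate = (prev_graph_node_count * 25) / 100;
    let total_node_count_estimate = prev_graph_node_count + new_node_count_estimate;

    // Roughly 6 edges per node across all freshly materialized nodes.
    let average_edges_per_node_estimate = 6;
    let unshared_edge_count_estimate = average_edges_per_node_estimate
        * (new_node_count_estimate + red_node_count_estimate + light_green_node_count_estimate);

    assert_eq!(new_node_count_estimate, 20_200);
    assert_eq!(red_node_count_estimate, 30_000);
    assert_eq!(light_green_node_count_estimate, 250_000);
    assert_eq!(total_node_count_estimate, 1_020_200);
    assert_eq!(unshared_edge_count_estimate, 1_801_200);
}
```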
@@ -162,7 +127,7 @@ impl<K: DepKind> DepGraph<K> {
                 total_read_count: AtomicU64::new(0),
                 total_duplicate_read_count: AtomicU64::new(0),
                 emitting_diagnostics: Default::default(),
-                previous: RwLock::new(prev_graph),
+                previous: RwLock::new(CurrentDepGraph::new(prev_graph)),
             })),
             virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
         }
@@ -627,21 +592,13 @@ impl<K: DepKind> DepGraph<K> {
         debug_assert!(!dep_node.kind.is_eval_always());
         debug_assert_eq!(data.previous.read().index_to_node(prev_dep_node_index), *dep_node);
 
-        // Do not keep a reference to the borrowed `previous` graph,
-        // because the recursive calls.
-        let prev_deps: Vec<_> =
-            data.previous.read().edge_targets_from_serialized(prev_dep_node_index).collect();
-        debug!(
-            "try_mark_previous_green({:?}) --- {:?} -- deps={:?}",
-            dep_node,
-            prev_dep_node_index,
-            prev_deps
-                .iter()
-                .map(|&d| (d, data.previous.read().index_to_node(d)))
-                .collect::<Vec<_>>(),
-        );
-
-        for dep_dep_node_index in prev_deps {
+        let prev_deps = data.previous.read().color_or_edges(prev_dep_node_index);
+        let prev_deps = match prev_deps {
+            Err(prev_deps) => prev_deps,
+            Ok(DepNodeColor::Green) => return Some(prev_dep_node_index.rejuvenate()),
+            Ok(DepNodeColor::Red) | Ok(DepNodeColor::New) => return None,
+        };
+        for &dep_dep_node_index in prev_deps {
             self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)?
         }
 
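Note: `color_or_edges`, `DepNodeColor::New`, and `rejuvenate` (which apparently converts a `SerializedDepNodeIndex` back into a `DepNodeIndex`) are introduced elsewhere in this PR. Continuing the hypothetical sketch above, the call site suggests a shape like the following, where an already-colored node short-circuits the edge walk entirely; this is an assumption inferred from the `match`, not the PR's actual signature:

```rust
impl<K: DepKind> CurrentDepGraph<K> {
    /// Returns the node's color if it has already been decided this session,
    /// otherwise the serialized edges that must be examined to decide it.
    fn color_or_edges(
        &self,
        index: SerializedDepNodeIndex,
    ) -> Result<DepNodeColor, &[SerializedDepNodeIndex]> {
        match self.colors[index] {
            Some(color) => Ok(color),
            None => Err(self.serialized.edge_targets_from(index)),
        }
    }
}
```

With a shape like this, the old pattern of collecting the edges into a `Vec` before recursing (and the `debug!` call that re-read the lock per edge) becomes unnecessary: a green node returns immediately, and a red or new node can never be marked green, so only the undecided case walks its dependencies.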
@@ -764,7 +721,7 @@ impl<K: DepKind> DepGraph<K> {
 
         let mut stats: FxHashMap<_, Stat<K>> = FxHashMap::with_hasher(Default::default());
 
-        for index in prev.indices() {
+        for index in prev.live_indices() {
            let kind = prev.dep_node_of(index).kind;
            let edge_count = prev.edge_targets_from(index).len();
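Note on the `indices()` to `live_indices()` rename in the stats loop above: it suggests that, with colors now tracked in the same table, some slots may no longer denote live nodes and should be skipped when aggregating statistics. A self-contained toy model of that idea; the liveness rule here is invented purely for illustration and is not how the PR represents dead slots:

```rust
// Toy model of iterating only live slots of an index table.
struct Table {
    names: Vec<Option<&'static str>>, // `None` marks a dead (replaced) slot
}

impl Table {
    fn live_indices(&self) -> impl Iterator<Item = usize> + '_ {
        (0..self.names.len()).filter(move |&i| self.names[i].is_some())
    }
}

fn main() {
    let table = Table { names: vec![Some("a"), None, Some("c")] };
    // Visits indices 0 and 2 only, skipping the dead slot at index 1.
    assert_eq!(table.live_indices().collect::<Vec<_>>(), vec![0, 2]);
}
```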