 //! - bound the maximum depth by a constant `MAX_BACKTRACK`;
 //! - we only traverse `Goto` terminators.
 //!
+//! We try to avoid creating irreducible control-flow by not threading through a loop header.
+//!
 //! Likewise, applying the optimisation can create a lot of new MIR, so we bound the instruction
 //! cost by `MAX_COST`.
 
 use rustc_arena::DroplessArena;
 use rustc_data_structures::fx::FxHashSet;
+use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
 use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::*;
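Editor's aside (not part of the patch): the module docs above describe which switches the pass threads and why loop headers are now excluded. The sketch below is ordinary Rust source whose MIR illustrates both situations; the function names `thread_me` and `skip_me` are invented for the example, and the comments describe the expected behaviour of the pass rather than anything this commit adds.

// Hypothetical illustration of the pattern the pass targets.
// In `thread_me`, each predecessor of the join block fixes the discriminant of `x`
// (`Some` on the `true` path, `None` on the `false` path), so the `SwitchInt` on
// `discriminant(x)` can be threaded: each predecessor jumps straight to its arm,
// duplicating only a few cheap statements (duplication is bounded by `MAX_COST`,
// and the backward search by `MAX_BACKTRACK`).
fn thread_me(c: bool) -> i32 {
    let x = if c { Some(1) } else { None };
    match x {
        Some(v) => v,
        None => 0,
    }
}

// In `skip_me`, the interesting blocks sit inside a loop. The backward search is not
// allowed to cross the loop header, which is what keeps the transformed CFG reducible.
fn skip_me(n: u32) -> u32 {
    let mut acc = 0;
    for i in 0..n {
        acc += if i % 2 == 0 { 1 } else { 2 };
    }
    acc
}

fn main() {
    assert_eq!(thread_me(true), 1);
    assert_eq!(skip_me(4), 6);
}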
@@ -58,14 +61,22 @@ impl<'tcx> MirPass<'tcx> for JumpThreading {
 
         let param_env = tcx.param_env_reveal_all_normalized(def_id);
         let map = Map::new(tcx, body, Some(MAX_PLACES));
+        let loop_headers = loop_headers(body);
 
         let arena = DroplessArena::default();
-        let mut finder =
-            TOFinder { tcx, param_env, body, arena: &arena, map: &map, opportunities: Vec::new() };
+        let mut finder = TOFinder {
+            tcx,
+            param_env,
+            body,
+            arena: &arena,
+            map: &map,
+            loop_headers: &loop_headers,
+            opportunities: Vec::new(),
+        };
 
         for (bb, bbdata) in body.basic_blocks.iter_enumerated() {
             debug!(?bb, term = ?bbdata.terminator());
-            if bbdata.is_cleanup {
+            if bbdata.is_cleanup || loop_headers.contains(bb) {
                 continue;
             }
             let Some((discr, targets)) = bbdata.terminator().kind.as_switch() else { continue };
@@ -108,6 +119,10 @@ impl<'tcx> MirPass<'tcx> for JumpThreading {
             return;
         }
 
+        // Verify that we do not thread through a loop header.
+        for to in opportunities.iter() {
+            assert!(to.chain.iter().all(|&block| !loop_headers.contains(block)));
+        }
         OpportunitySet::new(body, opportunities).apply(body);
     }
 }
@@ -125,6 +140,7 @@ struct TOFinder<'tcx, 'a> {
     param_env: ty::ParamEnv<'tcx>,
     body: &'a Body<'tcx>,
     map: &'a Map,
+    loop_headers: &'a BitSet<BasicBlock>,
     /// We use an arena to avoid cloning the slices when cloning `state`.
     arena: &'a DroplessArena,
     opportunities: Vec<ThreadingOpportunity>,
@@ -180,6 +196,11 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         mut cost: CostChecker<'_, 'tcx>,
         depth: usize,
     ) {
+        // Do not thread through loop headers.
+        if self.loop_headers.contains(bb) {
+            return;
+        }
+
         debug!(cost = ?cost.cost());
         for (statement_index, stmt) in
             self.body.basic_blocks[bb].statements.iter().enumerate().rev()
@@ -636,3 +657,21 @@ enum Update {
     Incr,
     Decr,
 }
+
+/// Compute the set of loop headers in the given body. We define a loop header as a block which has
+/// at least a predecessor which it dominates. This definition is only correct for reducible CFGs.
+/// But if the CFG is already irreducible, there is no point in trying much harder.
+fn loop_headers(body: &Body<'_>) -> BitSet<BasicBlock> {
+    let mut loop_headers = BitSet::new_empty(body.basic_blocks.len());
+    let dominators = body.basic_blocks.dominators();
+    // Only visit reachable blocks.
+    for (bb, bbdata) in traversal::preorder(body) {
+        for succ in bbdata.terminator().successors() {
+            if dominators.dominates(succ, bb) {
+                loop_headers.insert(succ);
+            }
+        }
+    }
+    loop_headers
+}
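For readers unfamiliar with the dominance-based definition used in `loop_headers` above ("a block which has at least a predecessor which it dominates"), here is a standalone, hedged sketch of the same idea on a toy CFG. It is not rustc code: the adjacency-list representation and helper names are invented, and dominance is checked the naive way (a block `v` dominates `u` iff `u` becomes unreachable from the entry once `v` is removed) instead of using `body.basic_blocks.dominators()`.

// Standalone sketch (editor's illustration, not rustc code).
// A block is a loop header iff it dominates at least one of its predecessors,
// i.e. iff some edge `bb -> succ` is a back edge (`succ` dominates `bb`).

/// Blocks reachable from `entry`, optionally pretending that `avoid` has been removed.
fn reachable_avoiding(succ: &[Vec<usize>], entry: usize, avoid: Option<usize>) -> Vec<bool> {
    let mut seen = vec![false; succ.len()];
    if Some(entry) == avoid {
        return seen;
    }
    let mut stack = vec![entry];
    seen[entry] = true;
    while let Some(b) = stack.pop() {
        for &s in &succ[b] {
            if Some(s) != avoid && !seen[s] {
                seen[s] = true;
                stack.push(s);
            }
        }
    }
    seen
}

/// Naive dominance: `v` dominates `u` iff every path from `entry` to `u` goes through `v`,
/// i.e. `u` is unreachable once `v` is removed.
fn dominates(succ: &[Vec<usize>], entry: usize, v: usize, u: usize) -> bool {
    !reachable_avoiding(succ, entry, Some(v))[u]
}

fn loop_headers(succ: &[Vec<usize>], entry: usize) -> Vec<usize> {
    let reachable = reachable_avoiding(succ, entry, None);
    let mut headers = Vec::new();
    // Only consider reachable blocks, like the preorder traversal in the pass.
    for bb in (0..succ.len()).filter(|&bb| reachable[bb]) {
        for &s in &succ[bb] {
            // `bb -> s` is a back edge: `s` dominates its predecessor `bb`, so `s` is a header.
            if dominates(succ, entry, s, bb) {
                headers.push(s);
            }
        }
    }
    headers.sort_unstable();
    headers.dedup();
    headers
}

fn main() {
    // CFG: 0 -> 1, 1 -> 2, 2 -> {1, 3}. The edge 2 -> 1 is a back edge, so 1 is the loop header.
    let succ = vec![vec![1], vec![2], vec![1, 3], vec![]];
    assert_eq!(loop_headers(&succ, 0), vec![1]);
}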