 #include "llvm/ADT/STLExtras.h"
 #include <optional>
 #include <queue>
-#include <set>
 #include <vector>
 
 namespace clang {
 
 // Intermediate data used in constructing a CFGIntervalNode.
 template <typename Node> struct BuildResult {
   // Use a vector to maintain the insertion order. Given the expected small
-  // number of nodes, vector should be sufficiently efficient.
+  // number of nodes, vector should be sufficiently efficient. Elements must not
+  // be null.
   std::vector<const Node *> Nodes;
+  // Elements must not be null.
   llvm::SmallDenseSet<const Node *> Successors;
 };
 
 namespace internal {
-static unsigned getID(const CFGBlock *B) { return B->getBlockID(); }
-static unsigned getID(const CFGIntervalNode *I) { return I->ID; }
+static unsigned getID(const CFGBlock &B) { return B.getBlockID(); }
+static unsigned getID(const CFGIntervalNode &I) { return I.ID; }
 
 // `Node` must be one of `CFGBlock` or `CFGIntervalNode`.
 template <typename Node>
 BuildResult<Node> buildInterval(llvm::BitVector &Partitioned,
                                 const Node *Header) {
-  assert(Header);
+  assert(Header != nullptr);
   BuildResult<Node> Interval;
   Interval.Nodes.push_back(Header);
-  Partitioned.set(getID(Header));
+  Partitioned.set(getID(*Header));
 
   // FIXME: Compare performance against using RPO to consider nodes, rather than
   // following successors.
@@ -50,7 +51,7 @@ BuildResult<Node> buildInterval(llvm::BitVector &Partitioned,
   llvm::BitVector Workset(Partitioned.size(), false);
   for (const Node *S : Header->succs())
     if (S != nullptr)
-      if (auto SID = getID(S); !Partitioned.test(SID)) {
+      if (auto SID = getID(*S); !Partitioned.test(SID)) {
         // Successors are unique, so we don't test against `Workset` before
         // adding to `Worklist`.
         Worklist.push(S);
@@ -63,12 +64,12 @@ BuildResult<Node> buildInterval(llvm::BitVector &Partitioned,
   // yet. In the latter case, we'll revisit the block through some other path
   // from the interval. At the end of processing the worklist, we filter out any
   // that ended up in the interval to produce the output set of interval
-  // successors.
+  // successors. Elements are never null.
   std::vector<const Node *> MaybeSuccessors;
 
   while (!Worklist.empty()) {
     const auto *B = Worklist.front();
-    auto ID = getID(B);
+    auto ID = getID(*B);
     Worklist.pop();
     Workset.reset(ID);
 
@@ -82,7 +83,7 @@ BuildResult<Node> buildInterval(llvm::BitVector &Partitioned,
       Partitioned.set(ID);
       for (const Node *S : B->succs())
         if (S != nullptr)
-          if (auto SID = getID(S);
+          if (auto SID = getID(*S);
               !Partitioned.test(SID) && !Workset.test(SID)) {
             Worklist.push(S);
             Workset.set(SID);
@@ -116,8 +117,9 @@ void fillIntervalNode(CFGIntervalGraph &Graph,
   // graph. In this case, the new interval has identifier `ID` so all of its
   // nodes (`Result.Nodes`) map to `ID`.
   for (const auto *N : Result.Nodes) {
-    assert(getID(N) < Index.size());
-    Index[getID(N)] = &Interval;
+    assert(N != nullptr);
+    assert(getID(*N) < Index.size());
+    Index[getID(*N)] = &Interval;
   }
 
   if constexpr (std::is_same_v<std::decay_t<Node>, CFGBlock>)
@@ -159,7 +161,8 @@ CFGIntervalGraph partitionIntoIntervalsImpl(unsigned NumBlockIDs,
   while (!Successors.empty()) {
     const auto *B = Successors.front();
     Successors.pop();
-    if (Partitioned.test(getID(B)))
+    assert(B != nullptr);
+    if (Partitioned.test(getID(*B)))
       continue;
 
     // B has not been partitioned, but it has a predecessor that has. Create a
@@ -173,8 +176,11 @@ CFGIntervalGraph partitionIntoIntervalsImpl(unsigned NumBlockIDs,
     // Map input-graph predecessors to output-graph nodes and mark those as
     // predecessors of `N`. Then, mark `N` as a successor of said predecessor.
     for (const Node *P : H->preds()) {
-      assert(getID(P) < NumBlockIDs);
-      CFGIntervalNode *Pred = Index[getID(P)];
+      if (P == nullptr)
+        continue;
+
+      assert(getID(*P) < NumBlockIDs);
+      CFGIntervalNode *Pred = Index[getID(*P)];
       if (Pred == nullptr)
         // Unreachable node.
         continue;
@@ -229,7 +235,7 @@ WTOCompare::WTOCompare(const WeakTopologicalOrdering &WTO) {
     return;
   auto N = WTO[0]->getParent()->getNumBlockIDs();
   BlockOrder.resize(N, 0);
-  for (unsigned I = 0; I < N; ++I)
+  for (unsigned I = 0, S = WTO.size(); I < S; ++I)
     BlockOrder[WTO[I]->getBlockID()] = I + 1;
 }
 } // namespace clang
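
The recurring change above is that `getID` now takes a reference instead of a pointer, so every call site must establish non-nullness (an assert or an explicit skip) before dereferencing. Below is a minimal standalone sketch of that pattern, using hypothetical simplified types rather than the actual clang CFG API:

#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a CFG block; only the ID accessor matters here.
struct Block {
  unsigned BlockID;
  unsigned getBlockID() const { return BlockID; }
};

// Reference-taking overload: callers must prove non-nullness before calling.
static unsigned getID(const Block &B) { return B.getBlockID(); }

int main() {
  Block B0{0}, B2{2};
  // Null entries model absent predecessors, as with `H->preds()` in the diff.
  const std::vector<const Block *> Preds = {&B0, nullptr, &B2};
  for (const Block *P : Preds) {
    if (P == nullptr)
      continue; // skip nulls first, mirroring the checks added in this commit
    std::printf("pred id: %u\n", getID(*P)); // dereference only after the check
  }
  return 0;
}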