 #include "mlir/Transforms/TopologicalSortUtils.h"
 #include "mlir/IR/OpDefinition.h"
+#include "llvm/ADT/SetVector.h"
 
 using namespace mlir;
 
-bool mlir::sortTopologically(
-    Block *block, llvm::iterator_range<Block::iterator> ops,
-    function_ref<bool(Value, Operation *)> isOperandReady) {
-  if (ops.empty())
-    return true;
-
-  // The set of operations that have not yet been scheduled.
-  DenseSet<Operation *> unscheduledOps;
-  // Mark all operations as unscheduled.
-  for (Operation &op : ops)
-    unscheduledOps.insert(&op);
-
-  Block::iterator nextScheduledOp = ops.begin();
-  Block::iterator end = ops.end();
-
+/// Return `true` if the given operation is ready to be scheduled.
+static bool isOpReady(Block *block, Operation *op,
+                      DenseSet<Operation *> &unscheduledOps,
+                      function_ref<bool(Value, Operation *)> isOperandReady) {
   // An operation is ready to be scheduled if all its operands are ready. An
   // operation is ready if:
   const auto isReady = [&](Value value, Operation *top) {
     // - the user-provided callback marks it as ready,
-    if (isOperandReady && isOperandReady(value, top))
+    if (isOperandReady && isOperandReady(value, op))
       return true;
     Operation *parent = value.getDefiningOp();
     // - it is a block argument,
@@ -41,12 +31,38 @@ bool mlir::sortTopologically(
     if (!ancestor)
       return true;
     // - it is defined in a nested region, or
-    if (ancestor == top)
+    if (ancestor == op)
       return true;
     // - its ancestor in the block is scheduled.
     return !unscheduledOps.contains(ancestor);
   };
 
+  // An operation is recursively ready to be scheduled if it and its nested
+  // operations are ready.
+  WalkResult readyToSchedule = op->walk([&](Operation *nestedOp) {
+    return llvm::all_of(nestedOp->getOperands(),
+                        [&](Value operand) { return isReady(operand, op); })
+               ? WalkResult::advance()
+               : WalkResult::interrupt();
+  });
+  return !readyToSchedule.wasInterrupted();
+}
+
+bool mlir::sortTopologically(
+    Block *block, llvm::iterator_range<Block::iterator> ops,
+    function_ref<bool(Value, Operation *)> isOperandReady) {
+  if (ops.empty())
+    return true;
+
+  // The set of operations that have not yet been scheduled.
+  DenseSet<Operation *> unscheduledOps;
+  // Mark all operations as unscheduled.
+  for (Operation &op : ops)
+    unscheduledOps.insert(&op);
+
+  Block::iterator nextScheduledOp = ops.begin();
+  Block::iterator end = ops.end();
+
   bool allOpsScheduled = true;
   while (!unscheduledOps.empty()) {
     bool scheduledAtLeastOnce = false;
@@ -56,16 +72,7 @@ bool mlir::sortTopologically(
     // set, and "schedule" it (move it before the `nextScheduledOp`).
     for (Operation &op :
          llvm::make_early_inc_range(llvm::make_range(nextScheduledOp, end))) {
-      // An operation is recursively ready to be scheduled of it and its nested
-      // operations are ready.
-      WalkResult readyToSchedule = op.walk([&](Operation *nestedOp) {
-        return llvm::all_of(
-                   nestedOp->getOperands(),
-                   [&](Value operand) { return isReady(operand, &op); })
-                   ? WalkResult::advance()
-                   : WalkResult::interrupt();
-      });
-      if (readyToSchedule.wasInterrupted())
+      if (!isOpReady(block, &op, unscheduledOps, isOperandReady))
         continue;
 
       // Schedule the operation by moving it to the start.
@@ -96,3 +103,48 @@ bool mlir::sortTopologically(
                              isOperandReady);
   return sortTopologically(block, *block, isOperandReady);
 }
+
+bool mlir::computeTopologicalSorting(
+    Block *block, MutableArrayRef<Operation *> ops,
+    function_ref<bool(Value, Operation *)> isOperandReady) {
+  if (ops.empty())
+    return true;
+
+  // The set of operations that have not yet been scheduled.
+  DenseSet<Operation *> unscheduledOps;
+
+  // Mark all operations as unscheduled.
+  for (Operation *op : ops) {
+    assert(op->getBlock() == block && "op must belong to block");
+    unscheduledOps.insert(op);
+  }
+
+  unsigned nextScheduledOp = 0;
+
+  bool allOpsScheduled = true;
+  while (!unscheduledOps.empty()) {
+    bool scheduledAtLeastOnce = false;
+
+    // Loop over the ops that are not sorted yet, try to find the ones "ready",
+    // i.e. the ones for which there aren't any operands produced by an op in
+    // the set, and "schedule" it (swap it with the op at `nextScheduledOp`).
+    for (unsigned i = nextScheduledOp; i < ops.size(); ++i) {
+      if (!isOpReady(block, ops[i], unscheduledOps, isOperandReady))
+        continue;
+
+      // Schedule the operation by swapping it into the next scheduled slot.
+      unscheduledOps.erase(ops[i]);
+      std::swap(ops[i], ops[nextScheduledOp]);
+      scheduledAtLeastOnce = true;
+      ++nextScheduledOp;
+    }
+
+    // If no operations were scheduled, just schedule the first op and continue.
+    if (!scheduledAtLeastOnce) {
+      allOpsScheduled = false;
+      unscheduledOps.erase(ops[nextScheduledOp++]);
+    }
+  }
+
+  return allOpsScheduled;
+}
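
For reference, a minimal sketch of how the two entry points touched by this patch might be called. It is not part of the change: the `example` function, the `block` pointer, and the `opsToSort` vector are hypothetical, and it assumes the `isOperandReady` callback defaults to nullptr as declared in mlir/Transforms/TopologicalSortUtils.h.

// Hypothetical caller, not part of the patch.
#include "mlir/Transforms/TopologicalSortUtils.h"
#include "mlir/IR/Operation.h"
#include "llvm/ADT/SmallVector.h"

using namespace mlir;

static void example(Block *block) {
  // In-place variant: reorders the operations of `block` so that producers
  // come before their users; returns false if a cycle prevented a full sort.
  bool fullySorted = sortTopologically(block);
  (void)fullySorted;

  // Analysis-only variant added here: permutes the `opsToSort` vector into a
  // topological order without modifying the IR. All ops must belong to
  // `block`.
  llvm::SmallVector<Operation *> opsToSort;
  for (Operation &op : block->without_terminator())
    opsToSort.push_back(&op);
  bool acyclic = computeTopologicalSorting(block, opsToSort);
  (void)acyclic;
}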