@@ -1,4 +1,4 @@
-//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
+//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -15,31 +15,54 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/AtomicExpandUtils.h"
-#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
-#include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
 
 using namespace llvm;
 
 #define DEBUG_TYPE "atomic-expand"
 
 namespace {
+
 class AtomicExpand : public FunctionPass {
-  const TargetLowering *TLI;
+  const TargetLowering *TLI = nullptr;
+
 public:
   static char ID; // Pass identification, replacement for typeid
-  AtomicExpand() : FunctionPass(ID), TLI(nullptr) {
+
+  AtomicExpand() : FunctionPass(ID) {
     initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
   }
 
@@ -92,39 +115,41 @@ namespace {
   llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                  CreateCmpXchgInstFun CreateCmpXchg);
 };
-}
+
+} // end anonymous namespace
 
 char AtomicExpand::ID = 0;
+
 char &llvm::AtomicExpandID = AtomicExpand::ID;
+
 INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions",
                 false, false)
 
 FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
 
-namespace {
 // Helper functions to retrieve the size of atomic instructions.
-unsigned getAtomicOpSize(LoadInst *LI) {
+static unsigned getAtomicOpSize(LoadInst *LI) {
   const DataLayout &DL = LI->getModule()->getDataLayout();
   return DL.getTypeStoreSize(LI->getType());
 }
 
-unsigned getAtomicOpSize(StoreInst *SI) {
+static unsigned getAtomicOpSize(StoreInst *SI) {
   const DataLayout &DL = SI->getModule()->getDataLayout();
   return DL.getTypeStoreSize(SI->getValueOperand()->getType());
 }
 
-unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
+static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
   const DataLayout &DL = RMWI->getModule()->getDataLayout();
   return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
 }
 
-unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
+static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
   const DataLayout &DL = CASI->getModule()->getDataLayout();
   return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
 }
 
 // Helper functions to retrieve the alignment of atomic instructions.
-unsigned getAtomicOpAlign(LoadInst *LI) {
+static unsigned getAtomicOpAlign(LoadInst *LI) {
   unsigned Align = LI->getAlignment();
   // In the future, if this IR restriction is relaxed, we should
   // return DataLayout::getABITypeAlignment when there's no align
@@ -133,7 +158,7 @@ unsigned getAtomicOpAlign(LoadInst *LI) {
   return Align;
 }
 
-unsigned getAtomicOpAlign(StoreInst *SI) {
+static unsigned getAtomicOpAlign(StoreInst *SI) {
   unsigned Align = SI->getAlignment();
   // In the future, if this IR restriction is relaxed, we should
   // return DataLayout::getABITypeAlignment when there's no align
@@ -142,15 +167,15 @@ unsigned getAtomicOpAlign(StoreInst *SI) {
   return Align;
 }
 
-unsigned getAtomicOpAlign(AtomicRMWInst *RMWI) {
+static unsigned getAtomicOpAlign(AtomicRMWInst *RMWI) {
   // TODO(PR27168): This instruction has no alignment attribute, but unlike the
   // default alignment for load/store, the default here is to assume
   // it has NATURAL alignment, not DataLayout-specified alignment.
   const DataLayout &DL = RMWI->getModule()->getDataLayout();
   return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
 }
 
-unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
+static unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
   // TODO(PR27168): same comment as above.
   const DataLayout &DL = CASI->getModule()->getDataLayout();
   return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
@@ -160,14 +185,12 @@ unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
 // and is of appropriate alignment, to be passed through for target
 // lowering. (Versus turning into a __atomic libcall)
 template <typename Inst>
-bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
+static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
   unsigned Size = getAtomicOpSize(I);
   unsigned Align = getAtomicOpAlign(I);
   return Align >= Size && Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
 }
 
-} // end anonymous namespace
-
 bool AtomicExpand::runOnFunction(Function &F) {
   auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
   if (!TPC)
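
The atomicSizeSupported check above is what decides whether an atomic operation is kept for target lowering or handed off as an __atomic_* libcall. Below is a minimal standalone C++ sketch of that gate, with plain integers standing in for the DataLayout and TargetLowering queries; the names are illustrative only, not LLVM API.

// Sketch only: SizeInBytes/AlignInBytes stand in for getAtomicOpSize and
// getAtomicOpAlign; MaxAtomicSizeInBits stands in for
// TargetLowering::getMaxAtomicSizeInBitsSupported().
inline bool atomicSizeSupportedSketch(unsigned SizeInBytes,
                                      unsigned AlignInBytes,
                                      unsigned MaxAtomicSizeInBits) {
  // Keep the operation for native lowering only if it is at least naturally
  // aligned and no wider than the widest inline atomic the target supports.
  return AlignInBytes >= SizeInBytes && SizeInBytes <= MaxAtomicSizeInBits / 8;
}

// On a target with 64-bit inline atomics, an aligned 8-byte access passes
// (atomicSizeSupportedSketch(8, 8, 64) == true), while a 16-byte access fails
// the check and would instead be expanded to a __atomic_* libcall.
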
@@ -556,6 +579,7 @@ struct PartwordMaskValues {
   Value *Mask;
   Value *Inv_Mask;
 };
+
 } // end anonymous namespace
 
 /// This is a helper function which builds instructions to provide
@@ -574,7 +598,6 @@ struct PartwordMaskValues {
 /// include only the part that would've been loaded from Addr.
 ///
 /// Inv_Mask: The inverse of Mask.
-
 static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
                                            Type *ValueType, Value *Addr,
                                            unsigned WordSize) {
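
createMaskInstrs emits the IR that realizes the values documented in the comment above, in particular the mask that covers only the partword value and its inverse. Purely to illustrate the arithmetic those instructions encode, here is a standalone sketch assuming a little-endian layout and a naturally aligned partword access; the names are hypothetical and this is not the pass's IR-building code.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the values a partword expansion works with.
struct PartwordMaskSketch {
  unsigned ShiftAmt;  // bit offset of the narrow value within its word
  uint64_t Mask;      // selects only the bits of the narrow value
  uint64_t Inv_Mask;  // everything in the word except those bits
};

inline PartwordMaskSketch computeMaskSketch(uint64_t Addr, unsigned ValueBytes,
                                            unsigned WordBytes) {
  assert(ValueBytes < WordBytes && "only partword accesses need masking");
  assert(Addr % ValueBytes == 0 && "partword value assumed naturally aligned");
  PartwordMaskSketch MV;
  MV.ShiftAmt = (Addr % WordBytes) * 8;  // position of the value in the word
  MV.Mask = ((uint64_t{1} << (ValueBytes * 8)) - 1) << MV.ShiftAmt;
  MV.Inv_Mask = ~MV.Mask;                // the untouched bytes of the word
  return MV;
}
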
@@ -680,7 +703,6 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
 /// part of the value.
 void AtomicExpand::expandPartwordAtomicRMW(
     AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
-
   assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg);
 
   AtomicOrdering MemOpOrder = AI->getOrdering();
@@ -937,7 +959,6 @@ AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *
   return NewCI;
 }
 
-
 bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
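
For context, the llvm::expandAtomicRMWToCmpXchg helper referenced near the top of the diff rewrites an atomicrmw into a compare-exchange retry loop. The same loop shape, sketched here with std::atomic rather than the IRBuilder/CreateCmpXchg machinery the pass actually uses:

#include <atomic>

// Illustrative only: the retry-loop structure the expansion produces for an
// "atomicrmw add", expressed in C++ instead of LLVM IR.
inline int atomicAddViaCmpXchg(std::atomic<int> &Obj, int Val) {
  int Loaded = Obj.load(std::memory_order_relaxed);
  int NewVal;
  do {
    NewVal = Loaded + Val;  // the RMW operation being expanded
  } while (!Obj.compare_exchange_weak(Loaded, NewVal,
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed));
  return Loaded;  // atomicrmw yields the value seen before the update
}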