@@ -1025,7 +1025,7 @@ static void translateUnPackMask(CallInst &CI) {
   CI.replaceAllUsesWith(TransCI);
 }
 
-static bool translateVLoad(CallInst &CI, SmallPtrSet<Type *, 4> &GVTS) {
+static bool translateVLoad(CallInst &CI, SmallPtrSetImpl<Type *> &GVTS) {
   if (GVTS.find(CI.getType()) != GVTS.end())
     return false;
   IRBuilder<> Builder(&CI);
@@ -1035,7 +1035,7 @@ static bool translateVLoad(CallInst &CI, SmallPtrSet<Type *, 4> &GVTS) {
   return true;
 }
 
-static bool translateVStore(CallInst &CI, SmallPtrSet<Type *, 4> &GVTS) {
+static bool translateVStore(CallInst &CI, SmallPtrSetImpl<Type *> &GVTS) {
   if (GVTS.find(CI.getOperand(1)->getType()) != GVTS.end())
     return false;
   IRBuilder<> Builder(&CI);
@@ -1728,7 +1728,7 @@ SmallPtrSet<Type *, 4> collectGenXVolatileTypes(Module &M) {
 // of the simd object operations, but in some cases clang can implicitly
 // insert stores, such as after a write in inline assembly. To handle that
 // case, lower any stores of genx_volatiles into vstores.
-void lowerGlobalStores(Module &M, const SmallPtrSet<Type *, 4> &GVTS) {
+void lowerGlobalStores(Module &M, const SmallPtrSetImpl<Type *> &GVTS) {
  SmallVector<Instruction *, 4> ToErase;
   for (auto &F : M.functions()) {
     for (Instruction &I : instructions(F)) {
@@ -1781,7 +1781,7 @@ PreservedAnalyses SYCLLowerESIMDPass::run(Module &M, ModuleAnalysisManager &) {
 }
 
 size_t SYCLLowerESIMDPass::runOnFunction(Function &F,
-                                         SmallPtrSet<Type *, 4> &GVTS) {
+                                         SmallPtrSetImpl<Type *> &GVTS) {
   // There is a current limitation of GPU vector backend that requires kernel
   // functions to be inlined into the kernel itself. To overcome this
   // limitation, mark every function called from ESIMD kernel with
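The signature changes above follow the usual LLVM ADT convention: helpers should take SmallPtrSetImpl<T> & rather than SmallPtrSet<T, N> &, because the inline capacity N is baked into the SmallPtrSet type and would otherwise force every caller to use the same N. A minimal standalone sketch of the idiom; the containsVolatileType helper and the demo driver are illustrative names, not code from this patch:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Taking SmallPtrSetImpl<Type *> & (the size-erased base class) lets this
// helper accept a SmallPtrSet<Type *, N> for any inline capacity N.
static bool containsVolatileType(const SmallPtrSetImpl<Type *> &GVTS, Type *T) {
  return GVTS.count(T); // count() is 0 or 1 for a set
}

static void demo(LLVMContext &Ctx) {
  SmallPtrSet<Type *, 4> Small;  // one caller picks capacity 4...
  SmallPtrSet<Type *, 16> Big;   // ...another picks 16; both still bind
  Small.insert(Type::getInt32Ty(Ctx));
  (void)containsVolatileType(Small, Type::getInt32Ty(Ctx));
  (void)containsVolatileType(Big, Type::getInt32Ty(Ctx));
}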
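For context on the lowerGlobalStores hunk: per the comment in the diff, clang can implicitly materialize plain stores to genx_volatile values (e.g. after a write in inline assembly), and the pass rewrites those into vstores. A hedged sketch of that scan-and-replace shape, using only generic LLVM APIs; the actual vstore emission is elided, and this is an assumption about the structure, not the patch's exact code:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Walk every instruction in the module, queue plain stores whose stored
// value has a genx_volatile type, then erase them after rewriting.
// (Emitting the replacement vstore intrinsic call is elided here.)
static void lowerGlobalStoresSketch(Module &M,
                                    const SmallPtrSetImpl<Type *> &GVTS) {
  SmallVector<Instruction *, 4> ToErase;
  for (Function &F : M) {
    for (Instruction &I : instructions(F)) {
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!SI || !GVTS.count(SI->getValueOperand()->getType()))
        continue;
      // ... emit the equivalent vstore before SI (intrinsic creation elided) ...
      ToErase.push_back(SI);
    }
  }
  // Erase outside the iteration loop so iterators stay valid.
  for (Instruction *I : ToErase)
    I->eraseFromParent();
}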