@@ -92,29 +92,28 @@ static void replaceValue(Value &Old, Value &New) {
 }
 
 bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
-  // Match insert of scalar load.
+  // Match insert into fixed vector of scalar load.
+  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
   Value *Scalar;
-  if (!match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())))
+  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())))
     return false;
-  auto *Load = dyn_cast<LoadInst>(Scalar);
-  Type *ScalarTy = Scalar->getType();
+
   // Do not vectorize scalar load (widening) if atomic/volatile or under
   // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
   // or create data races non-existent in the source.
+  auto *Load = dyn_cast<LoadInst>(Scalar);
   if (!Load || !Load->isSimple() ||
       Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
       mustSuppressSpeculation(*Load))
     return false;
-  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
-  if (!Ty)
-    return false;
 
   // TODO: Extend this to match GEP with constant offsets.
   Value *PtrOp = Load->getPointerOperand()->stripPointerCasts();
   assert(isa<PointerType>(PtrOp->getType()) && "Expected a pointer type");
 
-  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
+  Type *ScalarTy = Scalar->getType();
   uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
+  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
   if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0)
     return false;
 