Skip to content

Commit c03ef26

Browse files
Commit message: "Restrict to constants to avoid indexing problems"
1 parent: 67f1d62 · this commit: c03ef26

File tree

3 files changed

+63
-54
lines changed

3 files changed

+63
-54
lines changed

llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -373,14 +373,21 @@ RISCVGatherScatterLowering::determineBaseAndStride(Instruction *Ptr,
373373
if (!VecOperand)
374374
return std::make_pair(nullptr, nullptr);
375375

376-
// We need the number of significant bits to match the index type. IF it
377-
// doesn't, then adding the stride later may not wrap correctly.
376+
// We can't extract the stride if the arithmetic is done at a different size
377+
// than the pointer type. Adding the stride later may not wrap correctly.
378+
// Technically we could handle wider indices, but I don't expect that in
379+
// practice. Handle one special case here - constants. This simplifies
380+
// writing test cases.
378381
Value *VecIndex = Ops[*VecOperand];
379382
Type *VecIntPtrTy = DL->getIntPtrType(GEP->getType());
380-
if (VecIndex->getType()->getScalarSizeInBits() > VecIntPtrTy->getScalarSizeInBits()) {
381-
unsigned MaxBits = ComputeMaxSignificantBits(VecIndex, *DL);
382-
if (MaxBits > VecIntPtrTy->getScalarSizeInBits())
383+
if (VecIndex->getType() != VecIntPtrTy) {
384+
auto *VecIndexC = dyn_cast<Constant>(VecIndex);
385+
if (!VecIndexC)
383386
return std::make_pair(nullptr, nullptr);
387+
if (VecIndex->getType()->getScalarSizeInBits() > VecIntPtrTy->getScalarSizeInBits())
388+
VecIndex = ConstantFoldCastInstruction(Instruction::Trunc, VecIndexC, VecIntPtrTy);
389+
else
390+
VecIndex = ConstantFoldCastInstruction(Instruction::SExt, VecIndexC, VecIntPtrTy);
384391
}
385392

386393
// Handle the non-recursive case. This is what we see if the vectorizer
@@ -398,8 +405,7 @@ RISCVGatherScatterLowering::determineBaseAndStride(Instruction *Ptr,
398405

399406
// Convert stride to pointer size if needed.
400407
Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
401-
assert(IntPtrTy == VecIntPtrTy->getScalarType());
402-
Stride = Builder.CreateSExtOrTrunc(Stride, IntPtrTy);
408+
assert(Stride->getType() == IntPtrTy && "Unexpected type");
403409

404410
// Scale the stride by the size of the indexed type.
405411
if (TypeScale != 1)
@@ -439,8 +445,7 @@ RISCVGatherScatterLowering::determineBaseAndStride(Instruction *Ptr,
439445

440446
// Convert stride to pointer size if needed.
441447
Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
442-
assert(IntPtrTy == VecIntPtrTy->getScalarType());
443-
Stride = Builder.CreateSExtOrTrunc(Stride, IntPtrTy);
448+
assert(Stride->getType() == IntPtrTy && "Unexpected type");
444449

445450
// Scale the stride by the size of the indexed type.
446451
if (TypeScale != 1)

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,50 @@ for.cond.cleanup: ; preds = %vector.body
5050
ret void
5151
}
5252

53+
; Don't transform since we might not handle wrap correctly with narrow indices.
54+
define void @gather_narrow_index(ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
55+
; CHECK-LABEL: @gather_narrow_index(
56+
; CHECK-NEXT: entry:
57+
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
58+
; CHECK: vector.body:
59+
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
60+
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <32 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>, [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
61+
; CHECK-NEXT: [[TMP0:%.*]] = mul nuw nsw <32 x i32> [[VEC_IND]], <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
62+
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], <32 x i32> [[TMP0]]
63+
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> [[TMP1]], i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
64+
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]]
65+
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP2]], align 1
66+
; CHECK-NEXT: [[TMP4:%.*]] = add <32 x i8> [[WIDE_LOAD]], [[WIDE_MASKED_GATHER]]
67+
; CHECK-NEXT: store <32 x i8> [[TMP4]], ptr [[TMP2]], align 1
68+
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
69+
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <32 x i32> [[VEC_IND]], <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
70+
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
71+
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
72+
; CHECK: for.cond.cleanup:
73+
; CHECK-NEXT: ret void
74+
;
75+
entry:
76+
br label %vector.body
77+
78+
vector.body: ; preds = %vector.body, %entry
79+
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
80+
%vec.ind = phi <32 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>, %entry ], [ %vec.ind.next, %vector.body ]
81+
%0 = mul nuw nsw <32 x i32> %vec.ind, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
82+
%1 = getelementptr inbounds i8, ptr %B, <32 x i32> %0
83+
%wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
84+
%2 = getelementptr inbounds i8, ptr %A, i64 %index
85+
%wide.load = load <32 x i8>, ptr %2, align 1
86+
%3 = add <32 x i8> %wide.load, %wide.masked.gather
87+
store <32 x i8> %3, ptr %2, align 1
88+
%index.next = add nuw i64 %index, 32
89+
%vec.ind.next = add <32 x i32> %vec.ind, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
90+
%4 = icmp eq i64 %index.next, 1024
91+
br i1 %4, label %for.cond.cleanup, label %vector.body
92+
93+
for.cond.cleanup: ; preds = %vector.body
94+
ret void
95+
}
96+
5397
; The last element of the start value of the phi has the wrong stride.
5498
define void @gather_broken_stride(ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
5599
; CHECK-LABEL: @gather_broken_stride(

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll

Lines changed: 5 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -970,15 +970,16 @@ define void @gather_narrow_idx(ptr noalias nocapture %A, ptr noalias nocapture r
970970
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
971971
; CHECK: vector.body:
972972
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
973-
; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i16 [ 0, [[ENTRY]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ]
974-
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[B:%.*]], i16 [[VEC_IND_SCALAR]]
975-
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr [[TMP0]], i64 5, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
973+
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <32 x i16> [ <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23, i16 24, i16 25, i16 26, i16 27, i16 28, i16 29, i16 30, i16 31>, [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
974+
; CHECK-NEXT: [[I:%.*]] = mul nuw nsw <32 x i16> [[VEC_IND]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
975+
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], <32 x i16> [[I]]
976+
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> [[I1]], i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
976977
; CHECK-NEXT: [[I2:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]]
977978
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[I2]], align 1
978979
; CHECK-NEXT: [[I4:%.*]] = add <32 x i8> [[WIDE_LOAD]], [[WIDE_MASKED_GATHER]]
979980
; CHECK-NEXT: store <32 x i8> [[I4]], ptr [[I2]], align 1
980981
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
981-
; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i16 [[VEC_IND_SCALAR]], 160
982+
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <32 x i16> [[VEC_IND]], <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32>
982983
; CHECK-NEXT: [[I6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
983984
; CHECK-NEXT: br i1 [[I6]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
984985
; CHECK: for.cond.cleanup:
@@ -1006,44 +1007,3 @@ for.cond.cleanup: ; preds = %vector.body
10061007
ret void
10071008
}
10081009

1009-
define void @gather_narrow_index2(ptr noalias nocapture %A, ptr noalias nocapture readonly %B) {
1010-
; CHECK-LABEL: @gather_narrow_index2(
1011-
; CHECK-NEXT: entry:
1012-
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
1013-
; CHECK: vector.body:
1014-
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
1015-
; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ]
1016-
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[VEC_IND_SCALAR]]
1017-
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr [[TMP0]], i64 5, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
1018-
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]]
1019-
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP1]], align 1
1020-
; CHECK-NEXT: [[TMP2:%.*]] = add <32 x i8> [[WIDE_LOAD]], [[WIDE_MASKED_GATHER]]
1021-
; CHECK-NEXT: store <32 x i8> [[TMP2]], ptr [[TMP1]], align 1
1022-
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
1023-
; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i32 [[VEC_IND_SCALAR]], 160
1024-
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
1025-
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
1026-
; CHECK: for.cond.cleanup:
1027-
; CHECK-NEXT: ret void
1028-
;
1029-
entry:
1030-
br label %vector.body
1031-
1032-
vector.body: ; preds = %vector.body, %entry
1033-
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
1034-
%vec.ind = phi <32 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>, %entry ], [ %vec.ind.next, %vector.body ]
1035-
%0 = mul nuw nsw <32 x i32> %vec.ind, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
1036-
%1 = getelementptr inbounds i8, ptr %B, <32 x i32> %0
1037-
%wide.masked.gather = call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %1, i32 1, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
1038-
%2 = getelementptr inbounds i8, ptr %A, i64 %index
1039-
%wide.load = load <32 x i8>, ptr %2, align 1
1040-
%3 = add <32 x i8> %wide.load, %wide.masked.gather
1041-
store <32 x i8> %3, ptr %2, align 1
1042-
%index.next = add nuw i64 %index, 32
1043-
%vec.ind.next = add <32 x i32> %vec.ind, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
1044-
%4 = icmp eq i64 %index.next, 1024
1045-
br i1 %4, label %for.cond.cleanup, label %vector.body
1046-
1047-
for.cond.cleanup: ; preds = %vector.body
1048-
ret void
1049-
}

0 commit comments

Comments (0)