@@ -766,7 +766,7 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
         IntrinsicTypes = {ResultType, Ops[0]->getType(), Ops.back()->getType()};
       else
         IntrinsicTypes = {ResultType, Ops.back()->getType()};
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 6> Operands;
 
       bool NoPassthru =
           (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
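
Note on the capacity change above (and the analogous ones in the hunks below): the inline capacity of a SmallVector is part of the object itself, so a blanket 12 over-reserves stack in every builder; the new sizes match the maximum number of operands each codegen path can actually push. A minimal standalone sketch of the behavior this relies on, assuming an LLVM tree to compile against (not part of the patch):

    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    int main() {
      llvm::SmallVector<int, 6> V;  // storage for 6 elements lives inside V
      for (int I = 0; I < 6; ++I)
        V.push_back(I);             // fills the in-object buffer, no heap use
      assert(V.capacity() == 6);    // still on the inline buffer
      V.push_back(6);               // a 7th element forces a heap allocation
      assert(V.capacity() > 6);
      return 0;
    }

As long as push_back never exceeds the chosen inline size, the operand list never allocates, which is the point of sizing it exactly.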
@@ -836,7 +836,7 @@ multiclass RVVUnitStridedSegStoreTuple<string op> {
       // Intrinsic: (tuple, ptr, vl)
       unsigned Offset = IsMasked ? 1 : 0;
 
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 5> Operands;
       Operands.push_back(Ops[Offset + 1]); // tuple
       Operands.push_back(Ops[Offset]); // Ptr
       if (IsMasked)
@@ -886,7 +886,7 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
         IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()};
       else
         IntrinsicTypes = {ResultType, Ops.back()->getType()};
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 6> Operands;
 
       bool NoPassthru =
           (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
@@ -961,7 +961,7 @@ multiclass RVVStridedSegLoadTuple<string op> {
         IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()};
       else
         IntrinsicTypes = {ResultType, Ops.back()->getType()};
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 7> Operands;
 
       bool NoPassthru =
           (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
@@ -1033,7 +1033,7 @@ multiclass RVVStridedSegStoreTuple<string op> {
       // Intrinsic: (tuple, ptr, stride, vl)
       unsigned Offset = IsMasked ? 1 : 0;
 
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 6> Operands;
       Operands.push_back(Ops[Offset + 2]); // tuple
       Operands.push_back(Ops[Offset]); // Ptr
       Operands.push_back(Ops[Offset + 1]); // Stride
@@ -1075,7 +1075,7 @@ multiclass RVVIndexedSegLoadTuple<string op> {
                                   []<string>)),
       ManualCodegen = [{
     {
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 7> Operands;
 
       bool NoPassthru =
           (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
@@ -1150,7 +1150,7 @@ multiclass RVVIndexedSegStoreTuple<string op> {
      // Intrinsic: (tuple, ptr, index, vl)
      unsigned Offset = IsMasked ? 1 : 0;
 
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 6> Operands;
       Operands.push_back(Ops[Offset + 2]); // tuple
       Operands.push_back(Ops[Offset]); // Ptr
       Operands.push_back(Ops[Offset + 1]); // Idx
@@ -2476,24 +2476,22 @@ let HasMasked = false, HasVL = false, IRName = "" in {
       ManualCodegen = [{
       {
         auto *VecTy = cast<ScalableVectorType>(ResultType);
-        // Mask to only valid indices.
-        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
         if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) {
           unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
           assert(isPowerOf2_32(MaxIndex));
+          // Mask to only valid indices.
+          Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
           Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
           Ops[1] = Builder.CreateMul(Ops[1],
                                      ConstantInt::get(Ops[1]->getType(),
                                                       VecTy->getMinNumElements()));
           return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
         }
 
-        bool IsRISCV64 = getTarget().getTriple().isRISCV64();
-        llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
-                                         Builder.getInt32Ty();
         return Builder.CreateIntrinsic(Intrinsic::riscv_vector_extract,
-                                       {ResultType, Ops[0]->getType(), XLenTy},
-                                       {Ops[0], Ops[1]});
+                                       {ResultType, Ops[0]->getType()},
+                                       {Ops[0], Builder.CreateZExt(Ops[1],
+                                                Builder.getInt32Ty())});
       }
       }] in {
   foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
@@ -2510,25 +2508,23 @@ let HasMasked = false, HasVL = false, IRName = "" in {
   let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
       ManualCodegen = [{
       {
-        auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
-        // Mask to only valid indices.
-        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
         if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) {
+          auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
           unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
           assert(isPowerOf2_32(MaxIndex));
+          // Mask to only valid indices.
+          Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
           Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
           Ops[1] = Builder.CreateMul(Ops[1],
                                      ConstantInt::get(Ops[1]->getType(),
                                                       VecTy->getMinNumElements()));
           return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
         }
 
-        bool IsRISCV64 = getTarget().getTriple().isRISCV64();
-        llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
-                                         Builder.getInt32Ty();
         return Builder.CreateIntrinsic(Intrinsic::riscv_vector_insert,
-                                       {ResultType, Ops[2]->getType(), XLenTy},
-                                       {Ops[0], Ops[2], Ops[1]});
+                                       {ResultType, Ops[2]->getType()},
+                                       {Ops[0], Ops[2],
+                                        Builder.CreateZExt(Ops[1], Builder.getInt32Ty())});
       }
       }] in {
   foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
@@ -2554,23 +2550,19 @@ let HasMasked = false, HasVL = false, IRName = "" in {
     {
       llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
       auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
-      bool IsRISCV64 = getTarget().getTriple().isRISCV64();
-      llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
-                                       Builder.getInt32Ty();
       for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
-        llvm::Value *Idx =
-          ConstantInt::get(Builder.getInt64Ty(),
-                           isa<ScalableVectorType>(ResultType) ?
-                           VecTy->getMinNumElements() * I : I);
-
-        if (isa<ScalableVectorType>(ResultType))
+        if (isa<ScalableVectorType>(ResultType)) {
+          llvm::Value *Idx = ConstantInt::get(Builder.getInt64Ty(),
+                                              VecTy->getMinNumElements() * I);
           ReturnVector =
             Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
-        else
+        } else {
+          llvm::Value *Idx = ConstantInt::get(Builder.getInt32Ty(), I);
           ReturnVector =
             Builder.CreateIntrinsic(Intrinsic::riscv_vector_insert,
-                                    {ResultType, Ops[I]->getType(), XLenTy},
+                                    {ResultType, Ops[I]->getType()},
                                     {ReturnVector, Ops[I], Idx});
+        }
 
      }
      return ReturnVector;
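
The reshaped loop gives each branch an index of the width its consumer expects: CreateInsertVector takes an i64 element offset (I scaled by the part's minimum element count), while the riscv_vector_insert fallback now takes a plain i32 field index. A worked example of those index values, with assumed element counts (not from the patch):

    #include <cassert>

    int main() {
      // Scalable path: four <vscale x 2 x i32> parts, MinNumElements == 2,
      // so the i64 insertion offsets are 0, 2, 4, 6.
      const unsigned MinElts = 2;
      unsigned Offsets[4];
      for (unsigned I = 0; I < 4; ++I)
        Offsets[I] = MinElts * I;
      assert(Offsets[3] == 6);
      // Tuple (non-scalable) path: the i32 index is just I itself (0..3).
      return 0;
    }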