Skip to content

Commit 9586121

Browse files
committed
[mlir][ArmSVE] Add masked arithmetic operations
These instructions map to SVE-specific intrinsics that accept a predicate operand to support control flow in vector code. Differential Revision: https://reviews.llvm.org/D100982
1 parent f16afcd commit 9586121

File tree

6 files changed

+374
-9
lines changed

6 files changed

+374
-9
lines changed

mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td

Lines changed: 130 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,13 @@ def ScalableVectorType : ArmSVE_Type<"ScalableVector"> {
9595
}];
9696
}
9797

98+
//===----------------------------------------------------------------------===//
99+
// Additional LLVM type constraints
100+
//===----------------------------------------------------------------------===//
101+
def LLVMScalableVectorType :
102+
Type<CPred<"$_self.isa<::mlir::LLVM::LLVMScalableVectorType>()">,
103+
"LLVM dialect scalable vector type">;
104+
98105
//===----------------------------------------------------------------------===//
99106
// ArmSVE op definitions
100107
//===----------------------------------------------------------------------===//
@@ -158,6 +165,52 @@ class ScalableIOp<string mnemonic, string op_description,
158165
"$src1 `,` $src2 attr-dict `:` type($src1)";
159166
}
160167

168+
class ScalableMaskedFOp<string mnemonic, string op_description,
169+
list<OpTrait> traits = []> :
170+
ArmSVE_Op<mnemonic, !listconcat(traits,
171+
[AllTypesMatch<["src1", "src2", "res"]>,
172+
TypesMatchWith<
173+
"mask has i1 element type and same shape as operands",
174+
"src1", "mask", "getI1SameShape($_self)">])> {
175+
let summary = "masked " # op_description # " for scalable vectors of floats";
176+
let description = [{
177+
The `arm_sve.}] # mnemonic # [{` operation takes one scalable vector mask
178+
and two scalable vector operands, and performs floating point }] #
179+
op_description # [{ on active lanes. Inactive lanes will keep the value of
180+
the first operand.}];
181+
let arguments = (ins
182+
ScalableVectorOf<[I1]>:$mask,
183+
ScalableVectorOf<[AnyFloat]>:$src1,
184+
ScalableVectorOf<[AnyFloat]>:$src2
185+
);
186+
let results = (outs ScalableVectorOf<[AnyFloat]>:$res);
187+
let assemblyFormat =
188+
"$mask `,` $src1 `,` $src2 attr-dict `:` type($mask) `,` type($res)";
189+
}
190+
191+
class ScalableMaskedIOp<string mnemonic, string op_description,
192+
list<OpTrait> traits = []> :
193+
ArmSVE_Op<mnemonic, !listconcat(traits,
194+
[AllTypesMatch<["src1", "src2", "res"]>,
195+
TypesMatchWith<
196+
"mask has i1 element type and same shape as operands",
197+
"src1", "mask", "getI1SameShape($_self)">])> {
198+
let summary = "masked " # op_description # " for scalable vectors of integers";
199+
let description = [{
200+
The `arm_sve.}] # mnemonic # [{` operation takes one scalable vector mask
201+
and two scalable vector operands, and performs integer }] #
202+
op_description # [{ on active lanes. Inactive lanes will keep the value of
203+
the first operand.}];
204+
let arguments = (ins
205+
ScalableVectorOf<[I1]>:$mask,
206+
ScalableVectorOf<[I8, I16, I32, I64]>:$src1,
207+
ScalableVectorOf<[I8, I16, I32, I64]>:$src2
208+
);
209+
let results = (outs ScalableVectorOf<[I8, I16, I32, I64]>:$res);
210+
let assemblyFormat =
211+
"$mask `,` $src1 `,` $src2 attr-dict `:` type($mask) `,` type($res)";
212+
}
213+
161214
def SdotOp : ArmSVE_Op<"sdot",
162215
[NoSideEffect,
163216
AllTypesMatch<["src1", "src2"]>,
@@ -321,21 +374,94 @@ def ScalableUDivIOp : ScalableIOp<"divi_unsigned", "unsigned division">;
321374

322375
def ScalableDivFOp : ScalableFOp<"divf", "division">;
323376

377+
def ScalableMaskedAddIOp : ScalableMaskedIOp<"masked.addi", "addition",
378+
[Commutative]>;
379+
380+
def ScalableMaskedAddFOp : ScalableMaskedFOp<"masked.addf", "addition",
381+
[Commutative]>;
382+
383+
def ScalableMaskedSubIOp : ScalableMaskedIOp<"masked.subi", "subtraction">;
384+
385+
def ScalableMaskedSubFOp : ScalableMaskedFOp<"masked.subf", "subtraction">;
386+
387+
def ScalableMaskedMulIOp : ScalableMaskedIOp<"masked.muli", "multiplication",
388+
[Commutative]>;
389+
390+
def ScalableMaskedMulFOp : ScalableMaskedFOp<"masked.mulf", "multiplication",
391+
[Commutative]>;
392+
393+
def ScalableMaskedSDivIOp : ScalableMaskedIOp<"masked.divi_signed",
394+
"signed division">;
395+
396+
def ScalableMaskedUDivIOp : ScalableMaskedIOp<"masked.divi_unsigned",
397+
"unsigned division">;
398+
399+
def ScalableMaskedDivFOp : ScalableMaskedFOp<"masked.divf", "division">;
400+
324401
def UmmlaIntrOp :
325402
ArmSVE_IntrBinaryOverloadedOp<"ummla">,
326-
Arguments<(ins LLVM_AnyVector, LLVM_AnyVector, LLVM_AnyVector)>;
403+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
404+
LLVMScalableVectorType)>;
327405

328406
def SmmlaIntrOp :
329407
ArmSVE_IntrBinaryOverloadedOp<"smmla">,
330-
Arguments<(ins LLVM_AnyVector, LLVM_AnyVector, LLVM_AnyVector)>;
408+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
409+
LLVMScalableVectorType)>;
331410

332411
def SdotIntrOp :
333412
ArmSVE_IntrBinaryOverloadedOp<"sdot">,
334-
Arguments<(ins LLVM_AnyVector, LLVM_AnyVector, LLVM_AnyVector)>;
413+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
414+
LLVMScalableVectorType)>;
335415

336416
def UdotIntrOp :
337417
ArmSVE_IntrBinaryOverloadedOp<"udot">,
338-
Arguments<(ins LLVM_AnyVector, LLVM_AnyVector, LLVM_AnyVector)>;
418+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
419+
LLVMScalableVectorType)>;
420+
421+
def ScalableMaskedAddIIntrOp :
422+
ArmSVE_IntrBinaryOverloadedOp<"add">,
423+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
424+
LLVMScalableVectorType)>;
425+
426+
def ScalableMaskedAddFIntrOp :
427+
ArmSVE_IntrBinaryOverloadedOp<"fadd">,
428+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
429+
LLVMScalableVectorType)>;
430+
431+
def ScalableMaskedMulIIntrOp :
432+
ArmSVE_IntrBinaryOverloadedOp<"mul">,
433+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
434+
LLVMScalableVectorType)>;
435+
436+
def ScalableMaskedMulFIntrOp :
437+
ArmSVE_IntrBinaryOverloadedOp<"fmul">,
438+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
439+
LLVMScalableVectorType)>;
440+
441+
def ScalableMaskedSubIIntrOp :
442+
ArmSVE_IntrBinaryOverloadedOp<"sub">,
443+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
444+
LLVMScalableVectorType)>;
445+
446+
def ScalableMaskedSubFIntrOp :
447+
ArmSVE_IntrBinaryOverloadedOp<"fsub">,
448+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
449+
LLVMScalableVectorType)>;
450+
451+
def ScalableMaskedSDivIIntrOp :
452+
ArmSVE_IntrBinaryOverloadedOp<"sdiv">,
453+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
454+
LLVMScalableVectorType)>;
455+
456+
def ScalableMaskedUDivIIntrOp :
457+
ArmSVE_IntrBinaryOverloadedOp<"udiv">,
458+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
459+
LLVMScalableVectorType)>;
460+
461+
def ScalableMaskedDivFIntrOp :
462+
ArmSVE_IntrBinaryOverloadedOp<"fdiv">,
463+
Arguments<(ins LLVMScalableVectorType, LLVMScalableVectorType,
464+
LLVMScalableVectorType)>;
339465

340466
def VectorScaleIntrOp:
341467
ArmSVE_NonSVEIntrUnaryOverloadedOp<"vscale">;

mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,8 @@
2121

2222
using namespace mlir;
2323

24+
static Type getI1SameShape(Type type);
25+
2426
#define GET_OP_CLASSES
2527
#include "mlir/Dialect/ArmSVE/ArmSVE.cpp.inc"
2628

@@ -59,3 +61,16 @@ void arm_sve::ArmSVEDialect::printType(Type type, DialectAsmPrinter &os) const {
5961
if (failed(generatedTypePrinter(type, os)))
6062
llvm_unreachable("unexpected 'arm_sve' type kind");
6163
}
64+
65+
//===----------------------------------------------------------------------===//
66+
// ScalableVector versions of general helpers for comparison ops
67+
//===----------------------------------------------------------------------===//
68+
69+
// Return the scalable vector of the same shape and containing i1.
70+
static Type getI1SameShape(Type type) {
71+
auto i1Type = IntegerType::get(type.getContext(), 1);
72+
if (auto sVectorType = type.dyn_cast<arm_sve::ScalableVectorType>())
73+
return arm_sve::ScalableVectorType::get(type.getContext(),
74+
sVectorType.getShape(), i1Type);
75+
return nullptr;
76+
}

mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp

Lines changed: 67 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,33 @@ using UdotOpLowering = OneToOneConvertToLLVMPattern<UdotOp, UdotIntrOp>;
8383
using UmmlaOpLowering = OneToOneConvertToLLVMPattern<UmmlaOp, UmmlaIntrOp>;
8484
using VectorScaleOpLowering =
8585
OneToOneConvertToLLVMPattern<VectorScaleOp, VectorScaleIntrOp>;
86+
using ScalableMaskedAddIOpLowering =
87+
OneToOneConvertToLLVMPattern<ScalableMaskedAddIOp,
88+
ScalableMaskedAddIIntrOp>;
89+
using ScalableMaskedAddFOpLowering =
90+
OneToOneConvertToLLVMPattern<ScalableMaskedAddFOp,
91+
ScalableMaskedAddFIntrOp>;
92+
using ScalableMaskedSubIOpLowering =
93+
OneToOneConvertToLLVMPattern<ScalableMaskedSubIOp,
94+
ScalableMaskedSubIIntrOp>;
95+
using ScalableMaskedSubFOpLowering =
96+
OneToOneConvertToLLVMPattern<ScalableMaskedSubFOp,
97+
ScalableMaskedSubFIntrOp>;
98+
using ScalableMaskedMulIOpLowering =
99+
OneToOneConvertToLLVMPattern<ScalableMaskedMulIOp,
100+
ScalableMaskedMulIIntrOp>;
101+
using ScalableMaskedMulFOpLowering =
102+
OneToOneConvertToLLVMPattern<ScalableMaskedMulFOp,
103+
ScalableMaskedMulFIntrOp>;
104+
using ScalableMaskedSDivIOpLowering =
105+
OneToOneConvertToLLVMPattern<ScalableMaskedSDivIOp,
106+
ScalableMaskedSDivIIntrOp>;
107+
using ScalableMaskedUDivIOpLowering =
108+
OneToOneConvertToLLVMPattern<ScalableMaskedUDivIOp,
109+
ScalableMaskedUDivIIntrOp>;
110+
using ScalableMaskedDivFOpLowering =
111+
OneToOneConvertToLLVMPattern<ScalableMaskedDivFOp,
112+
ScalableMaskedDivFIntrOp>;
86113

87114
static void
88115
populateBasicSVEArithmeticExportPatterns(LLVMTypeConverter &converter,
@@ -136,16 +163,52 @@ void mlir::populateArmSVELegalizeForLLVMExportPatterns(
136163
SmmlaOpLowering,
137164
UdotOpLowering,
138165
UmmlaOpLowering,
139-
VectorScaleOpLowering>(converter);
166+
VectorScaleOpLowering,
167+
ScalableMaskedAddIOpLowering,
168+
ScalableMaskedAddFOpLowering,
169+
ScalableMaskedSubIOpLowering,
170+
ScalableMaskedSubFOpLowering,
171+
ScalableMaskedMulIOpLowering,
172+
ScalableMaskedMulFOpLowering,
173+
ScalableMaskedSDivIOpLowering,
174+
ScalableMaskedUDivIOpLowering,
175+
ScalableMaskedDivFOpLowering>(converter);
140176
// clang-format on
141177
populateBasicSVEArithmeticExportPatterns(converter, patterns);
142178
}
143179

144180
void mlir::configureArmSVELegalizeForExportTarget(
145181
LLVMConversionTarget &target) {
146-
target.addLegalOp<SdotIntrOp, SmmlaIntrOp, UdotIntrOp, UmmlaIntrOp,
147-
VectorScaleIntrOp>();
148-
target.addIllegalOp<SdotOp, SmmlaOp, UdotOp, UmmlaOp, VectorScaleOp>();
182+
// clang-format off
183+
target.addLegalOp<SdotIntrOp,
184+
SmmlaIntrOp,
185+
UdotIntrOp,
186+
UmmlaIntrOp,
187+
VectorScaleIntrOp,
188+
ScalableMaskedAddIIntrOp,
189+
ScalableMaskedAddFIntrOp,
190+
ScalableMaskedSubIIntrOp,
191+
ScalableMaskedSubFIntrOp,
192+
ScalableMaskedMulIIntrOp,
193+
ScalableMaskedMulFIntrOp,
194+
ScalableMaskedSDivIIntrOp,
195+
ScalableMaskedUDivIIntrOp,
196+
ScalableMaskedDivFIntrOp>();
197+
target.addIllegalOp<SdotOp,
198+
SmmlaOp,
199+
UdotOp,
200+
UmmlaOp,
201+
VectorScaleOp,
202+
ScalableMaskedAddIOp,
203+
ScalableMaskedAddFOp,
204+
ScalableMaskedSubIOp,
205+
ScalableMaskedSubFOp,
206+
ScalableMaskedMulIOp,
207+
ScalableMaskedMulFOp,
208+
ScalableMaskedSDivIOp,
209+
ScalableMaskedUDivIOp,
210+
ScalableMaskedDivFOp>();
211+
// clang-format on
149212
auto hasScalableVectorType = [](TypeRange types) {
150213
for (Type type : types)
151214
if (type.isa<arm_sve::ScalableVectorType>())

mlir/test/Dialect/ArmSVE/legalize-for-llvm.mlir

Lines changed: 48 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ func @arm_sve_arithi(%a: !arm_sve.vector<4xi32>,
5555
%3 = arm_sve.divi_signed %2, %e : !arm_sve.vector<4xi32>
5656
// CHECK: llvm.udiv {{.*}}: !llvm.vec<? x 4 x i32>
5757
%4 = arm_sve.divi_unsigned %2, %e : !arm_sve.vector<4xi32>
58-
return %3 : !arm_sve.vector<4xi32>
58+
return %4 : !arm_sve.vector<4xi32>
5959
}
6060

6161
func @arm_sve_arithf(%a: !arm_sve.vector<4xf32>,
@@ -74,6 +74,53 @@ func @arm_sve_arithf(%a: !arm_sve.vector<4xf32>,
7474
return %3 : !arm_sve.vector<4xf32>
7575
}
7676

77+
func @arm_sve_arithi_masked(%a: !arm_sve.vector<4xi32>,
78+
%b: !arm_sve.vector<4xi32>,
79+
%c: !arm_sve.vector<4xi32>,
80+
%d: !arm_sve.vector<4xi32>,
81+
%e: !arm_sve.vector<4xi32>,
82+
%mask: !arm_sve.vector<4xi1>
83+
) -> !arm_sve.vector<4xi32> {
84+
// CHECK: arm_sve.intr.add{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x i32>, !llvm.vec<? x 4 x i32>) -> !llvm.vec<? x 4 x i32>
85+
%0 = arm_sve.masked.addi %mask, %a, %b : !arm_sve.vector<4xi1>,
86+
!arm_sve.vector<4xi32>
87+
// CHECK: arm_sve.intr.sub{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x i32>, !llvm.vec<? x 4 x i32>) -> !llvm.vec<? x 4 x i32>
88+
%1 = arm_sve.masked.subi %mask, %0, %c : !arm_sve.vector<4xi1>,
89+
!arm_sve.vector<4xi32>
90+
// CHECK: arm_sve.intr.mul{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x i32>, !llvm.vec<? x 4 x i32>) -> !llvm.vec<? x 4 x i32>
91+
%2 = arm_sve.masked.muli %mask, %1, %d : !arm_sve.vector<4xi1>,
92+
!arm_sve.vector<4xi32>
93+
// CHECK: arm_sve.intr.sdiv{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x i32>, !llvm.vec<? x 4 x i32>) -> !llvm.vec<? x 4 x i32>
94+
%3 = arm_sve.masked.divi_signed %mask, %2, %e : !arm_sve.vector<4xi1>,
95+
!arm_sve.vector<4xi32>
96+
// CHECK: arm_sve.intr.udiv{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x i32>, !llvm.vec<? x 4 x i32>) -> !llvm.vec<? x 4 x i32>
97+
%4 = arm_sve.masked.divi_unsigned %mask, %3, %e : !arm_sve.vector<4xi1>,
98+
!arm_sve.vector<4xi32>
99+
return %4 : !arm_sve.vector<4xi32>
100+
}
101+
102+
func @arm_sve_arithf_masked(%a: !arm_sve.vector<4xf32>,
103+
%b: !arm_sve.vector<4xf32>,
104+
%c: !arm_sve.vector<4xf32>,
105+
%d: !arm_sve.vector<4xf32>,
106+
%e: !arm_sve.vector<4xf32>,
107+
%mask: !arm_sve.vector<4xi1>
108+
) -> !arm_sve.vector<4xf32> {
109+
// CHECK: arm_sve.intr.fadd{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x f32>, !llvm.vec<? x 4 x f32>) -> !llvm.vec<? x 4 x f32>
110+
%0 = arm_sve.masked.addf %mask, %a, %b : !arm_sve.vector<4xi1>,
111+
!arm_sve.vector<4xf32>
112+
// CHECK: arm_sve.intr.fsub{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x f32>, !llvm.vec<? x 4 x f32>) -> !llvm.vec<? x 4 x f32>
113+
%1 = arm_sve.masked.subf %mask, %0, %c : !arm_sve.vector<4xi1>,
114+
!arm_sve.vector<4xf32>
115+
// CHECK: arm_sve.intr.fmul{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x f32>, !llvm.vec<? x 4 x f32>) -> !llvm.vec<? x 4 x f32>
116+
%2 = arm_sve.masked.mulf %mask, %1, %d : !arm_sve.vector<4xi1>,
117+
!arm_sve.vector<4xf32>
118+
// CHECK: arm_sve.intr.fdiv{{.*}}: (!llvm.vec<? x 4 x i1>, !llvm.vec<? x 4 x f32>, !llvm.vec<? x 4 x f32>) -> !llvm.vec<? x 4 x f32>
119+
%3 = arm_sve.masked.divf %mask, %2, %e : !arm_sve.vector<4xi1>,
120+
!arm_sve.vector<4xf32>
121+
return %3 : !arm_sve.vector<4xf32>
122+
}
123+
77124
func @get_vector_scale() -> index {
78125
// CHECK: arm_sve.vscale
79126
%0 = arm_sve.vector_scale : index

mlir/test/Dialect/ArmSVE/roundtrip.mlir

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,53 @@ func @arm_sve_arithf(%a: !arm_sve.vector<4xf32>,
5656
return %1 : !arm_sve.vector<4xf32>
5757
}
5858

59+
func @arm_sve_masked_arithi(%a: !arm_sve.vector<4xi32>,
60+
%b: !arm_sve.vector<4xi32>,
61+
%c: !arm_sve.vector<4xi32>,
62+
%d: !arm_sve.vector<4xi32>,
63+
%e: !arm_sve.vector<4xi32>,
64+
%mask: !arm_sve.vector<4xi1>)
65+
-> !arm_sve.vector<4xi32> {
66+
// CHECK: arm_sve.masked.muli {{.*}}: !arm_sve.vector<4xi1>, !arm_sve.vector
67+
%0 = arm_sve.masked.muli %mask, %a, %b : !arm_sve.vector<4xi1>,
68+
!arm_sve.vector<4xi32>
69+
// CHECK: arm_sve.masked.addi {{.*}}: !arm_sve.vector<4xi1>, !arm_sve.vector
70+
%1 = arm_sve.masked.addi %mask, %0, %c : !arm_sve.vector<4xi1>,
71+
!arm_sve.vector<4xi32>
72+
// CHECK: arm_sve.masked.subi {{.*}}: !arm_sve.vector<4xi1>, !arm_sve.vector
73+
%2 = arm_sve.masked.subi %mask, %1, %d : !arm_sve.vector<4xi1>,
74+
!arm_sve.vector<4xi32>
75+
// CHECK: arm_sve.masked.divi_signed
76+
%3 = arm_sve.masked.divi_signed %mask, %2, %e : !arm_sve.vector<4xi1>,
77+
!arm_sve.vector<4xi32>
78+
// CHECK: arm_sve.masked.divi_unsigned
79+
%4 = arm_sve.masked.divi_unsigned %mask, %3, %e : !arm_sve.vector<4xi1>,
80+
!arm_sve.vector<4xi32>
81+
return %2 : !arm_sve.vector<4xi32>
82+
}
83+
84+
func @arm_sve_masked_arithf(%a: !arm_sve.vector<4xf32>,
85+
%b: !arm_sve.vector<4xf32>,
86+
%c: !arm_sve.vector<4xf32>,
87+
%d: !arm_sve.vector<4xf32>,
88+
%e: !arm_sve.vector<4xf32>,
89+
%mask: !arm_sve.vector<4xi1>)
90+
-> !arm_sve.vector<4xf32> {
91+
// CHECK: arm_sve.masked.mulf {{.*}}: !arm_sve.vector<4xi1>, !arm_sve.vector
92+
%0 = arm_sve.masked.mulf %mask, %a, %b : !arm_sve.vector<4xi1>,
93+
!arm_sve.vector<4xf32>
94+
// CHECK: arm_sve.masked.addf {{.*}}: !arm_sve.vector<4xi1>, !arm_sve.vector
95+
%1 = arm_sve.masked.addf %mask, %0, %c : !arm_sve.vector<4xi1>,
96+
!arm_sve.vector<4xf32>
97+
// CHECK: arm_sve.masked.subf {{.*}}: !arm_sve.vector<4xi1>, !arm_sve.vector
98+
%2 = arm_sve.masked.subf %mask, %1, %d : !arm_sve.vector<4xi1>,
99+
!arm_sve.vector<4xf32>
100+
// CHECK: arm_sve.masked.divf {{.*}}: !arm_sve.vector<4xi1>, !arm_sve.vector
101+
%3 = arm_sve.masked.divf %mask, %2, %e : !arm_sve.vector<4xi1>,
102+
!arm_sve.vector<4xf32>
103+
return %3 : !arm_sve.vector<4xf32>
104+
}
105+
59106
func @get_vector_scale() -> index {
60107
// CHECK: arm_sve.vector_scale : index
61108
%0 = arm_sve.vector_scale : index

0 commit comments

Comments (0)