Skip to content

Commit 1848df4

Browse files
krzysz00pcf000
authored and committed
[MLIR][AMDGPU] After fp8 conversions were lowered to AMDGPU dialect ops,
those operations were not being converted to the LLVM intrinsics they correspond to because the rewrite patterns were still checking for gfx940+. As part of this, factor out the type-match tests into isNativeFp8() and isNativeBf8() functions in the AMDGPUToROCDL rewrites. Also, fix a typo in isGfx940() that caused it to be true for gfx950. Finally, test all these OCP format conversions by duplicating the gfx940 tests.
1 parent 0272474 commit 1848df4

File tree

4 files changed

+373
-21
lines changed

4 files changed

+373
-21
lines changed

mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp

Lines changed: 30 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -454,6 +454,20 @@ static void wmmaPushOutputOperand(ConversionPatternRewriter &rewriter,
454454
}
455455
}
456456

457+
/// Return true if `type` is the E5M2 variant of an 8-bit float that is
458+
/// supported by the `_bf8` instructions on the given `chipset`.
459+
static bool isNativeBf8(Chipset chipset, Type type) {
460+
return (chipset.isGfx940() && type.isFloat8E5M2FNUZ()) ||
461+
(chipset.hasOcpFp8() && type.isFloat8E5M2());
462+
}
463+
464+
/// Return true if `type` is the E4M3FN variant of an 8-bit float that is
465+
/// supported by the `_fp8` instructions on the given `chipset`.
466+
static bool isNativeFp8(Chipset chipset, Type type) {
467+
return (chipset.isGfx940() && type.isFloat8E4M3FNUZ()) ||
468+
(chipset.hasOcpFp8() && type.isFloat8E4M3FN());
469+
}
470+
457471
/// Return the `rocdl` intrinsic corresponding to a MFMA operation `mfma`
458472
/// if one exists. This includes checking to ensure the intrinsic is supported
459473
/// on the architecture you are compiling for.
@@ -550,42 +564,38 @@ static std::optional<StringRef> mfmaOpToIntrinsic(MFMAOp mfma,
550564
return ROCDL::mfma_f64_4x4x4f64::getOperationName();
551565
}
552566

553-
if (destElem.isF32() &&
554-
((sourceElem.isFloat8E5M2FNUZ() && chipset >= kGfx940) ||
555-
(sourceElem.isFloat8E5M2() && chipset.hasOcpFp8()))) {
567+
if (destElem.isF32() && isNativeBf8(chipset, sourceElem)) {
556568
// Known to be correct because there are no scalar f8 instructions and
557569
// because a length mismatch will have been caught by the verifier.
558570
Type sourceBElem =
559571
cast<VectorType>(mfma.getSourceB().getType()).getElementType();
560572
if (m == 16 && n == 16 && k == 32 && b == 1) {
561-
if (sourceBElem.isFloat8E5M2FNUZ() || sourceBElem.isFloat8E5M2())
573+
if (isNativeBf8(chipset, sourceBElem))
562574
return ROCDL::mfma_f32_16x16x32_bf8_bf8::getOperationName();
563-
if (sourceBElem.isFloat8E4M3FNUZ() || sourceBElem.isFloat8E4M3FN())
575+
if (isNativeFp8(chipset, sourceBElem))
564576
return ROCDL::mfma_f32_16x16x32_bf8_fp8::getOperationName();
565577
}
566578
if (m == 32 && n == 32 && k == 16 && b == 1) {
567-
if (sourceBElem.isFloat8E5M2FNUZ() || sourceBElem.isFloat8E5M2())
579+
if (isNativeBf8(chipset, sourceBElem))
568580
return ROCDL::mfma_f32_32x32x16_bf8_bf8::getOperationName();
569-
if (sourceBElem.isFloat8E4M3FNUZ() || sourceBElem.isFloat8E4M3FN())
581+
if (isNativeFp8(chipset, sourceBElem))
570582
return ROCDL::mfma_f32_32x32x16_bf8_fp8::getOperationName();
571583
}
572584
}
573585

574-
if (destElem.isF32() &&
575-
((sourceElem.isFloat8E4M3FNUZ() && chipset >= kGfx940) ||
576-
(sourceElem.isFloat8E4M3FN() && chipset.hasOcpFp8()))) {
586+
if (destElem.isF32() && isNativeFp8(chipset, sourceElem)) {
577587
Type sourceBElem =
578588
cast<VectorType>(mfma.getSourceB().getType()).getElementType();
579589
if (m == 16 && n == 16 && k == 32 && b == 1) {
580-
if (sourceBElem.isFloat8E5M2FNUZ() || sourceBElem.isFloat8E5M2())
590+
if (isNativeBf8(chipset, sourceBElem))
581591
return ROCDL::mfma_f32_16x16x32_fp8_bf8::getOperationName();
582-
if (sourceBElem.isFloat8E4M3FNUZ() || sourceBElem.isFloat8E4M3FN())
592+
if (isNativeFp8(chipset, sourceBElem))
583593
return ROCDL::mfma_f32_16x16x32_fp8_fp8::getOperationName();
584594
}
585595
if (m == 32 && n == 32 && k == 16 && b == 1) {
586-
if (sourceBElem.isFloat8E5M2FNUZ() || sourceBElem.isFloat8E5M2())
596+
if (isNativeBf8(chipset, sourceBElem))
587597
return ROCDL::mfma_f32_32x32x16_fp8_bf8::getOperationName();
588-
if (sourceBElem.isFloat8E4M3FNUZ() || sourceBElem.isFloat8E4M3FN())
598+
if (isNativeFp8(chipset, sourceBElem))
589599
return ROCDL::mfma_f32_32x32x16_fp8_fp8::getOperationName();
590600
}
591601
}
@@ -791,11 +801,10 @@ LogicalResult ExtPackedFp8OpLowering::matchAndRewrite(
791801
}
792802
Value i32Source = rewriter.create<LLVM::BitcastOp>(loc, i32, source);
793803
Value wordSel = createI32Constant(rewriter, loc, op.getIndex());
794-
if (sourceElemType.isFloat8E5M2FNUZ() || sourceElemType.isFloat8E5M2()) {
804+
if (isNativeBf8(chipset, sourceElemType)) {
795805
rewriter.replaceOpWithNewOp<ROCDL::CvtF32Bf8Op>(op, f32, i32Source,
796806
wordSel);
797-
} else if (sourceElemType.isFloat8E4M3FNUZ() ||
798-
sourceElemType.isFloat8E4M3FN()) {
807+
} else if (isNativeFp8(chipset, sourceElemType)) {
799808
rewriter.replaceOpWithNewOp<ROCDL::CvtF32Fp8Op>(op, f32, i32Source,
800809
wordSel);
801810
}
@@ -827,10 +836,10 @@ LogicalResult PackedTrunc2xFp8OpLowering::matchAndRewrite(
827836
Value wordSel = createI1Constant(rewriter, loc, op.getWordIndex());
828837

829838
Value result;
830-
if (resultElemType.isFloat8E5M2FNUZ() || resultElemType.isFloat8E5M2())
839+
if (isNativeBf8(chipset, resultElemType))
831840
result = rewriter.create<ROCDL::CvtPkBf8F32Op>(loc, i32, sourceA, sourceB,
832841
existing, wordSel);
833-
else if (resultElemType.isFloat8E4M3FNUZ() || resultElemType.isFloat8E4M3FN())
842+
else if (isNativeFp8(chipset, resultElemType))
834843
result = rewriter.create<ROCDL::CvtPkFp8F32Op>(loc, i32, sourceA, sourceB,
835844
existing, wordSel);
836845

@@ -862,10 +871,10 @@ LogicalResult PackedStochRoundFp8OpLowering::matchAndRewrite(
862871
Value byteSel = createI32Constant(rewriter, loc, op.getStoreIndex());
863872

864873
Value result;
865-
if (resultElemType.isFloat8E5M2FNUZ() || resultElemType.isFloat8E5M2())
874+
if (isNativeBf8(chipset, resultElemType))
866875
result = rewriter.create<ROCDL::CvtSrBf8F32Op>(loc, i32, source, stoch,
867876
existing, byteSel);
868-
else if (resultElemType.isFloat8E4M3FNUZ() || resultElemType.isFloat8E4M3FN())
877+
else if (isNativeFp8(chipset, resultElemType))
869878
result = rewriter.create<ROCDL::CvtSrFp8F32Op>(loc, i32, source, stoch,
870879
existing, byteSel);
871880

Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
1+
// RUN: mlir-opt %s -convert-amdgpu-to-rocdl=chipset=gfx950 | FileCheck %s
2+
// RUN: mlir-opt %s -convert-amdgpu-to-rocdl=chipset=gfx1200 | FileCheck %s
3+
4+
// CHECK-LABEL: func @ext_scalar
5+
// CHECK: [[V:%.+]] = builtin.unrealized_conversion_cast %{{.+}} : f8E5M2 to i8
6+
// CHECK-DAG: [[UNDEF:%.+]] = llvm.mlir.undef : vector<4xi8>
7+
// CHECK-DAG: [[C0_1:%.+]] = llvm.mlir.constant(0 : i32) : i32
8+
// CHECK: [[VEC:%.+]] = llvm.insertelement [[V]], [[UNDEF]]{{\[}}[[C0_1]] : i32] : vector<4xi8>
9+
// CHECK: [[CAST:%.+]] = llvm.bitcast [[VEC]] : vector<4xi8> to i32
10+
// CHECK: [[C0_2:%.+]] = llvm.mlir.constant(0 : i32) : i32
11+
// CHECK: [[EXT:%.+]] = rocdl.cvt.f32.bf8 [[CAST]]{{\[}}[[C0_2]]] : f32
12+
// CHECK: return [[EXT]]
13+
func.func @ext_scalar(%v: f8E5M2) -> f32 {
14+
%ret = amdgpu.ext_packed_fp8 %v[0] : f8E5M2 to f32
15+
func.return %ret : f32
16+
}
17+
18+
// CHECK-LABEL: func @ext_short_vec
19+
// CHECK: [[V:%.+]] = builtin.unrealized_conversion_cast %{{.+}} : vector<2xf8E4M3FN> to vector<2xi8>
20+
// CHECK-DAG: [[UNDEF:%.+]] = llvm.mlir.undef : vector<4xi8>
21+
// CHECK-DAG: [[C0:%.+]] = llvm.mlir.constant(0 : i32) : i32
22+
// CHECK: [[ELEM_0:%.+]] = llvm.extractelement [[V]]{{\[}}[[C0]] : i32] : vector<2xi8>
23+
// CHECK: [[VEC_0:%.+]] = llvm.insertelement [[ELEM_0]], [[UNDEF]]{{\[}}[[C0]] : i32] : vector<4xi8>
24+
// CHECK: [[C1_1:%.+]] = llvm.mlir.constant(1 : i32) : i32
25+
// CHECK: [[ELEM_1:%.+]] = llvm.extractelement [[V]]{{\[}}[[C1_1]] : i32] : vector<2xi8>
26+
// CHECK: [[VEC_1:%.+]] = llvm.insertelement [[ELEM_1]], [[VEC_0]]{{\[}}[[C1_1]] : i32] : vector<4xi8>
27+
// CHECK: [[CAST:%.+]] = llvm.bitcast [[VEC_1]] : vector<4xi8> to i32
28+
// CHECK: [[C1_2:%.+]] = llvm.mlir.constant(1 : i32) : i32
29+
// CHECK: [[EXT:%.+]] = rocdl.cvt.f32.fp8 [[CAST]]{{\[}}[[C1_2]]] : f32
30+
// CHECK: return [[EXT]]
31+
func.func @ext_short_vec(%v: vector<2xf8E4M3FN>) -> f32 {
32+
%ret = amdgpu.ext_packed_fp8 %v[1] : vector<2xf8E4M3FN> to f32
33+
func.return %ret : f32
34+
}
35+
36+
// CHECK-LABEL: func @ext_full_vec(
37+
// CHECK: [[V:%.+]] = builtin.unrealized_conversion_cast %{{.+}} : vector<4xf8E4M3FN> to vector<4xi8>
38+
// CHECK: [[CAST:%.+]] = llvm.bitcast [[V]] : vector<4xi8> to i32
39+
// CHECK: [[C3:%.+]] = llvm.mlir.constant(3 : i32) : i32
40+
// CHECK: [[EXT:%.+]] = rocdl.cvt.f32.fp8 [[CAST]]{{\[}}[[C3]]] : f32
41+
// CHECK: return [[EXT]] : f32
42+
43+
func.func @ext_full_vec(%v: vector<4xf8E4M3FN>) -> f32 {
44+
%ret = amdgpu.ext_packed_fp8 %v[3] : vector<4xf8E4M3FN> to f32
45+
func.return %ret : f32
46+
}
47+
48+
// CHECK-LABEL: func @packed_trunc
49+
// CHECK-SAME: ([[V:%.+]]: f32)
50+
// CHECK: [[V2:%.+]] = llvm.mlir.undef : f32
51+
// CHECK: [[EXISTING:%.+]] = llvm.mlir.undef : i32
52+
// CHECK: [[FALSE:%.+]] = llvm.mlir.constant(false) : i1
53+
// CHECK: [[PACKED:%.+]] = rocdl.cvt.pk.fp8.f32 [[V]], [[V2]] -> [[EXISTING]]{{\[}}[[FALSE]]] : i32
54+
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
55+
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E4M3FN>
56+
func.func @packed_trunc(%v: f32) -> vector<4xf8E4M3FN> {
57+
%ret = amdgpu.packed_trunc_2xfp8 %v, undef into undef[word 0] : f32 to vector<4xf8E4M3FN>
58+
func.return %ret : vector<4xf8E4M3FN>
59+
}
60+
61+
// CHECK-LABEL: func @packed_truncx2
62+
// CHECK-SAME: ([[V:%.+]]: f32, [[W:%.+]]: f32)
63+
// CHECK: [[EXISTING:%.+]] = llvm.mlir.undef : i32
64+
// CHECK: [[FALSE:%.+]] = llvm.mlir.constant(false) : i1
65+
// CHECK: [[PACKED:%.+]] = rocdl.cvt.pk.fp8.f32 [[V]], [[W]] -> [[EXISTING]]{{\[}}[[FALSE]]] : i32
66+
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
67+
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E4M3FN>
68+
func.func @packed_truncx2(%v: f32, %w: f32) -> vector<4xf8E4M3FN> {
69+
%ret = amdgpu.packed_trunc_2xfp8 %v, %w into undef[word 0] : f32 to vector<4xf8E4M3FN>
70+
func.return %ret : vector<4xf8E4M3FN>
71+
}
72+
73+
// CHECK-LABEL: func @packed_truncx2_into
74+
// CHECK-SAME: ([[V:%.+]]: f32, [[W:%.+]]: f32, [[EXISTING:%.+]]: vector<4xf8E5M2>)
75+
// CHECK: [[EXISTING_BYTES:%.+]] = builtin.unrealized_conversion_cast [[EXISTING]] : vector<4xf8E5M2> to vector<4xi8>
76+
// CHECK: [[EXISTING_INT:%.+]] = llvm.bitcast [[EXISTING_BYTES]] : vector<4xi8> to i32
77+
// CHECK: [[TRUE:%.+]] = llvm.mlir.constant(true) : i1
78+
// CHECK: [[PACKED:%.+]] = rocdl.cvt.pk.bf8.f32 [[V]], [[W]] -> [[EXISTING_INT]]{{\[}}[[TRUE]]] : i32
79+
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
80+
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E5M2>
81+
func.func @packed_truncx2_into(%v: f32, %w: f32, %existing: vector<4xf8E5M2>) -> vector<4xf8E5M2> {
82+
%ret = amdgpu.packed_trunc_2xfp8 %v, %w into %existing[word 1] : f32 to vector<4xf8E5M2> into vector<4xf8E5M2>
83+
func.return %ret : vector<4xf8E5M2>
84+
}
85+
86+
// CHECK-LABEL: func @packed_stoch_round
87+
// CHECK-SAME: ([[V:%.+]]: f32, [[S:%.+]]: i32)
88+
// CHECK: [[EXISTING:%.+]] = llvm.mlir.undef : i32
89+
// CHECK: [[C0:%.+]] = llvm.mlir.constant(0 : i32) : i32
90+
// CHECK: [[PACKED:%.+]] = rocdl.cvt.sr.fp8.f32 [[V]], [[S]] -> [[EXISTING]]{{\[}}[[C0]]] : i32
91+
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
92+
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E4M3FN>
93+
func.func @packed_stoch_round(%v: f32, %s: i32) -> vector<4xf8E4M3FN> {
94+
%ret = amdgpu.packed_stoch_round_fp8 %v + %s into undef[0] : f32 to vector<4xf8E4M3FN>
95+
func.return %ret : vector<4xf8E4M3FN>
96+
}
97+
98+
// CHECK-LABEL: func @packed_stoch_round_into
99+
// CHECK-SAME: ([[V:%.+]]: f32, [[S:%.+]]: i32, [[EXISTING:%.+]]: vector<4xf8E5M2>)
100+
// CHECK: [[EXISTING_BYTES:%.+]] = builtin.unrealized_conversion_cast [[EXISTING]] : vector<4xf8E5M2> to vector<4xi8>
101+
// CHECK: [[EXISTING_INT:%.+]] = llvm.bitcast [[EXISTING_BYTES]] : vector<4xi8> to i32
102+
// CHECK: [[C1:%.+]] = llvm.mlir.constant(1 : i32) : i32
103+
// CHECK: [[PACKED:%.+]] = rocdl.cvt.sr.bf8.f32 [[V]], [[S]] -> [[EXISTING_INT]]{{\[}}[[C1]]] : i32
104+
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
105+
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E5M2>
106+
func.func @packed_stoch_round_into(%v: f32, %s: i32, %existing: vector<4xf8E5M2>) -> vector<4xf8E5M2> {
107+
%ret = amdgpu.packed_stoch_round_fp8 %v + %s into %existing[1] : f32 to vector<4xf8E5M2> into vector<4xf8E5M2>
108+
func.return %ret : vector<4xf8E5M2>
109+
}
Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
// RUN: mlir-opt --split-input-file %s \
2+
// RUN: --pass-pipeline='builtin.module(func.func(convert-arith-to-amdgpu{chipset=gfx950 saturate-fp8-truncf=true}))' \
3+
// RUN: | FileCheck %s
4+
5+
// RUN: mlir-opt --split-input-file %s \
6+
// RUN: --pass-pipeline='builtin.module(func.func(convert-arith-to-amdgpu{chipset=gfx1200 saturate-fp8-truncf=true}))' \
7+
// RUN: | FileCheck %s
8+
9+
// CHECK-LABEL: func.func @scalar_trunc
10+
// CHECK-SAME: ([[V:%.+]]: f16)
11+
// CHECK-DAG: [[CMin:%.+]] = arith.constant -5.734400e+04 : f16
12+
// CHECK-DAG: [[CMax:%.+]] = arith.constant 5.734400e+04 : f16
13+
// CHECK-DAG: [[CInf:%.+]] = arith.constant 0x7C00 : f16
14+
// CHECK-DAG: [[CNegInf:%.+]] = arith.constant 0xFC00 : f16
15+
// CHECK: [[ISINF:%.+]] = arith.cmpf oeq, [[V]], [[CInf]]
16+
// CHECK: [[ISNEGINF:%.+]] = arith.cmpf oeq, [[V]], [[CNegInf]]
17+
// CHECK: [[ISNAN:%.+]] = arith.cmpf uno, [[V]], [[V]]
18+
// CHECK: [[ISNONFINITE_1:%.+]] = arith.ori [[ISINF]], [[ISNEGINF]]
19+
// CHECK: [[ISNONFINITE:%.+]] = arith.ori [[ISNONFINITE_1]], [[ISNAN]]
20+
// CHECK: [[CLAMPEDBELOW:%.+]] = arith.maximumf [[V]], [[CMin]]
21+
// CHECK: [[CLAMPED:%.+]] = arith.minimumf [[CLAMPEDBELOW]], [[CMax]]
22+
// CHECK: [[SATURATED:%.+]] = arith.select [[ISNONFINITE]], [[V]], [[CLAMPED]]
23+
// CHECK: [[FLOAT:%.+]] = arith.extf [[SATURATED]] : f16 to f32
24+
// CHECK: [[TRUNCV:%.+]] = amdgpu.packed_trunc_2xfp8 [[FLOAT]], undef into undef[word 0] : f32 to vector<4xf8E5M2>
25+
// CHECK: [[W:%.+]] = vector.extract [[TRUNCV]][0] : f8E5M2 from vector<4xf8E5M2>
26+
// CHECK: return [[W]] : f8E5M2
27+
func.func @scalar_trunc(%v: f16) -> f8E5M2 {
28+
%w = arith.truncf %v : f16 to f8E5M2
29+
return %w : f8E5M2
30+
}
31+
32+
// No 0-D test because arith.truncf hasn't been extended to support it.
33+
34+
// -----
35+
36+
// CHECK-LABEL: func.func @vector_trunc
37+
// CHECK-SAME: ([[V:%.+]]: vector<2xf32>) -> vector<2xf8E4M3FN> {
38+
// CHECK-DAG: [[CMin:%.+]] = arith.constant dense<-4.480000e+02> : vector<2xf32>
39+
// CHECK-DAG: [[CMax:%.+]] = arith.constant dense<4.480000e+02> : vector<2xf32>
40+
// CHECK-DAG: [[CInf:%.+]] = arith.constant dense<0x7F800000> : vector<2xf32>
41+
// CHECK-DAG: [[CNegInf:%.+]] = arith.constant dense<0xFF800000> : vector<2xf32>
42+
// CHECK: [[ISINF:%.+]] = arith.cmpf oeq, [[V]], [[CInf]]
43+
// CHECK: [[ISNEGINF:%.+]] = arith.cmpf oeq, [[V]], [[CNegInf]]
44+
// CHECK: [[ISNAN:%.+]] = arith.cmpf uno, [[V]], [[V]]
45+
// CHECK: [[ISNONFINITE_1:%.+]] = arith.ori [[ISINF]], [[ISNEGINF]]
46+
// CHECK: [[ISNONFINITE:%.+]] = arith.ori [[ISNONFINITE_1]], [[ISNAN]]
47+
// CHECK: [[CLAMPEDBELOW:%.+]] = arith.maximumf [[V]], [[CMin]]
48+
// CHECK: [[CLAMPED:%.+]] = arith.minimumf [[CLAMPEDBELOW]], [[CMax]]
49+
// CHECK: [[SATURATED:%.+]] = arith.select [[ISNONFINITE]], [[V]], [[CLAMPED]]
50+
// CHECK: [[F0:%.+]] = vector.extract [[SATURATED]][0]
51+
// CHECK: [[F1:%.+]] = vector.extract [[SATURATED]][1]
52+
// CHECK: [[W0:%.+]] = amdgpu.packed_trunc_2xfp8 [[F0]], [[F1]] into undef[word 0] : f32 to vector<4xf8E4M3FN>
53+
// CHECK: [[W:%.+]] = vector.extract_strided_slice [[W0]] {offsets = [0], sizes = [2], strides = [1]} : vector<4xf8E4M3FN> to vector<2xf8E4M3FN>
54+
// CHECK: return [[W]] : vector<2xf8E4M3FN>
55+
func.func @vector_trunc_short(%v: vector<2xf32>) -> vector<2xf8E4M3FN> {
56+
%w = arith.truncf %v : vector<2xf32> to vector<2xf8E4M3FN>
57+
return %w : vector<2xf8E4M3FN>
58+
}

0 commit comments

Comments
 (0)