; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh < %s | FileCheck %s
; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=256 < %s | FileCheck %s
-; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-min=256 -riscv-v-vector-bits-min=256 < %s | FileCheck %s
; RUN: opt -passes='print<cost-model>' 2>&1 -disable-output -mtriple=riscv64 < %s | FileCheck %s

-define i32 @masked_gather() {
-; CHECK-LABEL: 'masked_gather'
+define void @masked_gather_aligned() {
+; CHECK-LABEL: 'masked_gather_aligned'
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F64 = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> undef, i32 8, <vscale x 8 x i1> undef, <vscale x 8 x double> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F64 = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> undef, i32 8, <vscale x 4 x i1> undef, <vscale x 4 x double> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F64 = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0f64(<vscale x 2 x double*> undef, i32 8, <vscale x 2 x i1> undef, <vscale x 2 x double> undef)
@@ -43,37 +42,7 @@ define i32 @masked_gather() {
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I8 = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0i8(<vscale x 4 x i8*> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I8 = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I8 = call <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0i8(<vscale x 1 x i8*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x i8> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F64.u = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x double> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F64.u = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F64.u = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0f64(<vscale x 2 x double*> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F64.u = call <vscale x 1 x double> @llvm.masked.gather.nxv1f64.nxv1p0f64(<vscale x 1 x double*> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F32.u = call <vscale x 16 x float> @llvm.masked.gather.nxv16f32.nxv16p0f32(<vscale x 16 x float*> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F32.u = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F32.u = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F32.u = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0f32(<vscale x 2 x float*> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F32.u = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0f32(<vscale x 1 x float*> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32F16.u = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0f16(<vscale x 32 x half*> undef, i32 1, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F16.u = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0f16(<vscale x 16 x half*> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F16.u = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0f16(<vscale x 8 x half*> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F16.u = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0f16(<vscale x 4 x half*> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F16.u = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0f16(<vscale x 2 x half*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F16.u = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0f16(<vscale x 1 x half*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I64.u = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> undef, i32 4, <vscale x 8 x i1> undef, <vscale x 8 x i64> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I64.u = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0i64(<vscale x 4 x i64*> undef, i32 4, <vscale x 4 x i1> undef, <vscale x 4 x i64> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I64.u = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0i64(<vscale x 2 x i64*> undef, i32 4, <vscale x 2 x i1> undef, <vscale x 2 x i64> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I64.u = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0i64(<vscale x 1 x i64*> undef, i32 4, <vscale x 1 x i1> undef, <vscale x 1 x i64> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I32.u = call <vscale x 16 x i32> @llvm.masked.gather.nxv16i32.nxv16p0i32(<vscale x 16 x i32*> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x i32> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I32.u = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x i32> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I32.u = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0i32(<vscale x 4 x i32*> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x i32> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I32.u = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0i32(<vscale x 2 x i32*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x i32> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I32.u = call <vscale x 1 x i32> @llvm.masked.gather.nxv1i32.nxv1p0i32(<vscale x 1 x i32*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x i32> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32I16.u = call <vscale x 32 x i16> @llvm.masked.gather.nxv32i16.nxv32p0i16(<vscale x 32 x i16*> undef, i32 1, <vscale x 32 x i1> undef, <vscale x 32 x i16> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I16.u = call <vscale x 16 x i16> @llvm.masked.gather.nxv16i16.nxv16p0i16(<vscale x 16 x i16*> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x i16> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I16.u = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0i16(<vscale x 8 x i16*> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x i16> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I16.u = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0i16(<vscale x 4 x i16*> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x i16> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I16.u = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x i16> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I16.u = call <vscale x 1 x i16> @llvm.masked.gather.nxv1i16.nxv1p0i16(<vscale x 1 x i16*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 0
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
%V8F64 = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> undef, i32 8, <vscale x 8 x i1> undef, <vscale x 8 x double> undef)
%V4F64 = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> undef, i32 8, <vscale x 4 x i1> undef, <vscale x 4 x double> undef)
@@ -118,8 +87,43 @@ define i32 @masked_gather() {
%V4I8 = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0i8(<vscale x 4 x i8*> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x i8> undef)
%V2I8 = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x i8> undef)
%V1I8 = call <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0i8(<vscale x 1 x i8*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x i8> undef)
+  ret void
+}

-  ; Test unaligned gathers
+define void @masked_gather_unaligned() {
+; CHECK-LABEL: 'masked_gather_unaligned'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F64.u = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F64.u = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F64.u = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0f64(<vscale x 2 x double*> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F64.u = call <vscale x 1 x double> @llvm.masked.gather.nxv1f64.nxv1p0f64(<vscale x 1 x double*> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F32.u = call <vscale x 16 x float> @llvm.masked.gather.nxv16f32.nxv16p0f32(<vscale x 16 x float*> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F32.u = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F32.u = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F32.u = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0f32(<vscale x 2 x float*> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F32.u = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0f32(<vscale x 1 x float*> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x float> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32F16.u = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0f16(<vscale x 32 x half*> undef, i32 1, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F16.u = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0f16(<vscale x 16 x half*> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F16.u = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0f16(<vscale x 8 x half*> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F16.u = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0f16(<vscale x 4 x half*> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F16.u = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0f16(<vscale x 2 x half*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F16.u = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0f16(<vscale x 1 x half*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I64.u = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> undef, i32 4, <vscale x 8 x i1> undef, <vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I64.u = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0i64(<vscale x 4 x i64*> undef, i32 4, <vscale x 4 x i1> undef, <vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I64.u = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0i64(<vscale x 2 x i64*> undef, i32 4, <vscale x 2 x i1> undef, <vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I64.u = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0i64(<vscale x 1 x i64*> undef, i32 4, <vscale x 1 x i1> undef, <vscale x 1 x i64> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I32.u = call <vscale x 16 x i32> @llvm.masked.gather.nxv16i32.nxv16p0i32(<vscale x 16 x i32*> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I32.u = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I32.u = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0i32(<vscale x 4 x i32*> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I32.u = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0i32(<vscale x 2 x i32*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I32.u = call <vscale x 1 x i32> @llvm.masked.gather.nxv1i32.nxv1p0i32(<vscale x 1 x i32*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x i32> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32I16.u = call <vscale x 32 x i16> @llvm.masked.gather.nxv32i16.nxv32p0i16(<vscale x 32 x i16*> undef, i32 1, <vscale x 32 x i1> undef, <vscale x 32 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16I16.u = call <vscale x 16 x i16> @llvm.masked.gather.nxv16i16.nxv16p0i16(<vscale x 16 x i16*> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I16.u = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0i16(<vscale x 8 x i16*> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I16.u = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0i16(<vscale x 4 x i16*> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I16.u = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x i16> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1I16.u = call <vscale x 1 x i16> @llvm.masked.gather.nxv1i16.nxv1p0i16(<vscale x 1 x i16*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
%V8F64.u = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x double> undef)
%V4F64.u = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x double> undef)
%V2F64.u = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0f64(<vscale x 2 x double*> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x double> undef)
@@ -156,7 +160,7 @@ define i32 @masked_gather() {
%V2I16.u = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x i16> undef)
%V1I16.u = call <vscale x 1 x i16> @llvm.masked.gather.nxv1i16.nxv1p0i16(<vscale x 1 x i16*> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x i16> undef)

-  ret i32 0
+  ret void
}

declare <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*>, i32, <vscale x 8 x i1>, <vscale x 8 x double>)
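
As the NOTE line at the top of the file records, the CHECK lines in this test are autogenerated by utils/update_analyze_test_checks.py rather than written by hand, so an edit like the function split above ends with a regeneration step. A minimal sketch of that step follows; the test file name and the build directory are illustrative assumptions, not taken from this diff:

# Hypothetical paths; point the script at your own checkout and built opt.
python llvm/utils/update_analyze_test_checks.py \
    --opt-binary=build/bin/opt \
    llvm/test/Analysis/CostModel/RISCV/masked-gather.ll

The script reruns each RUN line through opt and rewrites the CHECK-NEXT assertions in place from the actual cost-model output, which is why the lines above follow such a uniform format.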