//===----------------------------------------------------------------------===//

// CHECK-LABEL: @iadd_scalar
-func @iadd_scalar(%arg0: i32, %arg1: i32) {
+spv.func @iadd_scalar(%arg0: i32, %arg1: i32) "None" {
  // CHECK: llvm.add %{{.*}}, %{{.*}} : !llvm.i32
  %0 = spv.IAdd %arg0, %arg1 : i32
-  return
+  spv.Return
}

// CHECK-LABEL: @iadd_vector
-func @iadd_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) {
+spv.func @iadd_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
  // CHECK: llvm.add %{{.*}}, %{{.*}} : !llvm.vec<4 x i64>
  %0 = spv.IAdd %arg0, %arg1 : vector<4xi64>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.ISub
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @isub_scalar
-func @isub_scalar(%arg0: i8, %arg1: i8) {
+spv.func @isub_scalar(%arg0: i8, %arg1: i8) "None" {
  // CHECK: llvm.sub %{{.*}}, %{{.*}} : !llvm.i8
  %0 = spv.ISub %arg0, %arg1 : i8
-  return
+  spv.Return
}

// CHECK-LABEL: @isub_vector
-func @isub_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) {
+spv.func @isub_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) "None" {
  // CHECK: llvm.sub %{{.*}}, %{{.*}} : !llvm.vec<2 x i16>
  %0 = spv.ISub %arg0, %arg1 : vector<2xi16>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.IMul
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @imul_scalar
-func @imul_scalar(%arg0: i32, %arg1: i32) {
+spv.func @imul_scalar(%arg0: i32, %arg1: i32) "None" {
  // CHECK: llvm.mul %{{.*}}, %{{.*}} : !llvm.i32
  %0 = spv.IMul %arg0, %arg1 : i32
-  return
+  spv.Return
}

// CHECK-LABEL: @imul_vector
-func @imul_vector(%arg0: vector<3xi32>, %arg1: vector<3xi32>) {
+spv.func @imul_vector(%arg0: vector<3xi32>, %arg1: vector<3xi32>) "None" {
  // CHECK: llvm.mul %{{.*}}, %{{.*}} : !llvm.vec<3 x i32>
  %0 = spv.IMul %arg0, %arg1 : vector<3xi32>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.FAdd
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @fadd_scalar
-func @fadd_scalar(%arg0: f16, %arg1: f16) {
+spv.func @fadd_scalar(%arg0: f16, %arg1: f16) "None" {
  // CHECK: llvm.fadd %{{.*}}, %{{.*}} : !llvm.half
  %0 = spv.FAdd %arg0, %arg1 : f16
-  return
+  spv.Return
}

// CHECK-LABEL: @fadd_vector
-func @fadd_vector(%arg0: vector<4xf32>, %arg1: vector<4xf32>) {
+spv.func @fadd_vector(%arg0: vector<4xf32>, %arg1: vector<4xf32>) "None" {
  // CHECK: llvm.fadd %{{.*}}, %{{.*}} : !llvm.vec<4 x float>
  %0 = spv.FAdd %arg0, %arg1 : vector<4xf32>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.FSub
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @fsub_scalar
-func @fsub_scalar(%arg0: f32, %arg1: f32) {
+spv.func @fsub_scalar(%arg0: f32, %arg1: f32) "None" {
  // CHECK: llvm.fsub %{{.*}}, %{{.*}} : !llvm.float
  %0 = spv.FSub %arg0, %arg1 : f32
-  return
+  spv.Return
}

// CHECK-LABEL: @fsub_vector
-func @fsub_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) {
+spv.func @fsub_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
  // CHECK: llvm.fsub %{{.*}}, %{{.*}} : !llvm.vec<2 x float>
  %0 = spv.FSub %arg0, %arg1 : vector<2xf32>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.FDiv
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @fdiv_scalar
-func @fdiv_scalar(%arg0: f32, %arg1: f32) {
+spv.func @fdiv_scalar(%arg0: f32, %arg1: f32) "None" {
  // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : !llvm.float
  %0 = spv.FDiv %arg0, %arg1 : f32
-  return
+  spv.Return
}

// CHECK-LABEL: @fdiv_vector
-func @fdiv_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) {
+spv.func @fdiv_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
  // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : !llvm.vec<3 x double>
  %0 = spv.FDiv %arg0, %arg1 : vector<3xf64>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.FMul
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @fmul_scalar
-func @fmul_scalar(%arg0: f32, %arg1: f32) {
+spv.func @fmul_scalar(%arg0: f32, %arg1: f32) "None" {
  // CHECK: llvm.fmul %{{.*}}, %{{.*}} : !llvm.float
  %0 = spv.FMul %arg0, %arg1 : f32
-  return
+  spv.Return
}

// CHECK-LABEL: @fmul_vector
-func @fmul_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) {
+spv.func @fmul_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
  // CHECK: llvm.fmul %{{.*}}, %{{.*}} : !llvm.vec<2 x float>
  %0 = spv.FMul %arg0, %arg1 : vector<2xf32>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.FRem
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @frem_scalar
-func @frem_scalar(%arg0: f32, %arg1: f32) {
+spv.func @frem_scalar(%arg0: f32, %arg1: f32) "None" {
  // CHECK: llvm.frem %{{.*}}, %{{.*}} : !llvm.float
  %0 = spv.FRem %arg0, %arg1 : f32
-  return
+  spv.Return
}

// CHECK-LABEL: @frem_vector
-func @frem_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) {
+spv.func @frem_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
  // CHECK: llvm.frem %{{.*}}, %{{.*}} : !llvm.vec<3 x double>
  %0 = spv.FRem %arg0, %arg1 : vector<3xf64>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.FNegate
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @fneg_scalar
-func @fneg_scalar(%arg: f64) {
+spv.func @fneg_scalar(%arg: f64) "None" {
  // CHECK: llvm.fneg %{{.*}} : !llvm.double
  %0 = spv.FNegate %arg : f64
-  return
+  spv.Return
}

// CHECK-LABEL: @fneg_vector
-func @fneg_vector(%arg: vector<2xf32>) {
+spv.func @fneg_vector(%arg: vector<2xf32>) "None" {
  // CHECK: llvm.fneg %{{.*}} : !llvm.vec<2 x float>
  %0 = spv.FNegate %arg : vector<2xf32>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.UDiv
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @udiv_scalar
-func @udiv_scalar(%arg0: i32, %arg1: i32) {
+spv.func @udiv_scalar(%arg0: i32, %arg1: i32) "None" {
  // CHECK: llvm.udiv %{{.*}}, %{{.*}} : !llvm.i32
  %0 = spv.UDiv %arg0, %arg1 : i32
-  return
+  spv.Return
}

// CHECK-LABEL: @udiv_vector
-func @udiv_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) {
+spv.func @udiv_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) "None" {
  // CHECK: llvm.udiv %{{.*}}, %{{.*}} : !llvm.vec<3 x i64>
  %0 = spv.UDiv %arg0, %arg1 : vector<3xi64>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.UMod
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @umod_scalar
-func @umod_scalar(%arg0: i32, %arg1: i32) {
+spv.func @umod_scalar(%arg0: i32, %arg1: i32) "None" {
  // CHECK: llvm.urem %{{.*}}, %{{.*}} : !llvm.i32
  %0 = spv.UMod %arg0, %arg1 : i32
-  return
+  spv.Return
}

// CHECK-LABEL: @umod_vector
-func @umod_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) {
+spv.func @umod_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) "None" {
  // CHECK: llvm.urem %{{.*}}, %{{.*}} : !llvm.vec<3 x i64>
  %0 = spv.UMod %arg0, %arg1 : vector<3xi64>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.SDiv
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @sdiv_scalar
-func @sdiv_scalar(%arg0: i16, %arg1: i16) {
+spv.func @sdiv_scalar(%arg0: i16, %arg1: i16) "None" {
  // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : !llvm.i16
  %0 = spv.SDiv %arg0, %arg1 : i16
-  return
+  spv.Return
}

// CHECK-LABEL: @sdiv_vector
-func @sdiv_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
+spv.func @sdiv_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
  // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : !llvm.vec<2 x i64>
  %0 = spv.SDiv %arg0, %arg1 : vector<2xi64>
-  return
+  spv.Return
}

//===----------------------------------------------------------------------===//
// spv.SRem
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @srem_scalar
-func @srem_scalar(%arg0: i32, %arg1: i32) {
+spv.func @srem_scalar(%arg0: i32, %arg1: i32) "None" {
  // CHECK: llvm.srem %{{.*}}, %{{.*}} : !llvm.i32
  %0 = spv.SRem %arg0, %arg1 : i32
-  return
+  spv.Return
}

// CHECK-LABEL: @srem_vector
-func @srem_vector(%arg0: vector<4xi32>, %arg1: vector<4xi32>) {
+spv.func @srem_vector(%arg0: vector<4xi32>, %arg1: vector<4xi32>) "None" {
  // CHECK: llvm.srem %{{.*}}, %{{.*}} : !llvm.vec<4 x i32>
  %0 = spv.SRem %arg0, %arg1 : vector<4xi32>
-  return
+  spv.Return
}