Skip to content

Commit fd6e3aa

Browse files
committed
fix error in intrinsic suffix
1 parent ff8982e commit fd6e3aa

File tree

1 file changed

+54
-54
lines changed
  • llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll

1 file changed

+54
-54
lines changed

llvm/test/CodeGen/LoongArch/lasx/ir-instruction/absd.ll

Lines changed: 54 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ define <32 x i8> @xvabsd_b(<32 x i8> %a, <32 x i8> %b) {
2323
%a.sext = sext <32 x i8> %a to <32 x i16>
2424
%b.sext = sext <32 x i8> %b to <32 x i16>
2525
%sub = sub <32 x i16> %a.sext, %b.sext
26-
%abs = call <32 x i16> @llvm.abs.v16i16(<32 x i16> %sub, i1 true)
26+
%abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
2727
%trunc = trunc <32 x i16> %abs to <32 x i8>
2828
ret <32 x i8> %trunc
2929
}
@@ -38,7 +38,7 @@ define <16 x i16> @xvabsd_h(<16 x i16> %a, <16 x i16> %b) {
3838
%a.sext = sext <16 x i16> %a to <16 x i32>
3939
%b.sext = sext <16 x i16> %b to <16 x i32>
4040
%sub = sub <16 x i32> %a.sext, %b.sext
41-
%abs = call <16 x i32> @llvm.abs.v8i32(<16 x i32> %sub, i1 true)
41+
%abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
4242
%trunc = trunc <16 x i32> %abs to <16 x i16>
4343
ret <16 x i16> %trunc
4444
}
@@ -53,7 +53,7 @@ define <8 x i32> @xvabsd_w(<8 x i32> %a, <8 x i32> %b) {
5353
%a.sext = sext <8 x i32> %a to <8 x i64>
5454
%b.sext = sext <8 x i32> %b to <8 x i64>
5555
%sub = sub <8 x i64> %a.sext, %b.sext
56-
%abs = call <8 x i64> @llvm.abs.v4i64(<8 x i64> %sub, i1 true)
56+
%abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
5757
%trunc = trunc <8 x i64> %abs to <8 x i32>
5858
ret <8 x i32> %trunc
5959
}
@@ -68,7 +68,7 @@ define <4 x i64> @xvabsd_d(<4 x i64> %a, <4 x i64> %b) {
6868
%a.sext = sext <4 x i64> %a to <4 x i128>
6969
%b.sext = sext <4 x i64> %b to <4 x i128>
7070
%sub = sub <4 x i128> %a.sext, %b.sext
71-
%abs = call <4 x i128> @llvm.abs.v2i128(<4 x i128> %sub, i1 true)
71+
%abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
7272
%trunc = trunc <4 x i128> %abs to <4 x i64>
7373
ret <4 x i64> %trunc
7474
}
@@ -83,7 +83,7 @@ define <32 x i8> @xvabsd_bu(<32 x i8> %a, <32 x i8> %b) {
8383
%a.zext = zext <32 x i8> %a to <32 x i16>
8484
%b.zext = zext <32 x i8> %b to <32 x i16>
8585
%sub = sub <32 x i16> %a.zext, %b.zext
86-
%abs = call <32 x i16> @llvm.abs.v16i16(<32 x i16> %sub, i1 true)
86+
%abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
8787
%trunc = trunc <32 x i16> %abs to <32 x i8>
8888
ret <32 x i8> %trunc
8989
}
@@ -98,7 +98,7 @@ define <16 x i16> @xvabsd_hu(<16 x i16> %a, <16 x i16> %b) {
9898
%a.zext = zext <16 x i16> %a to <16 x i32>
9999
%b.zext = zext <16 x i16> %b to <16 x i32>
100100
%sub = sub <16 x i32> %a.zext, %b.zext
101-
%abs = call <16 x i32> @llvm.abs.v8i32(<16 x i32> %sub, i1 true)
101+
%abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
102102
%trunc = trunc <16 x i32> %abs to <16 x i16>
103103
ret <16 x i16> %trunc
104104
}
@@ -113,7 +113,7 @@ define <8 x i32> @xvabsd_wu(<8 x i32> %a, <8 x i32> %b) {
113113
%a.zext = zext <8 x i32> %a to <8 x i64>
114114
%b.zext = zext <8 x i32> %b to <8 x i64>
115115
%sub = sub <8 x i64> %a.zext, %b.zext
116-
%abs = call <8 x i64> @llvm.abs.v4i64(<8 x i64> %sub, i1 true)
116+
%abs = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %sub, i1 true)
117117
%trunc = trunc <8 x i64> %abs to <8 x i32>
118118
ret <8 x i32> %trunc
119119
}
@@ -128,7 +128,7 @@ define <4 x i64> @xvabsd_du(<4 x i64> %a, <4 x i64> %b) {
128128
%a.zext = zext <4 x i64> %a to <4 x i128>
129129
%b.zext = zext <4 x i64> %b to <4 x i128>
130130
%sub = sub <4 x i128> %a.zext, %b.zext
131-
%abs = call <4 x i128> @llvm.abs.v2i128(<4 x i128> %sub, i1 true)
131+
%abs = call <4 x i128> @llvm.abs.v4i128(<4 x i128> %sub, i1 true)
132132
%trunc = trunc <4 x i128> %abs to <4 x i64>
133133
ret <4 x i64> %trunc
134134
}
@@ -142,7 +142,7 @@ define <32 x i8> @xvabsd_b_nsw(<32 x i8> %a, <32 x i8> %b) {
142142
; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
143143
; CHECK-NEXT: ret
144144
%sub = sub nsw <32 x i8> %a, %b
145-
%abs = call <32 x i8> @llvm.abs.v16i8(<32 x i8> %sub, i1 true)
145+
%abs = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %sub, i1 true)
146146
ret <32 x i8> %abs
147147
}
148148

@@ -154,7 +154,7 @@ define <16 x i16> @xvabsd_h_nsw(<16 x i16> %a, <16 x i16> %b) {
154154
; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
155155
; CHECK-NEXT: ret
156156
%sub = sub nsw <16 x i16> %a, %b
157-
%abs = call <16 x i16> @llvm.abs.v8i16(<16 x i16> %sub, i1 true)
157+
%abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
158158
ret <16 x i16> %abs
159159
}
160160

@@ -166,7 +166,7 @@ define <8 x i32> @xvabsd_w_nsw(<8 x i32> %a, <8 x i32> %b) {
166166
; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
167167
; CHECK-NEXT: ret
168168
%sub = sub nsw <8 x i32> %a, %b
169-
%abs = call <8 x i32> @llvm.abs.v4i32(<8 x i32> %sub, i1 true)
169+
%abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
170170
ret <8 x i32> %abs
171171
}
172172

@@ -178,7 +178,7 @@ define <4 x i64> @xvabsd_d_nsw(<4 x i64> %a, <4 x i64> %b) {
178178
; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
179179
; CHECK-NEXT: ret
180180
%sub = sub nsw <4 x i64> %a, %b
181-
%abs = call <4 x i64> @llvm.abs.v2i64(<4 x i64> %sub, i1 true)
181+
%abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
182182
ret <4 x i64> %abs
183183
}
184184

@@ -190,8 +190,8 @@ define <32 x i8> @maxmin_b(<32 x i8> %0, <32 x i8> %1) {
190190
; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
191191
; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr2
192192
; CHECK-NEXT: ret
193-
%a = tail call <32 x i8> @llvm.smax.v16i8(<32 x i8> %0, <32 x i8> %1)
194-
%b = tail call <32 x i8> @llvm.smin.v16i8(<32 x i8> %0, <32 x i8> %1)
193+
%a = tail call <32 x i8> @llvm.smax.v32i8(<32 x i8> %0, <32 x i8> %1)
194+
%b = tail call <32 x i8> @llvm.smin.v32i8(<32 x i8> %0, <32 x i8> %1)
195195
%sub = sub <32 x i8> %a, %b
196196
ret <32 x i8> %sub
197197
}
@@ -203,8 +203,8 @@ define <16 x i16> @maxmin_h(<16 x i16> %0, <16 x i16> %1) {
203203
; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
204204
; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr2
205205
; CHECK-NEXT: ret
206-
%a = tail call <16 x i16> @llvm.smax.v8i16(<16 x i16> %0, <16 x i16> %1)
207-
%b = tail call <16 x i16> @llvm.smin.v8i16(<16 x i16> %0, <16 x i16> %1)
206+
%a = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %0, <16 x i16> %1)
207+
%b = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %0, <16 x i16> %1)
208208
%sub = sub <16 x i16> %a, %b
209209
ret <16 x i16> %sub
210210
}
@@ -216,8 +216,8 @@ define <8 x i32> @maxmin_w(<8 x i32> %0, <8 x i32> %1) {
216216
; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
217217
; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr2
218218
; CHECK-NEXT: ret
219-
%a = tail call <8 x i32> @llvm.smax.v4i32(<8 x i32> %0, <8 x i32> %1)
220-
%b = tail call <8 x i32> @llvm.smin.v4i32(<8 x i32> %0, <8 x i32> %1)
219+
%a = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %0, <8 x i32> %1)
220+
%b = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> %0, <8 x i32> %1)
221221
%sub = sub <8 x i32> %a, %b
222222
ret <8 x i32> %sub
223223
}
@@ -229,8 +229,8 @@ define <4 x i64> @maxmin_d(<4 x i64> %0, <4 x i64> %1) {
229229
; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
230230
; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr2
231231
; CHECK-NEXT: ret
232-
%a = tail call <4 x i64> @llvm.smax.v2i64(<4 x i64> %0, <4 x i64> %1)
233-
%b = tail call <4 x i64> @llvm.smin.v2i64(<4 x i64> %0, <4 x i64> %1)
232+
%a = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %0, <4 x i64> %1)
233+
%b = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %0, <4 x i64> %1)
234234
%sub = sub <4 x i64> %a, %b
235235
ret <4 x i64> %sub
236236
}
@@ -242,8 +242,8 @@ define <32 x i8> @maxmin_bu(<32 x i8> %0, <32 x i8> %1) {
242242
; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
243243
; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr2
244244
; CHECK-NEXT: ret
245-
%a = tail call <32 x i8> @llvm.umax.v16i8(<32 x i8> %0, <32 x i8> %1)
246-
%b = tail call <32 x i8> @llvm.umin.v16i8(<32 x i8> %0, <32 x i8> %1)
245+
%a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
246+
%b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %0, <32 x i8> %1)
247247
%sub = sub <32 x i8> %a, %b
248248
ret <32 x i8> %sub
249249
}
@@ -255,8 +255,8 @@ define <16 x i16> @maxmin_hu(<16 x i16> %0, <16 x i16> %1) {
255255
; CHECK-NEXT: xvmax.hu $xr0, $xr0, $xr1
256256
; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr2
257257
; CHECK-NEXT: ret
258-
%a = tail call <16 x i16> @llvm.umax.v8i16(<16 x i16> %0, <16 x i16> %1)
259-
%b = tail call <16 x i16> @llvm.umin.v8i16(<16 x i16> %0, <16 x i16> %1)
258+
%a = tail call <16 x i16> @llvm.umax.v16i16(<16 x i16> %0, <16 x i16> %1)
259+
%b = tail call <16 x i16> @llvm.umin.v16i16(<16 x i16> %0, <16 x i16> %1)
260260
%sub = sub <16 x i16> %a, %b
261261
ret <16 x i16> %sub
262262
}
@@ -268,8 +268,8 @@ define <8 x i32> @maxmin_wu(<8 x i32> %0, <8 x i32> %1) {
268268
; CHECK-NEXT: xvmax.wu $xr0, $xr0, $xr1
269269
; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr2
270270
; CHECK-NEXT: ret
271-
%a = tail call <8 x i32> @llvm.umax.v4i32(<8 x i32> %0, <8 x i32> %1)
272-
%b = tail call <8 x i32> @llvm.umin.v4i32(<8 x i32> %0, <8 x i32> %1)
271+
%a = tail call <8 x i32> @llvm.umax.v8i32(<8 x i32> %0, <8 x i32> %1)
272+
%b = tail call <8 x i32> @llvm.umin.v8i32(<8 x i32> %0, <8 x i32> %1)
273273
%sub = sub <8 x i32> %a, %b
274274
ret <8 x i32> %sub
275275
}
@@ -281,8 +281,8 @@ define <4 x i64> @maxmin_du(<4 x i64> %0, <4 x i64> %1) {
281281
; CHECK-NEXT: xvmax.du $xr0, $xr0, $xr1
282282
; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr2
283283
; CHECK-NEXT: ret
284-
%a = tail call <4 x i64> @llvm.umax.v2i64(<4 x i64> %0, <4 x i64> %1)
285-
%b = tail call <4 x i64> @llvm.umin.v2i64(<4 x i64> %0, <4 x i64> %1)
284+
%a = tail call <4 x i64> @llvm.umax.v4i64(<4 x i64> %0, <4 x i64> %1)
285+
%b = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %0, <4 x i64> %1)
286286
%sub = sub <4 x i64> %a, %b
287287
ret <4 x i64> %sub
288288
}
@@ -294,8 +294,8 @@ define <32 x i8> @maxmin_bu_com1(<32 x i8> %0, <32 x i8> %1) {
294294
; CHECK-NEXT: xvmax.bu $xr0, $xr0, $xr1
295295
; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr2
296296
; CHECK-NEXT: ret
297-
%a = tail call <32 x i8> @llvm.umax.v16i8(<32 x i8> %0, <32 x i8> %1)
298-
%b = tail call <32 x i8> @llvm.umin.v16i8(<32 x i8> %1, <32 x i8> %0)
297+
%a = tail call <32 x i8> @llvm.umax.v32i8(<32 x i8> %0, <32 x i8> %1)
298+
%b = tail call <32 x i8> @llvm.umin.v32i8(<32 x i8> %1, <32 x i8> %0)
299299
%sub = sub <32 x i8> %a, %b
300300
ret <32 x i8> %sub
301301
}
@@ -526,32 +526,32 @@ define <4 x i64> @xvabsd_du_select(<4 x i64> %a, <4 x i64> %b) nounwind {
526526
ret <4 x i64> %sub
527527
}
528528

529-
declare <32 x i8> @llvm.abs.v16i8(<32 x i8>, i1)
529+
declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
530530

531-
declare <16 x i16> @llvm.abs.v8i16(<16 x i16>, i1)
532-
declare <32 x i16> @llvm.abs.v16i16(<32 x i16>, i1)
531+
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
532+
declare <32 x i16> @llvm.abs.v32i16(<32 x i16>, i1)
533533

534-
declare <8 x i32> @llvm.abs.v4i32(<8 x i32>, i1)
535-
declare <16 x i32> @llvm.abs.v8i32(<16 x i32>, i1)
534+
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
535+
declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
536536

537-
declare <4 x i64> @llvm.abs.v2i64(<4 x i64>, i1)
538-
declare <8 x i64> @llvm.abs.v4i64(<8 x i64>, i1)
537+
declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
538+
declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
539539

540-
declare <4 x i128> @llvm.abs.v2i128(<4 x i128>, i1)
540+
declare <4 x i128> @llvm.abs.v4i128(<4 x i128>, i1)
541541

542-
declare <32 x i8> @llvm.smax.v16i8(<32 x i8>, <32 x i8>)
543-
declare <16 x i16> @llvm.smax.v8i16(<16 x i16>, <16 x i16>)
544-
declare <8 x i32> @llvm.smax.v4i32(<8 x i32>, <8 x i32>)
545-
declare <4 x i64> @llvm.smax.v2i64(<4 x i64>, <4 x i64>)
546-
declare <32 x i8> @llvm.smin.v16i8(<32 x i8>, <32 x i8>)
547-
declare <16 x i16> @llvm.smin.v8i16(<16 x i16>, <16 x i16>)
548-
declare <8 x i32> @llvm.smin.v4i32(<8 x i32>, <8 x i32>)
549-
declare <4 x i64> @llvm.smin.v2i64(<4 x i64>, <4 x i64>)
550-
declare <32 x i8> @llvm.umax.v16i8(<32 x i8>, <32 x i8>)
551-
declare <16 x i16> @llvm.umax.v8i16(<16 x i16>, <16 x i16>)
552-
declare <8 x i32> @llvm.umax.v4i32(<8 x i32>, <8 x i32>)
553-
declare <4 x i64> @llvm.umax.v2i64(<4 x i64>, <4 x i64>)
554-
declare <32 x i8> @llvm.umin.v16i8(<32 x i8>, <32 x i8>)
555-
declare <16 x i16> @llvm.umin.v8i16(<16 x i16>, <16 x i16>)
556-
declare <8 x i32> @llvm.umin.v4i32(<8 x i32>, <8 x i32>)
557-
declare <4 x i64> @llvm.umin.v2i64(<4 x i64>, <4 x i64>)
542+
declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>)
543+
declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
544+
declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
545+
declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
546+
declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>)
547+
declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
548+
declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
549+
declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
550+
declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>)
551+
declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
552+
declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
553+
declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
554+
declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>)
555+
declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
556+
declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
557+
declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)

0 commit comments

Comments
 (0)