@@ -12,6 +12,21 @@ define <vscale x 1 x i8> @vsra_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8
   ret <vscale x 1 x i8> %vc
 }
 
+define <vscale x 1 x i8> @vsra_vv_nxv1i8_sext_zext(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vsra_vv_nxv1i8_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmin.vx v9, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i32>
+  %zexted_vb = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
+  %expand = ashr <vscale x 1 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 1 x i8> @vsra_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv1i8:
 ; CHECK:       # %bb.0:
@@ -46,6 +61,21 @@ define <vscale x 2 x i8> @vsra_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8
   ret <vscale x 2 x i8> %vc
 }
 
+define <vscale x 2 x i8> @vsra_vv_nxv2i8_sext_zext(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vsra_vv_nxv2i8_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmin.vx v9, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 2 x i8> %va to <vscale x 2 x i32>
+  %zexted_vb = zext <vscale x 2 x i8> %va to <vscale x 2 x i32>
+  %expand = ashr <vscale x 2 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 2 x i32> %expand to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vsra_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv2i8:
 ; CHECK:       # %bb.0:
@@ -80,6 +110,21 @@ define <vscale x 4 x i8> @vsra_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8
   ret <vscale x 4 x i8> %vc
 }
 
+define <vscale x 4 x i8> @vsra_vv_nxv4i8_sext_zext(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
+; CHECK-LABEL: vsra_vv_nxv4i8_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmin.vx v9, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 4 x i8> %va to <vscale x 4 x i32>
+  %zexted_vb = zext <vscale x 4 x i8> %va to <vscale x 4 x i32>
+  %expand = ashr <vscale x 4 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 4 x i32> %expand to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %vc
+}
+
 define <vscale x 4 x i8> @vsra_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv4i8:
 ; CHECK:       # %bb.0:
@@ -114,6 +159,21 @@ define <vscale x 8 x i8> @vsra_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8
   ret <vscale x 8 x i8> %vc
 }
 
+define <vscale x 8 x i8> @vsra_vv_nxv8i8_sext_zext(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: vsra_vv_nxv8i8_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmin.vx v9, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 8 x i8> %va to <vscale x 8 x i32>
+  %zexted_vb = zext <vscale x 8 x i8> %va to <vscale x 8 x i32>
+  %expand = ashr <vscale x 8 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 8 x i32> %expand to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %vc
+}
+
 define <vscale x 8 x i8> @vsra_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv8i8:
 ; CHECK:       # %bb.0:
@@ -148,6 +208,21 @@ define <vscale x 16 x i8> @vsra_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
   ret <vscale x 16 x i8> %vc
 }
 
+define <vscale x 16 x i8> @vsra_vv_nxv16i8_sext_zext(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vsra_vv_nxv16i8_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmin.vx v10, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 16 x i8> %va to <vscale x 16 x i32>
+  %zexted_vb = zext <vscale x 16 x i8> %va to <vscale x 16 x i32>
+  %expand = ashr <vscale x 16 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 16 x i32> %expand to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %vc
+}
+
 define <vscale x 16 x i8> @vsra_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv16i8:
 ; CHECK:       # %bb.0:
@@ -250,6 +325,21 @@ define <vscale x 1 x i16> @vsra_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
   ret <vscale x 1 x i16> %vc
 }
 
+define <vscale x 1 x i16> @vsra_vv_nxv1i16_sext_zext(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vsra_vv_nxv1i16_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmin.vx v9, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  %zexted_vb = zext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  %expand = ashr <vscale x 1 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %vc
+}
+
 define <vscale x 1 x i16> @vsra_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv1i16:
 ; CHECK:       # %bb.0:
@@ -284,6 +374,21 @@ define <vscale x 2 x i16> @vsra_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
   ret <vscale x 2 x i16> %vc
 }
 
+define <vscale x 2 x i16> @vsra_vv_nxv2i16_sext_zext(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vsra_vv_nxv2i16_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmin.vx v9, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  %zexted_vb = zext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  %expand = ashr <vscale x 2 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 2 x i32> %expand to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %vc
+}
+
 define <vscale x 2 x i16> @vsra_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv2i16:
 ; CHECK:       # %bb.0:
@@ -318,6 +423,21 @@ define <vscale x 4 x i16> @vsra_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
   ret <vscale x 4 x i16> %vc
 }
 
+define <vscale x 4 x i16> @vsra_vv_nxv4i16_sext_zext(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vsra_vv_nxv4i16_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmin.vx v9, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  %zexted_vb = zext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  %expand = ashr <vscale x 4 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 4 x i32> %expand to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %vc
+}
+
 define <vscale x 4 x i16> @vsra_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv4i16:
 ; CHECK:       # %bb.0:
@@ -352,6 +472,21 @@ define <vscale x 8 x i16> @vsra_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
   ret <vscale x 8 x i16> %vc
 }
 
+define <vscale x 8 x i16> @vsra_vv_nxv8i16_sext_zext(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vsra_vv_nxv8i16_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmin.vx v10, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  %zexted_vb = zext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  %expand = ashr <vscale x 8 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 8 x i32> %expand to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %vc
+}
+
 define <vscale x 8 x i16> @vsra_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv8i16:
 ; CHECK:       # %bb.0:
@@ -386,6 +521,21 @@ define <vscale x 16 x i16> @vsra_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x
   ret <vscale x 16 x i16> %vc
 }
 
+define <vscale x 16 x i16> @vsra_vv_nxv16i16_sext_zext(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vsra_vv_nxv16i16_sext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmin.vx v12, v8, a0
+; CHECK-NEXT:    vsra.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %sexted_va = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  %zexted_vb = zext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  %expand = ashr <vscale x 16 x i32> %sexted_va, %zexted_vb
+  %vc = trunc <vscale x 16 x i32> %expand to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %vc
+}
+
 define <vscale x 16 x i16> @vsra_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsra_vx_nxv16i16:
 ; CHECK:       # %bb.0: