@@ -103,15 +103,15 @@ define <8 x i1> @fv8(ptr %p, i64 %index, i64 %tc) {
 define <32 x i1> @fv32(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vsaddu.vx v16, v16, a1
+; CHECK-NEXT:    vmsltu.vx v0, v16, a2
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a1
 ; CHECK-NEXT:    vmsltu.vx v16, v8, a2
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    vsaddu.vx v8, v8, a1
-; CHECK-NEXT:    vmsltu.vx v0, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v0, v16, 2
 ; CHECK-NEXT:    ret
@@ -122,15 +122,15 @@ define <32 x i1> @fv32(ptr %p, i64 %index, i64 %tc) {
 define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv64:
 ; CHECK: # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_0)
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vsaddu.vx v16, v16, a1
+; CHECK-NEXT:    vmsltu.vx v0, v16, a2
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a1
 ; CHECK-NEXT:    vmsltu.vx v16, v8, a2
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    vsaddu.vx v8, v8, a1
-; CHECK-NEXT:    vmsltu.vx v0, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
 ; CHECK-NEXT:    vslideup.vi v0, v16, 2
 ; CHECK-NEXT:    lui a0, %hi(.LCPI9_1)
@@ -157,15 +157,15 @@ define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
 define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv128:
 ; CHECK: # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vsaddu.vx v16, v16, a1
+; CHECK-NEXT:    vmsltu.vx v0, v16, a2
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a1
 ; CHECK-NEXT:    vmsltu.vx v16, v8, a2
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    vsaddu.vx v8, v8, a1
-; CHECK-NEXT:    vmsltu.vx v0, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v0, v16, 2
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_1)