[RISC-V] Limit vscale interleaving to addrspace 0. (#91573)
The vlseg and vsseg intrinsic functions are not overloaded on pointer
type, so they cannot handle non-default address spaces. (The fixed-length
seg2.load/seg2.store intrinsics are overloaded on the pointer type, which
is why the fixed-vector address-space tests below are still transformed,
while the vscale ones are now left alone.)
This fixes an error we see after #90583.
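A minimal sketch of the guard this implies, assuming a helper in the RISC-V
interleaved-access lowering (the function name here is hypothetical, not the
verbatim patch):

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Load side: before lowering a load + deinterleave to @llvm.riscv.vlseg*,
// bail out unless the pointer is in the default address space; vlseg has
// no pointer-type overload to encode anything else.
static bool canLowerToVLSEG(const LoadInst *LI) {
  // addrspace(0) only; other address spaces keep the generic
  // load + deinterleave sequence.
  return LI->getPointerAddressSpace() == 0;
}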
; RV32-NEXT: [[TMP1:%.*]] = call { <8 x i32>, <8 x i32> } @llvm.riscv.seg2.load.v8i32.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 8)
; RV32-NEXT: [[TMP2:%.*]] = extractvalue { <8 x i32>, <8 x i32> } [[TMP1]], 1
; RV32-NEXT: [[TMP3:%.*]] = extractvalue { <8 x i32>, <8 x i32> } [[TMP1]], 0
; RV32-NEXT: ret void
;
; RV64-LABEL: @load_factor2_as(
; RV64-NEXT: [[TMP1:%.*]] = call { <8 x i32>, <8 x i32> } @llvm.riscv.seg2.load.v8i32.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 8)
; RV64-NEXT: [[TMP2:%.*]] = extractvalue { <8 x i32>, <8 x i32> } [[TMP1]], 1
; RV64-NEXT: [[TMP3:%.*]] = extractvalue { <8 x i32>, <8 x i32> } [[TMP1]], 0
; RV64-NEXT: ret void
;
  %interleaved.vec = load <16 x i32>, ptr addrspace(1) %ptr
  %v0 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %v1 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  ret void
}

define void @load_factor2_vscale(ptr %ptr) {
; RV32-LABEL: @load_factor2_vscale(
; RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlseg2.nxv8i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[PTR:%.*]], i32 -1)
; RV32-NEXT: ret void
;
; RV64-LABEL: @load_factor2_vscale(
; RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlseg2.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[PTR:%.*]], i64 -1)
; RV64-NEXT: ret void
;
  %interleaved.vec = load <vscale x 16 x i32>, ptr %ptr
  %v = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %interleaved.vec)

; RV64-NEXT: call void @llvm.riscv.seg2.store.v8i8.p1.i64(<8 x i8> [[TMP1]], <8 x i8> [[TMP2]], ptr addrspace(1) [[PTR:%.*]], i64 8)
; RV64-NEXT: ret void
;
  %interleaved.vec = shufflevector <8 x i8> %v0, <8 x i8> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  store <16 x i8> %interleaved.vec, ptr addrspace(1) %ptr, align 4
  ret void
}

define void @store_factor2_vscale(ptr %ptr, <vscale x 8 x i8> %v0, <vscale x 8 x i8> %v1) {
; RV32-LABEL: @store_factor2_vscale(
; RV32-NEXT: call void @llvm.riscv.vsseg2.nxv8i8.i32(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], ptr [[PTR:%.*]], i32 -1)
; RV32-NEXT: ret void
;
; RV64-LABEL: @store_factor2_vscale(
; RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], ptr [[PTR:%.*]], i64 -1)
; RV64-NEXT: ret void
;
  %interleaved.vec = call <vscale x 16 x i8> @llvm.vector.interleave2.nxv8i8(<vscale x 8 x i8> %v0, <vscale x 8 x i8> %v1)
  store <vscale x 16 x i8> %interleaved.vec, ptr %ptr, align 4
  ret void
}

define void @store_factor2_vscale_as(ptr addrspace(1) %ptr, <vscale x 8 x i8> %v0, <vscale x 8 x i8> %v1) {
; RV32-LABEL: @store_factor2_vscale_as(
; RV32-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]])
; RV32-NEXT: store <vscale x 16 x i8> [[INTERLEAVED_VEC]], ptr addrspace(1) [[PTR:%.*]], align 4
; RV32-NEXT: ret void
;
; RV64-LABEL: @store_factor2_vscale_as(
; RV64-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]])
; RV64-NEXT: store <vscale x 16 x i8> [[INTERLEAVED_VEC]], ptr addrspace(1) [[PTR:%.*]], align 4
; RV64-NEXT: ret void
;
  %interleaved.vec = call <vscale x 16 x i8> @llvm.vector.interleave2.nxv8i8(<vscale x 8 x i8> %v0, <vscale x 8 x i8> %v1)
  store <vscale x 16 x i8> %interleaved.vec, ptr addrspace(1) %ptr, align 4
  ret void
}
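The untransformed store_factor2_vscale_as output above is the effect of the
symmetric check on the store side; a sketch under the same assumptions, using
the same include as the load-side sketch (hypothetical helper name):

// Store side: @llvm.riscv.vsseg* is likewise mangled only on the element and
// XLEN types, so an interleave feeding a store into a non-default address
// space must stay as the plain interleave2 + store sequence.
static bool canLowerToVSSEG(const StoreInst *SI) {
  return SI->getPointerAddressSpace() == 0;
}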
define void @store_factor3(ptr %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {