Skip to content

Commit 2029abd

Browse files
committed
[RISCV] Ignore interleaved accesses with non-default address spaces
This fixes a crash introduced in llvm#137045 (comment), where we don't yet have overloaded pointer types for the segmented load/store intrinsics. This should be temporary until llvm#139634 lands and overloads the pointer type for these intrinsics.
1 parent fe56c8f commit 2029abd

File tree

2 files changed

+71
-5
lines changed

2 files changed

+71
-5
lines changed

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23350,6 +23350,11 @@ bool RISCVTargetLowering::isLegalInterleavedAccessType(
2335023350

2335123351
MVT ContainerVT = VT.getSimpleVT();
2335223352

23353+
// The intrinsics are not (yet) overloaded on pointer type and can only handle
23354+
// the default address space.
23355+
if (AddrSpace)
23356+
return false;
23357+
2335323358
if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
2335423359
if (!Subtarget.useRVVForFixedLengthVectors())
2335523360
return false;
@@ -23359,11 +23364,6 @@ bool RISCVTargetLowering::isLegalInterleavedAccessType(
2335923364
return false;
2336023365

2336123366
ContainerVT = getContainerForFixedLengthVector(VT.getSimpleVT());
23362-
} else {
23363-
// The intrinsics for scalable vectors are not overloaded on pointer type
23364-
// and can only handle the default address space.
23365-
if (AddrSpace)
23366-
return false;
2336723367
}
2336823368

2336923369
// Need to make sure that EMUL * NFIELDS ≤ 8
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
2+
; RUN: opt < %s -mtriple=riscv64 -mattr=+v -p interleaved-access -S | FileCheck %s
3+
4+
; Ensure we don't crash with non-zero address spaces.
5+
6+
; Factor-2 fixed-vector case: a <16 x i32> load from addrspace(1) split into
; even/odd lanes with two shufflevectors. The CHECK lines expect the
; interleaved-access pass to leave the plain load + shuffles untouched
; (no @llvm.riscv.seg2.load is formed) because the address space is non-zero.
; NOTE(review): the RUN line above calls FileCheck with only the default
; CHECK prefix, so the RV32-/RV64- prefixed lines below are never verified —
; they appear to be stale autogenerated output; confirm and regenerate.
define void @load_factor2(ptr addrspace(1) %ptr) {
7+
; RV32-LABEL: @load_factor2(
8+
; RV32-NEXT: [[TMP1:%.*]] = call { <8 x i32>, <8 x i32> } @llvm.riscv.seg2.load.mask.v8i32.i32(ptr [[PTR:%.*]], <8 x i1> splat (i1 true), i32 8)
9+
; RV32-NEXT: [[TMP2:%.*]] = extractvalue { <8 x i32>, <8 x i32> } [[TMP1]], 1
10+
; RV32-NEXT: [[TMP3:%.*]] = extractvalue { <8 x i32>, <8 x i32> } [[TMP1]], 0
11+
; RV32-NEXT: ret void
12+
;
13+
; RV64-LABEL: @load_factor2(
14+
; RV64-NEXT: [[TMP1:%.*]] = call { <8 x i32>, <8 x i32> } @llvm.riscv.seg2.load.mask.v8i32.i64(ptr [[PTR:%.*]], <8 x i1> splat (i1 true), i64 8)
15+
; RV64-NEXT: [[TMP2:%.*]] = extractvalue { <8 x i32>, <8 x i32> } [[TMP1]], 1
16+
; RV64-NEXT: [[TMP3:%.*]] = extractvalue { <8 x i32>, <8 x i32> } [[TMP1]], 0
17+
; RV64-NEXT: ret void
18+
;
19+
; CHECK-LABEL: define void @load_factor2(
20+
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
21+
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = load <16 x i32>, ptr addrspace(1) [[PTR]], align 64
22+
; CHECK-NEXT: [[V0:%.*]] = shufflevector <16 x i32> [[INTERLEAVED_VEC]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
23+
; CHECK-NEXT: [[V1:%.*]] = shufflevector <16 x i32> [[INTERLEAVED_VEC]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
24+
; CHECK-NEXT: ret void
25+
;
26+
%interleaved.vec = load <16 x i32>, ptr addrspace(1) %ptr
27+
%v0 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
28+
%v1 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
29+
ret void
30+
}
31+
32+
; Factor-2 scalable-vector case: a <vscale x 16 x i32> load from addrspace(1)
; split via @llvm.vector.deinterleave2. The CHECK lines expect the pass to
; leave the load + deinterleave2 + extractvalue sequence untouched (no
; @llvm.riscv.vlseg2 tuple intrinsic is formed) for the non-zero address space.
; NOTE(review): as with @load_factor2, the RUN line uses only the default
; CHECK prefix, so the RV32-/RV64- prefixed lines below are never checked —
; confirm whether they are stale autogenerated output.
define void @load_factor2_vscale(ptr addrspace(1) %ptr) {
33+
; RV32-LABEL: @load_factor2_vscale(
34+
; RV32-NEXT: [[TMP1:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t.i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[PTR:%.*]], i32 -1, i32 5)
35+
; RV32-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i32> @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP1]], i32 0)
36+
; RV32-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[TMP2]], 0
37+
; RV32-NEXT: [[TMP4:%.*]] = call <vscale x 8 x i32> @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP1]], i32 1)
38+
; RV32-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP3]], <vscale x 8 x i32> [[TMP4]], 1
39+
; RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP5]], 0
40+
; RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP5]], 1
41+
; RV32-NEXT: ret void
42+
;
43+
; RV64-LABEL: @load_factor2_vscale(
44+
; RV64-NEXT: [[TMP1:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[PTR:%.*]], i64 -1, i64 5)
45+
; RV64-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i32> @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP1]], i32 0)
46+
; RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[TMP2]], 0
47+
; RV64-NEXT: [[TMP4:%.*]] = call <vscale x 8 x i32> @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP1]], i32 1)
48+
; RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP3]], <vscale x 8 x i32> [[TMP4]], 1
49+
; RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP5]], 0
50+
; RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP5]], 1
51+
; RV64-NEXT: ret void
52+
;
53+
; CHECK-LABEL: define void @load_factor2_vscale(
54+
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
55+
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = load <vscale x 16 x i32>, ptr addrspace(1) [[PTR]], align 64
56+
; CHECK-NEXT: [[V:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> [[INTERLEAVED_VEC]])
57+
; CHECK-NEXT: [[T0:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[V]], 0
58+
; CHECK-NEXT: [[T1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[V]], 1
59+
; CHECK-NEXT: ret void
60+
;
61+
%interleaved.vec = load <vscale x 16 x i32>, ptr addrspace(1) %ptr
62+
%v = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %interleaved.vec)
63+
%t0 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %v, 0
64+
%t1 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %v, 1
65+
ret void
66+
}

0 commit comments

Comments
 (0)