 define <vscale x 1 x i32> @load(ptr %x) {
 ; CHECK-LABEL: define <vscale x 1 x i32> @load
 ; CHECK-SAME: (ptr [[X:%.*]]) {
-; CHECK-NEXT:    [[A:%.*]] = load [[STRUCT_TEST:%.*]], ptr [[X]], align 4
-; CHECK-NEXT:    [[B:%.*]] = extractvalue [[STRUCT_TEST]] [[A]], 1
-; CHECK-NEXT:    ret <vscale x 1 x i32> [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 2
+; CHECK-NEXT:    [[A_ELT1:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[TMP2]]
+; CHECK-NEXT:    [[A_UNPACK2:%.*]] = load <vscale x 1 x i32>, ptr [[A_ELT1]], align 4
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A_UNPACK2]]
 ;
   %a = load %struct.test, ptr %x
   %b = extractvalue %struct.test %a, 1
@@ -18,9 +20,11 @@ define <vscale x 1 x i32> @load(ptr %x) {
 define void @store(ptr %x, <vscale x 1 x i32> %y, <vscale x 1 x i32> %z) {
 ; CHECK-LABEL: define void @store
 ; CHECK-SAME: (ptr [[X:%.*]], <vscale x 1 x i32> [[Y:%.*]], <vscale x 1 x i32> [[Z:%.*]]) {
-; CHECK-NEXT:    [[A:%.*]] = insertvalue [[STRUCT_TEST:%.*]] undef, <vscale x 1 x i32> [[Y]], 0
-; CHECK-NEXT:    [[B:%.*]] = insertvalue [[STRUCT_TEST]] [[A]], <vscale x 1 x i32> [[Z]], 1
-; CHECK-NEXT:    store [[STRUCT_TEST]] [[B]], ptr [[X]], align 4
+; CHECK-NEXT:    store <vscale x 1 x i32> [[Y]], ptr [[X]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 2
+; CHECK-NEXT:    [[X_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[TMP2]]
+; CHECK-NEXT:    store <vscale x 1 x i32> [[Z]], ptr [[X_REPACK1]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %a = insertvalue %struct.test undef, <vscale x 1 x i32> %y, 0
@@ -33,8 +37,14 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>} @split_load(ptr %p) nounwind {
 ; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @split_load
 ; CHECK-SAME: (ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[R:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[P]], align 16
-; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R]]
+; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 4
+; CHECK-NEXT:    [[R_ELT1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT:    [[R_UNPACK2:%.*]] = load <vscale x 16 x i8>, ptr [[R_ELT1]], align 16
+; CHECK-NEXT:    [[R3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[R_UNPACK2]], 1
+; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R3]]
 ;
 entry:
   %r = load {<vscale x 16 x i8>, <vscale x 16 x i8>}, ptr %p
@@ -58,7 +68,13 @@ define void @split_store({<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p) nounwind {
 ; CHECK-LABEL: define void @split_store
 ; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], ptr [[P]], align 16
+; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
+; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
+; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
+; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -104,9 +120,21 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>} @check_nxv16i8_nxv4i32({<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p) nounwind {
 ; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @check_nxv16i8_nxv4i32
 ; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], ptr [[P]], align 16
-; CHECK-NEXT:    [[R:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[P]], align 16
-; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R]]
+; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
+; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
+; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
+; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
+; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[TMP3]], 4
+; CHECK-NEXT:    [[R_ELT3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP4]]
+; CHECK-NEXT:    [[R_UNPACK4:%.*]] = load <vscale x 16 x i8>, ptr [[R_ELT3]], align 16
+; CHECK-NEXT:    [[R5:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[R_UNPACK4]], 1
+; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R5]]
 ;
 entry:
   store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
@@ -119,9 +147,21 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>} @alloca_nxv16i8_nxv4i32({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) nounwind {
 ; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[P:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; CHECK-NEXT:    store { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], ptr [[P]], align 16
-; CHECK-NEXT:    [[R:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[P]], align 16
-; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R]]
+; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
+; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
+; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
+; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
+; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[TMP3]], 4
+; CHECK-NEXT:    [[R_ELT3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP4]]
+; CHECK-NEXT:    [[R_UNPACK4:%.*]] = load <vscale x 16 x i8>, ptr [[R_ELT3]], align 16
+; CHECK-NEXT:    [[R5:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[R_UNPACK4]], 1
+; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R5]]
 ;
 entry:
   %p = alloca {<vscale x 4 x i32>, <vscale x 4 x i32>}