@@ -86,3 +86,79 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
   %a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
   ret <vscale x 32 x i32> %a
 }
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
+; RV32-LABEL: caller_tuple_return:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    call callee_tuple_return
+; RV32-NEXT:    vmv2r.v v12, v8
+; RV32-NEXT:    vmv2r.v v8, v10
+; RV32-NEXT:    vmv2r.v v10, v12
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: caller_tuple_return:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    call callee_tuple_return
+; RV64-NEXT:    vmv2r.v v12, v8
+; RV64-NEXT:    vmv2r.v v8, v10
+; RV64-NEXT:    vmv2r.v v10, v12
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+  %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
+  %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
+  %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %c, 0
+  %e = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %d, <vscale x 4 x i32> %b, 1
+  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %e
+}
+
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+
+define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) {
+; RV32-LABEL: caller_tuple_argument:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    vmv2r.v v12, v8
+; RV32-NEXT:    vmv2r.v v8, v10
+; RV32-NEXT:    vmv2r.v v10, v12
+; RV32-NEXT:    call callee_tuple_argument
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: caller_tuple_argument:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    vmv2r.v v12, v8
+; RV64-NEXT:    vmv2r.v v8, v10
+; RV64-NEXT:    vmv2r.v v10, v12
+; RV64-NEXT:    call callee_tuple_argument
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 0
+  %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 1
+  %c = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %b, 0
+  %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, <vscale x 4 x i32> %a, 1
+  call void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %d)
+  ret void
+}
+
+declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})