@@ -86,3 +86,59 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
   %a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
   ret <vscale x 32 x i32> %a
 }
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
+; RV32-LABEL: caller_tuple_return:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    call callee_tuple_return
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: caller_tuple_return:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    call callee_tuple_return
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %a
+}
+
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+
+define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) {
+; RV32-LABEL: caller_tuple_argument:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    call callee_tuple_argument
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: caller_tuple_argument:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    call callee_tuple_argument
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  call void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x)
+  ret void
+}
+
+declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})