Skip to content

Commit a3ac9a5

Browse files
committed
fixup! [RISCV] RISCV vector calling convention (1/2)
1 parent 9041f46 commit a3ac9a5

File tree

3 files changed

+5
-133
lines changed

3 files changed

+5
-133
lines changed

clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -7,21 +7,17 @@
77
// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @bar
88
vint32m1_t __attribute__((riscv_vector_cc)) bar(vint32m1_t input);
99
vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
10-
vint32m1_t ret;
11-
vint32m1_t val;
12-
val = __riscv_vle32_v_i32m1(base, vl);
13-
ret = bar(input);
10+
vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
11+
vint32m1_t ret = bar(input);
1412
__riscv_vse32_v_i32m1(base, val, vl);
1513
return ret;
1614
}
1715

1816
// CHECK-LLVM: call <vscale x 2 x i32> @baz
1917
vint32m1_t baz(vint32m1_t input);
2018
vint32m1_t test_no_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
21-
vint32m1_t ret;
22-
vint32m1_t val;
23-
val = __riscv_vle32_v_i32m1(base, vl);
24-
ret = baz(input);
19+
vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
20+
vint32m1_t ret = baz(input);
2521
__riscv_vse32_v_i32m1(base, val, vl);
2622
return ret;
2723
}

llvm/lib/Target/RISCV/RISCVFrameLowering.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1110,7 +1110,7 @@ RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
11101110
// First push RVV Callee Saved object, then push RVV stack object
11111111
std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
11121112
const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
1113-
if (RVVCSI.size())
1113+
if (!RVVCSI.empty())
11141114
pushRVVObjects(RVVCSI[0].getFrameIdx(),
11151115
RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
11161116
pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll

Lines changed: 0 additions & 124 deletions
Original file line numberDiff line numberDiff line change
@@ -695,68 +695,6 @@ define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
695695
; CHECK-NEXT: .cfi_def_cfa sp, 16
696696
; CHECK-NEXT: addi sp, sp, 16
697697
; CHECK-NEXT: ret
698-
; RV32-LABEL: fshr_v16i64:
699-
; RV32: # %bb.0:
700-
; RV32-NEXT: addi sp, sp, -16
701-
; RV32-NEXT: .cfi_def_cfa_offset 16
702-
; RV32-NEXT: csrr a2, vlenb
703-
; RV32-NEXT: slli a2, a2, 3
704-
; RV32-NEXT: sub sp, sp, a2
705-
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
706-
; RV32-NEXT: addi a2, sp, 16
707-
; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
708-
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
709-
; RV32-NEXT: vle64.v v24, (a0)
710-
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
711-
; RV32-NEXT: vsll.vi v16, v8, 1, v0.t
712-
; RV32-NEXT: li a0, 32
713-
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
714-
; RV32-NEXT: vmv.v.i v8, -1
715-
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
716-
; RV32-NEXT: vxor.vv v8, v24, v8, v0.t
717-
; RV32-NEXT: li a0, 63
718-
; RV32-NEXT: vand.vx v8, v8, a0, v0.t
719-
; RV32-NEXT: vsll.vv v8, v16, v8, v0.t
720-
; RV32-NEXT: vand.vx v16, v24, a0, v0.t
721-
; RV32-NEXT: addi a0, sp, 16
722-
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
723-
; RV32-NEXT: vsrl.vv v16, v24, v16, v0.t
724-
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
725-
; RV32-NEXT: csrr a0, vlenb
726-
; RV32-NEXT: slli a0, a0, 3
727-
; RV32-NEXT: add sp, sp, a0
728-
; RV32-NEXT: .cfi_def_cfa sp, 16
729-
; RV32-NEXT: addi sp, sp, 16
730-
; RV32-NEXT: ret
731-
; RV64-LABEL: fshr_v16i64:
732-
; RV64: # %bb.0:
733-
; RV64-NEXT: addi sp, sp, -16
734-
; RV64-NEXT: .cfi_def_cfa_offset 16
735-
; RV64-NEXT: csrr a2, vlenb
736-
; RV64-NEXT: slli a2, a2, 3
737-
; RV64-NEXT: sub sp, sp, a2
738-
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
739-
; RV64-NEXT: addi a2, sp, 16
740-
; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
741-
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
742-
; RV64-NEXT: vle64.v v24, (a0)
743-
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
744-
; RV64-NEXT: vsll.vi v16, v8, 1, v0.t
745-
; RV64-NEXT: li a0, 63
746-
; RV64-NEXT: vnot.v v8, v24, v0.t
747-
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
748-
; RV64-NEXT: vsll.vv v8, v16, v8, v0.t
749-
; RV64-NEXT: vand.vx v16, v24, a0, v0.t
750-
; RV64-NEXT: addi a0, sp, 16
751-
; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
752-
; RV64-NEXT: vsrl.vv v16, v24, v16, v0.t
753-
; RV64-NEXT: vor.vv v8, v8, v16, v0.t
754-
; RV64-NEXT: csrr a0, vlenb
755-
; RV64-NEXT: slli a0, a0, 3
756-
; RV64-NEXT: add sp, sp, a0
757-
; RV64-NEXT: .cfi_def_cfa sp, 16
758-
; RV64-NEXT: addi sp, sp, 16
759-
; RV64-NEXT: ret
760698
%res = call <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
761699
ret <16 x i64> %res
762700
}
@@ -793,68 +731,6 @@ define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
793731
; CHECK-NEXT: .cfi_def_cfa sp, 16
794732
; CHECK-NEXT: addi sp, sp, 16
795733
; CHECK-NEXT: ret
796-
; RV32-LABEL: fshl_v16i64:
797-
; RV32: # %bb.0:
798-
; RV32-NEXT: addi sp, sp, -16
799-
; RV32-NEXT: .cfi_def_cfa_offset 16
800-
; RV32-NEXT: csrr a2, vlenb
801-
; RV32-NEXT: slli a2, a2, 3
802-
; RV32-NEXT: sub sp, sp, a2
803-
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
804-
; RV32-NEXT: addi a2, sp, 16
805-
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
806-
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
807-
; RV32-NEXT: vle64.v v24, (a0)
808-
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
809-
; RV32-NEXT: vsrl.vi v16, v16, 1, v0.t
810-
; RV32-NEXT: li a0, 32
811-
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
812-
; RV32-NEXT: vmv.v.i v8, -1
813-
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
814-
; RV32-NEXT: vxor.vv v8, v24, v8, v0.t
815-
; RV32-NEXT: li a0, 63
816-
; RV32-NEXT: vand.vx v8, v8, a0, v0.t
817-
; RV32-NEXT: vsrl.vv v8, v16, v8, v0.t
818-
; RV32-NEXT: vand.vx v16, v24, a0, v0.t
819-
; RV32-NEXT: addi a0, sp, 16
820-
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
821-
; RV32-NEXT: vsll.vv v16, v24, v16, v0.t
822-
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
823-
; RV32-NEXT: csrr a0, vlenb
824-
; RV32-NEXT: slli a0, a0, 3
825-
; RV32-NEXT: add sp, sp, a0
826-
; RV32-NEXT: .cfi_def_cfa sp, 16
827-
; RV32-NEXT: addi sp, sp, 16
828-
; RV32-NEXT: ret
829-
; RV64-LABEL: fshl_v16i64:
830-
; RV64: # %bb.0:
831-
; RV64-NEXT: addi sp, sp, -16
832-
; RV64-NEXT: .cfi_def_cfa_offset 16
833-
; RV64-NEXT: csrr a2, vlenb
834-
; RV64-NEXT: slli a2, a2, 3
835-
; RV64-NEXT: sub sp, sp, a2
836-
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
837-
; RV64-NEXT: addi a2, sp, 16
838-
; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
839-
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
840-
; RV64-NEXT: vle64.v v24, (a0)
841-
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
842-
; RV64-NEXT: vsrl.vi v16, v16, 1, v0.t
843-
; RV64-NEXT: li a0, 63
844-
; RV64-NEXT: vnot.v v8, v24, v0.t
845-
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
846-
; RV64-NEXT: vsrl.vv v8, v16, v8, v0.t
847-
; RV64-NEXT: vand.vx v16, v24, a0, v0.t
848-
; RV64-NEXT: addi a0, sp, 16
849-
; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
850-
; RV64-NEXT: vsll.vv v16, v24, v16, v0.t
851-
; RV64-NEXT: vor.vv v8, v16, v8, v0.t
852-
; RV64-NEXT: csrr a0, vlenb
853-
; RV64-NEXT: slli a0, a0, 3
854-
; RV64-NEXT: add sp, sp, a0
855-
; RV64-NEXT: .cfi_def_cfa sp, 16
856-
; RV64-NEXT: addi sp, sp, 16
857-
; RV64-NEXT: ret
858734
%res = call <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
859735
ret <16 x i64> %res
860736
}

0 commit comments

Comments (0)