Skip to content

Commit 244fd4d

Browse files
committed
[RISCV] Run mem2reg on more scalar C builtin tests to remove allocas and simplify checks. NFC
1 parent d6a48a3 commit 244fd4d

File tree

2 files changed

+20
-45
lines changed

2 files changed

+20
-45
lines changed

clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb.c

Lines changed: 7 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,38 +1,30 @@
11
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
22
// RUN: %clang_cc1 -triple riscv32 -target-feature +zbb -emit-llvm %s -o - \
3+
// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
34
// RUN: | FileCheck %s -check-prefix=RV32ZBB
45

56
// RV32ZBB-LABEL: @orc_b_32(
67
// RV32ZBB-NEXT: entry:
7-
// RV32ZBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
8-
// RV32ZBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
9-
// RV32ZBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
10-
// RV32ZBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.orc.b.i32(i32 [[TMP0]])
11-
// RV32ZBB-NEXT: ret i32 [[TMP1]]
8+
// RV32ZBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.orc.b.i32(i32 [[A:%.*]])
9+
// RV32ZBB-NEXT: ret i32 [[TMP0]]
1210
//
1311
unsigned int orc_b_32(unsigned int a) {
1412
return __builtin_riscv_orc_b_32(a);
1513
}
1614

1715
// RV32ZBB-LABEL: @clz_32(
1816
// RV32ZBB-NEXT: entry:
19-
// RV32ZBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
20-
// RV32ZBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
21-
// RV32ZBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
22-
// RV32ZBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
23-
// RV32ZBB-NEXT: ret i32 [[TMP1]]
17+
// RV32ZBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[A:%.*]], i1 false)
18+
// RV32ZBB-NEXT: ret i32 [[TMP0]]
2419
//
2520
unsigned int clz_32(unsigned int a) {
2621
return __builtin_riscv_clz_32(a);
2722
}
2823

2924
// RV32ZBB-LABEL: @ctz_32(
3025
// RV32ZBB-NEXT: entry:
31-
// RV32ZBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
32-
// RV32ZBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
33-
// RV32ZBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
34-
// RV32ZBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 false)
35-
// RV32ZBB-NEXT: ret i32 [[TMP1]]
26+
// RV32ZBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[A:%.*]], i1 false)
27+
// RV32ZBB-NEXT: ret i32 [[TMP0]]
3628
//
3729
unsigned int ctz_32(unsigned int a) {
3830
return __builtin_riscv_ctz_32(a);

clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbb.c

Lines changed: 13 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -1,50 +1,39 @@
11
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
22
// RUN: %clang_cc1 -triple riscv64 -target-feature +zbb -emit-llvm %s -o - \
3+
// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
34
// RUN: | FileCheck %s -check-prefix=RV64ZBB
45

56
// RV64ZBB-LABEL: @orc_b_32(
67
// RV64ZBB-NEXT: entry:
7-
// RV64ZBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
8-
// RV64ZBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
9-
// RV64ZBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
10-
// RV64ZBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.orc.b.i32(i32 [[TMP0]])
11-
// RV64ZBB-NEXT: ret i32 [[TMP1]]
8+
// RV64ZBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.orc.b.i32(i32 [[A:%.*]])
9+
// RV64ZBB-NEXT: ret i32 [[TMP0]]
1210
//
1311
unsigned int orc_b_32(unsigned int a) {
1412
return __builtin_riscv_orc_b_32(a);
1513
}
1614

1715
// RV64ZBB-LABEL: @orc_b_64(
1816
// RV64ZBB-NEXT: entry:
19-
// RV64ZBB-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
20-
// RV64ZBB-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
21-
// RV64ZBB-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
22-
// RV64ZBB-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.orc.b.i64(i64 [[TMP0]])
23-
// RV64ZBB-NEXT: ret i64 [[TMP1]]
17+
// RV64ZBB-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.orc.b.i64(i64 [[A:%.*]])
18+
// RV64ZBB-NEXT: ret i64 [[TMP0]]
2419
//
2520
unsigned long orc_b_64(unsigned long a) {
2621
return __builtin_riscv_orc_b_64(a);
2722
}
2823

2924
// RV64ZBB-LABEL: @clz_32(
3025
// RV64ZBB-NEXT: entry:
31-
// RV64ZBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
32-
// RV64ZBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
33-
// RV64ZBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
34-
// RV64ZBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
35-
// RV64ZBB-NEXT: ret i32 [[TMP1]]
26+
// RV64ZBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[A:%.*]], i1 false)
27+
// RV64ZBB-NEXT: ret i32 [[TMP0]]
3628
//
3729
unsigned int clz_32(unsigned int a) {
3830
return __builtin_riscv_clz_32(a);
3931
}
4032

4133
// RV64ZBB-LABEL: @clz_64(
4234
// RV64ZBB-NEXT: entry:
43-
// RV64ZBB-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
44-
// RV64ZBB-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
45-
// RV64ZBB-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
46-
// RV64ZBB-NEXT: [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[TMP0]], i1 false)
47-
// RV64ZBB-NEXT: [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
35+
// RV64ZBB-NEXT: [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[A:%.*]], i1 false)
36+
// RV64ZBB-NEXT: [[CAST:%.*]] = trunc i64 [[TMP0]] to i32
4837
// RV64ZBB-NEXT: ret i32 [[CAST]]
4938
//
5039
unsigned int clz_64(unsigned long a) {
@@ -53,23 +42,17 @@ unsigned int clz_64(unsigned long a) {
5342

5443
// RV64ZBB-LABEL: @ctz_32(
5544
// RV64ZBB-NEXT: entry:
56-
// RV64ZBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
57-
// RV64ZBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
58-
// RV64ZBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
59-
// RV64ZBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 false)
60-
// RV64ZBB-NEXT: ret i32 [[TMP1]]
45+
// RV64ZBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[A:%.*]], i1 false)
46+
// RV64ZBB-NEXT: ret i32 [[TMP0]]
6147
//
6248
unsigned int ctz_32(unsigned int a) {
6349
return __builtin_riscv_ctz_32(a);
6450
}
6551

6652
// RV64ZBB-LABEL: @ctz_64(
6753
// RV64ZBB-NEXT: entry:
68-
// RV64ZBB-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
69-
// RV64ZBB-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
70-
// RV64ZBB-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
71-
// RV64ZBB-NEXT: [[TMP1:%.*]] = call i64 @llvm.cttz.i64(i64 [[TMP0]], i1 false)
72-
// RV64ZBB-NEXT: [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
54+
// RV64ZBB-NEXT: [[TMP0:%.*]] = call i64 @llvm.cttz.i64(i64 [[A:%.*]], i1 false)
55+
// RV64ZBB-NEXT: [[CAST:%.*]] = trunc i64 [[TMP0]] to i32
7356
// RV64ZBB-NEXT: ret i32 [[CAST]]
7457
//
7558
unsigned int ctz_64(unsigned long a) {

0 commit comments

Comments (0)