Commit ac03e3f

[RISCV] Use 'long' in aes64 Zknd/Zkne builtin tests. NFC
This matches the data type of the intrinsics. This can be seen from the removal of the sext and trunc instructions from the IR.

Reviewed By: kito-cheng

Differential Revision: https://reviews.llvm.org/D154572
1 parent 7c9230c
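For context, a minimal sketch (not part of the commit) of why the tests now use `long`: on RV64 the aes64 builtins lower to intrinsics that take and return i64 values, so `long` (64-bit on RV64) matches the intrinsic signature, while `int` forces Clang to emit sext/trunc around the call. The function names below are illustrative only, assuming a riscv64 target with Zknd enabled.

/* Hypothetical example, not from the test files. The 'long' version calls
   llvm.riscv.aes64ds with its operands directly, while the 'int' version
   needs a sext of each argument and a trunc of the i64 result. */

long aes64ds_long(long rs1, long rs2) {
  return __builtin_riscv_aes64ds_64(rs1, rs2);  /* i64 in, i64 out: no conversions */
}

int aes64ds_int(int rs1, int rs2) {
  return __builtin_riscv_aes64ds_64(rs1, rs2);  /* sext i32 -> i64, then trunc i64 -> i32 */
}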

3 files changed: +57, -76 lines

clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c

Lines changed: 15 additions & 20 deletions
@@ -6,32 +6,27 @@
 
 // RV64ZKND-ZKNE-LABEL: @aes64ks1i(
 // RV64ZKND-ZKNE-NEXT: entry:
-// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[CONV]], i32 0)
-// RV64ZKND-ZKNE-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKND-ZKNE-NEXT: ret i32 [[CONV1]]
+// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[TMP0]], i32 0)
+// RV64ZKND-ZKNE-NEXT: ret i64 [[TMP1]]
 //
-int aes64ks1i(int rs1) {
+long aes64ks1i(long rs1) {
   return __builtin_riscv_aes64ks1i_64(rs1, 0);
 }
 
 // RV64ZKND-ZKNE-LABEL: @aes64ks2(
 // RV64ZKND-ZKNE-NEXT: entry:
-// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKND-ZKNE-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKND-ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKND-ZKNE-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKND-ZKNE-NEXT: ret i32 [[CONV2]]
+// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKND-ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKND-ZKNE-NEXT: ret i64 [[TMP2]]
 //
-int aes64ks2(int rs1, int rs2) {
+long aes64ks2(long rs1, long rs2) {
   return __builtin_riscv_aes64ks2_64(rs1, rs2);
 }

clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c

Lines changed: 24 additions & 32 deletions
@@ -5,52 +5,44 @@
 
 // RV64ZKND-LABEL: @aes64dsm(
 // RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKND-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKND-NEXT: ret i32 [[CONV2]]
+// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKND-NEXT: ret i64 [[TMP2]]
 //
-int aes64dsm(int rs1, int rs2) {
+long aes64dsm(long rs1, long rs2) {
   return __builtin_riscv_aes64dsm_64(rs1, rs2);
 }
 
 
 // RV64ZKND-LABEL: @aes64ds(
 // RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKND-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKND-NEXT: ret i32 [[CONV2]]
+// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKND-NEXT: ret i64 [[TMP2]]
 //
-int aes64ds(int rs1, int rs2) {
+long aes64ds(long rs1, long rs2) {
   return __builtin_riscv_aes64ds_64(rs1, rs2);
 }
 
 
 // RV64ZKND-LABEL: @aes64im(
 // RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[CONV]])
-// RV64ZKND-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKND-NEXT: ret i32 [[CONV1]]
+// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[TMP0]])
+// RV64ZKND-NEXT: ret i64 [[TMP1]]
 //
-int aes64im(int rs1) {
+long aes64im(long rs1) {
   return __builtin_riscv_aes64im_64(rs1);
 }

clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c

Lines changed: 18 additions & 24 deletions
@@ -5,37 +5,31 @@
 
 // RV64ZKNE-LABEL: @aes64es(
 // RV64ZKNE-NEXT: entry:
-// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNE-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNE-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKNE-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKNE-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKNE-NEXT: ret i32 [[CONV2]]
+// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKNE-NEXT: ret i64 [[TMP2]]
 //
-int aes64es(int rs1, int rs2) {
+long aes64es(long rs1, long rs2) {
   return __builtin_riscv_aes64es_64(rs1, rs2);
 }
 
 
 // RV64ZKNE-LABEL: @aes64esm(
 // RV64ZKNE-NEXT: entry:
-// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNE-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNE-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKNE-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKNE-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKNE-NEXT: ret i32 [[CONV2]]
+// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKNE-NEXT: ret i64 [[TMP2]]
 //
-int aes64esm(int rs1, int rs2) {
+long aes64esm(long rs1, long rs2) {
   return __builtin_riscv_aes64esm_64(rs1, rs2);
 }
