 // RV64ZKND-LABEL: @aes64dsm(
 // RV64ZKND-NEXT:  entry:
-// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKND-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKND-NEXT:    ret i32 [[CONV2]]
+// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKND-NEXT:    ret i64 [[TMP2]]
 //
-int aes64dsm(int rs1, int rs2) {
+long aes64dsm(long rs1, long rs2) {
   return __builtin_riscv_aes64dsm_64(rs1, rs2);
 }
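With the prototype corrected to `long`, the operands travel through as full XLEN-wide values and the `sext`/`trunc` pairs from the old checks disappear. A minimal usage sketch under those assumptions (not part of the test file; the wrapper name is hypothetical):

  long decrypt_middle_round_half(long state_lo, long state_hi) {
    // aes64dsm: one middle decryption round step (Inverse ShiftRows,
    // SubBytes, and MixColumns per the Zknd spec), producing one
    // 64-bit half of the updated 128-bit AES state.
    return __builtin_riscv_aes64dsm_64(state_lo, state_hi);
  }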
|
|
 // RV64ZKND-LABEL: @aes64ds(
 // RV64ZKND-NEXT:  entry:
-// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKND-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
-// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[CONV]], i64 [[CONV1]])
-// RV64ZKND-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
-// RV64ZKND-NEXT:    ret i32 [[CONV2]]
+// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
+// RV64ZKND-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZKND-NEXT:    ret i64 [[TMP2]]
 //
-int aes64ds(int rs1, int rs2) {
+long aes64ds(long rs1, long rs2) {
   return __builtin_riscv_aes64ds_64(rs1, rs2);
 }
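For reference, `aes64ds` is the final-round counterpart of `aes64dsm`: per the Zknd spec it applies Inverse ShiftRows and SubBytes but omits Inverse MixColumns, which the last AES round skips. A matching sketch (wrapper name hypothetical, same assumptions as above):

  long decrypt_final_round_half(long state_lo, long state_hi) {
    // aes64ds: final decryption round step; no InvMixColumns.
    return __builtin_riscv_aes64ds_64(state_lo, state_hi);
  }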
|
|
 // RV64ZKND-LABEL: @aes64im(
 // RV64ZKND-NEXT:  entry:
-// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKND-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKND-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKND-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[CONV]])
-// RV64ZKND-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKND-NEXT:    ret i32 [[CONV1]]
+// RV64ZKND-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKND-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKND-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[TMP0]])
+// RV64ZKND-NEXT:    ret i64 [[TMP1]]
 //
-int aes64im(int rs1) {
+long aes64im(long rs1) {
   return __builtin_riscv_aes64im_64(rs1);
 }
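`aes64im` applies the Inverse MixColumns transform to a single 64-bit value; in the equivalent inverse cipher it is typically used to convert encryption round keys into decryption round keys. A sketch under that assumption (helper name hypothetical):

  long invert_round_key_half(long rk_half) {
    // aes64im: InvMixColumns on one 64-bit half of an encryption
    // round key, making it consumable by aes64dsm on the decrypt path.
    return __builtin_riscv_aes64im_64(rk_half);
  }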
|