// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zbc -emit-llvm %s -o - \
+ // RUN:   -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN:   | FileCheck %s -check-prefix=RV32ZBC
// RUN: %clang_cc1 -triple riscv64 -target-feature +zbc -emit-llvm %s -o - \
+ // RUN:   -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN:   | FileCheck %s -check-prefix=RV64ZBC

#include <stdint.h>

#if __riscv_xlen == 64
// RV64ZBC-LABEL: @clmul_64(
// RV64ZBC-NEXT: entry:
- // RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
- // RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
- // RV64ZBC-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
- // RV64ZBC-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP0]], i64 [[TMP1]])
- // RV64ZBC-NEXT: ret i64 [[TMP2]]
+ // RV64ZBC-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+ // RV64ZBC-NEXT: ret i64 [[TMP0]]
//
uint64_t clmul_64(uint64_t a, uint64_t b) {
  return __builtin_riscv_clmul_64(a, b);
}

// RV64ZBC-LABEL: @clmulh_64(
// RV64ZBC-NEXT: entry:
- // RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
- // RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
- // RV64ZBC-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
- // RV64ZBC-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP0]], i64 [[TMP1]])
- // RV64ZBC-NEXT: ret i64 [[TMP2]]
+ // RV64ZBC-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+ // RV64ZBC-NEXT: ret i64 [[TMP0]]
//
uint64_t clmulh_64(uint64_t a, uint64_t b) {
  return __builtin_riscv_clmulh_64(a, b);
}

// RV64ZBC-LABEL: @clmulr_64(
// RV64ZBC-NEXT: entry:
- // RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
- // RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
- // RV64ZBC-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
- // RV64ZBC-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
- // RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmulr.i64(i64 [[TMP0]], i64 [[TMP1]])
- // RV64ZBC-NEXT: ret i64 [[TMP2]]
+ // RV64ZBC-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.clmulr.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+ // RV64ZBC-NEXT: ret i64 [[TMP0]]
//
uint64_t clmulr_64(uint64_t a, uint64_t b) {
  return __builtin_riscv_clmulr_64(a, b);
@@ -55,25 +39,13 @@ uint64_t clmulr_64(uint64_t a, uint64_t b) {

// RV32ZBC-LABEL: @clmul_32(
// RV32ZBC-NEXT: entry:
- // RV32ZBC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
- // RV32ZBC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
- // RV32ZBC-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
- // RV32ZBC-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
- // RV32ZBC-NEXT: ret i32 [[TMP2]]
+ // RV32ZBC-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+ // RV32ZBC-NEXT: ret i32 [[TMP0]]
//
// RV64ZBC-LABEL: @clmul_32(
// RV64ZBC-NEXT: entry:
- // RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
- // RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
- // RV64ZBC-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
- // RV64ZBC-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
- // RV64ZBC-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
- // RV64ZBC-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
- // RV64ZBC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
- // RV64ZBC-NEXT: ret i32 [[TMP2]]
+ // RV64ZBC-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+ // RV64ZBC-NEXT: ret i32 [[TMP0]]
//
uint32_t clmul_32(uint32_t a, uint32_t b) {
  return __builtin_riscv_clmul_32(a, b);
@@ -82,29 +54,17 @@ uint32_t clmul_32(uint32_t a, uint32_t b) {
#if __riscv_xlen == 32
// RV32ZBC-LABEL: @clmulh_32(
// RV32ZBC-NEXT: entry:
- // RV32ZBC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
- // RV32ZBC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
- // RV32ZBC-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
- // RV32ZBC-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[TMP0]], i32 [[TMP1]])
- // RV32ZBC-NEXT: ret i32 [[TMP2]]
+ // RV32ZBC-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+ // RV32ZBC-NEXT: ret i32 [[TMP0]]
//
uint32_t clmulh_32(uint32_t a, uint32_t b) {
  return __builtin_riscv_clmulh_32(a, b);
}

// RV32ZBC-LABEL: @clmulr_32(
// RV32ZBC-NEXT: entry:
- // RV32ZBC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
- // RV32ZBC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
- // RV32ZBC-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
- // RV32ZBC-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
- // RV32ZBC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmulr.i32(i32 [[TMP0]], i32 [[TMP1]])
- // RV32ZBC-NEXT: ret i32 [[TMP2]]
+ // RV32ZBC-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.clmulr.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+ // RV32ZBC-NEXT: ret i32 [[TMP0]]
//
uint32_t clmulr_32(uint32_t a, uint32_t b) {
  return __builtin_riscv_clmulr_32(a, b);