@@ -40,16 +40,22 @@ define i1 @test_get_rounding_sideeffect() {
 ; RV32IF-NEXT:    frrm a0
 ; RV32IF-NEXT:    lui a1, 66
 ; RV32IF-NEXT:    slli a0, a0, 2
-; RV32IF-NEXT:    addi a1, a1, 769
-; RV32IF-NEXT:    srl s0, a1, a0
+; RV32IF-NEXT:    addi s0, a1, 769
+; RV32IF-NEXT:    srl a0, s0, a0
+; RV32IF-NEXT:    andi a0, a0, 7
+; RV32IF-NEXT:    beqz a0, .LBB1_2
+; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    li a0, 0
-; RV32IF-NEXT:    andi s0, s0, 7
-; RV32IF-NEXT:    bnez s0, .LBB1_2
-; RV32IF-NEXT:  # %bb.1: # %if.end
+; RV32IF-NEXT:    j .LBB1_3
+; RV32IF-NEXT:  .LBB1_2: # %if.end
 ; RV32IF-NEXT:    call fesetround
-; RV32IF-NEXT:    addi s0, s0, -1
-; RV32IF-NEXT:    seqz a0, s0
-; RV32IF-NEXT:  .LBB1_2: # %return
+; RV32IF-NEXT:    frrm a0
+; RV32IF-NEXT:    slli a0, a0, 2
+; RV32IF-NEXT:    srl a0, s0, a0
+; RV32IF-NEXT:    andi a0, a0, 7
+; RV32IF-NEXT:    addi a0, a0, -1
+; RV32IF-NEXT:    seqz a0, a0
+; RV32IF-NEXT:  .LBB1_3: # %return
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    .cfi_restore ra
@@ -71,16 +77,22 @@ define i1 @test_get_rounding_sideeffect() {
 ; RV64IF-NEXT:    frrm a0
 ; RV64IF-NEXT:    lui a1, 66
 ; RV64IF-NEXT:    slli a0, a0, 2
-; RV64IF-NEXT:    addiw a1, a1, 769
-; RV64IF-NEXT:    srl s0, a1, a0
+; RV64IF-NEXT:    addiw s0, a1, 769
+; RV64IF-NEXT:    srl a0, s0, a0
+; RV64IF-NEXT:    andi a0, a0, 7
+; RV64IF-NEXT:    beqz a0, .LBB1_2
+; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    li a0, 0
-; RV64IF-NEXT:    andi s0, s0, 7
-; RV64IF-NEXT:    bnez s0, .LBB1_2
-; RV64IF-NEXT:  # %bb.1: # %if.end
+; RV64IF-NEXT:    j .LBB1_3
+; RV64IF-NEXT:  .LBB1_2: # %if.end
 ; RV64IF-NEXT:    call fesetround
-; RV64IF-NEXT:    addi s0, s0, -1
-; RV64IF-NEXT:    seqz a0, s0
-; RV64IF-NEXT:  .LBB1_2: # %return
+; RV64IF-NEXT:    frrm a0
+; RV64IF-NEXT:    slli a0, a0, 2
+; RV64IF-NEXT:    srl a0, s0, a0
+; RV64IF-NEXT:    andi a0, a0, 7
+; RV64IF-NEXT:    addi a0, a0, -1
+; RV64IF-NEXT:    seqz a0, a0
+; RV64IF-NEXT:  .LBB1_3: # %return
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    .cfi_restore ra
0 commit comments