@@ -28,8 +28,8 @@ body: |
     ; CHECK-LABEL: name: test_smul_overflow
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
     ; CHECK: [[SMULH:%[0-9]+]]:_(s64) = G_SMULH [[COPY]], [[COPY1]]
+    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
     ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]]
     ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SMULH]](s64), [[ASHR]]
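For reference, the sequence these CHECK lines verify is the usual expansion of G_SMULO: the multiply overflows exactly when the high 64 bits of the full product (G_SMULH) differ from the sign-extension of the low 64 bits, i.e. the G_ASHR of the G_MUL result by 63. A sketch of the same check in plain LLVM IR (illustrative only, not part of the test):

define i1 @smul_overflow_check(i64 %a, i64 %b) {
  %wa = sext i64 %a to i128
  %wb = sext i64 %b to i128
  %wide = mul i128 %wa, %wb
  %lo = trunc i128 %wide to i64        ; G_MUL: low 64 bits of the product
  %hiwide = lshr i128 %wide, 64
  %hi = trunc i128 %hiwide to i64      ; G_SMULH: high 64 bits of the product
  %sign = ashr i64 %lo, 63             ; G_ASHR of the low half by 63
  %ovf = icmp ne i64 %hi, %sign        ; G_ICMP intpred(ne)
  ret i1 %ovf
}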
@@ -51,9 +51,9 @@ body: |
     ; CHECK-LABEL: name: test_umul_overflow
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
     ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], [[COPY1]]
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
     ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
     ; CHECK: $x0 = COPY [[MUL]](s64)
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
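The unsigned case needs no sign bookkeeping: G_UMULO overflows exactly when the high half of the zero-extended product (G_UMULH) is nonzero, which is what the compare against the G_CONSTANT 0 above checks. An equivalent sketch in LLVM IR (illustrative only):

define i1 @umul_overflow_check(i64 %a, i64 %b) {
  %wa = zext i64 %a to i128
  %wb = zext i64 %b to i128
  %wide = mul i128 %wa, %wb
  %hiwide = lshr i128 %wide, 64
  %hi = trunc i128 %hiwide to i64      ; G_UMULH: high 64 bits of the product
  %ovf = icmp ne i64 %hi, 0            ; overflow iff the high half is nonzero
  ret i1 %ovf
}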
@@ -66,3 +66,67 @@ body: |
     $w0 = COPY %4(s32)
 
 ...
+---
+name:            test_umulo_overflow_no_invalid_mir
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0' }
+  - { reg: '$x1' }
+  - { reg: '$x2' }
+frameInfo:
+  maxAlignment:  16
+stack:
+  - { id: 0, size: 8, alignment: 8 }
+  - { id: 1, size: 8, alignment: 8 }
+  - { id: 2, size: 16, alignment: 16 }
+  - { id: 3, size: 16, alignment: 8 }
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $x0, $x1, $x2
+    ; Check that legalizing the overflow result doesn't generate incorrect MIR
+    ; by using a G_CONSTANT 0 before it has been defined.
+    ; CHECK-LABEL: name: test_umulo_overflow_no_invalid_mir
+    ; CHECK: liveins: $x0, $x1, $x2
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+    ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+    ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
+    ; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3
+    ; CHECK: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store 8)
+    ; CHECK: G_STORE [[COPY1]](s64), [[FRAME_INDEX1]](p0) :: (store 8)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load 8)
+    ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[LOAD1]]
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[LOAD1]]
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
+    ; CHECK: G_STORE [[C]](s64), [[FRAME_INDEX2]](p0) :: (store 8, align 1)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
+    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
+    ; CHECK: $x0 = COPY [[MUL]](s64)
+    ; CHECK: $x1 = COPY [[AND]](s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %0:_(p0) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s64) = COPY $x2
+    %25:_(s32) = G_CONSTANT i32 0
+    %3:_(p0) = G_FRAME_INDEX %stack.0
+    %4:_(p0) = G_FRAME_INDEX %stack.1
+    %6:_(p0) = G_FRAME_INDEX %stack.3
+    G_STORE %2(s64), %3(p0) :: (store 8)
+    G_STORE %1(s64), %4(p0) :: (store 8)
+    %7:_(s64) = G_LOAD %3(p0) :: (dereferenceable load 8)
+    %8:_(s64) = G_LOAD %4(p0) :: (dereferenceable load 8)
+    %9:_(s64), %10:_(s1) = G_UMULO %7, %8
+    %31:_(s64) = G_CONSTANT i64 0
+    G_STORE %31(s64), %6(p0) :: (store 8, align 1)
+    %16:_(s64) = G_ZEXT %10(s1)
+    $x0 = COPY %9(s64)
+    $x1 = COPY %16(s64)
+    RET_ReallyLR implicit $x0
+
+...
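For context, the G_UMULO in the new test is the kind of operation the GlobalISel IRTranslator produces from the llvm.umul.with.overflow intrinsic; a minimal sketch of source IR that reaches this legalization (the function name is illustrative, not from the test):

declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64)

define i64 @umulo_source_sketch(i64 %a, i64 %b) {
  %res = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %mul = extractvalue { i64, i1 } %res, 0   ; value result of G_UMULO
  %ovf = extractvalue { i64, i1 } %res, 1   ; overflow result of G_UMULO
  %ext = zext i1 %ovf to i64                ; mirrors the G_ZEXT in the test
  ret i64 %ext
}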