
Commit db2d441

[RISCV] Add a test case for another issue in SelectRORIW. NFC
When validating C3 in (sext_inreg (or (shl X, C2), (shr (and Y, C3), C1)), i32), we are truncating it to 32 bits before checking its value. We need to check all 64 bits.
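To make the failure mode concrete, here is a minimal standalone C++ sketch of the check described above. It is not the actual SelectRORIW code: the helper names and the expected-mask formula (~0u << C1) are assumptions for illustration. The point is only that a 32-bit comparison cannot tell the mask 0xFFFFFFFE apart from 0xFFFFFFFFFFFFFFFE.

// Minimal sketch, not the actual SelectRORIW code: 'maskIsLegalForRORIW_*' and the
// expected-mask formula (~0u << C1) are hypothetical, for illustration only.
#include <cassert>
#include <cstdint>

// Buggy form: truncate C3 to 32 bits first, so bits 32..63 are never inspected.
bool maskIsLegalForRORIW_trunc32(uint64_t C3, unsigned C1) {
  return static_cast<uint32_t>(C3) == (UINT32_MAX << C1);
}

// Fixed form: compare all 64 bits, so any set bit above bit 31 rejects the match.
bool maskIsLegalForRORIW_full64(uint64_t C3, unsigned C1) {
  return C3 == static_cast<uint64_t>(static_cast<uint32_t>(UINT32_MAX << C1));
}

int main() {
  // The mask from the new test: 18446744073709551614 == 0xFFFFFFFFFFFFFFFE.
  uint64_t C3 = 0xFFFFFFFFFFFFFFFEull;
  unsigned C1 = 1;
  assert(maskIsLegalForRORIW_trunc32(C3, C1));  // passes: the truncated value is 0xFFFFFFFE
  assert(!maskIsLegalForRORIW_full64(C3, C1));  // rejected: bit 32 (and above) is set
  return 0;
}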

llvm/test/CodeGen/RISCV/rv64Zbbp.ll

Lines changed: 46 additions & 1 deletion
@@ -343,7 +343,7 @@ define signext i32 @rori_i32_fshr(i32 signext %a) nounwind {
   ret i32 %1
 }

-; This test is similar to the type legalized versio of the fshl/fshr tests, but
+; This test is similar to the type legalized version of the fshl/fshr tests, but
 ; instead of having the same input to both shifts it has different inputs. Make
 ; sure we don't match it has a roriw.
 ; FIXME: We're currently missing a check that the inputs are the same.
@@ -376,6 +376,51 @@ define signext i32 @not_rori_i32(i32 signext %x, i32 signext %y) nounwind {
   ret i32 %c
 }

+; This is similar to the type legalized roriw pattern, but the and mask is more
+; than 32 bits so the lshr doesn't shift zeroes into the lower 32 bits. Make
+; sure we don't match it to roriw.
+; FIXME: We are currently truncating the mask to 32-bits before checking.
+define i64 @roriw_bug(i64 %x) nounwind {
+; RV64I-LABEL: roriw_bug:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 31
+; RV64I-NEXT:    andi a0, a0, -2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: roriw_bug:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    andi a1, a0, -2
+; RV64IB-NEXT:    roriw a0, a0, 1
+; RV64IB-NEXT:    xor a0, a1, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBB-LABEL: roriw_bug:
+; RV64IBB:       # %bb.0:
+; RV64IBB-NEXT:    andi a1, a0, -2
+; RV64IBB-NEXT:    roriw a0, a0, 1
+; RV64IBB-NEXT:    xor a0, a1, a0
+; RV64IBB-NEXT:    ret
+;
+; RV64IBP-LABEL: roriw_bug:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    andi a1, a0, -2
+; RV64IBP-NEXT:    roriw a0, a0, 1
+; RV64IBP-NEXT:    xor a0, a1, a0
+; RV64IBP-NEXT:    ret
+  %a = shl i64 %x, 31
+  %b = and i64 %x, 18446744073709551614
+  %c = lshr i64 %b, 1
+  %d = or i64 %a, %c
+  %e = shl i64 %d, 32
+  %f = ashr i64 %e, 32
+  %g = xor i64 %b, %f ; to increase the use count on %b to disable SimplifyDemandedBits.
+  ret i64 %g
+}
+
 define i64 @rori_i64_fshl(i64 %a) nounwind {
 ; RV64I-LABEL: rori_i64_fshl:
 ; RV64I:       # %bb.0:
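As the FIXME in the new test notes, the Zbb/Zbp pipelines currently select roriw for this function, which is wrong whenever bit 32 of the input is set, because the lshr then shifts a nonzero bit into bit 31. Below is a small standalone C++ sketch, not part of the commit, that models both the IR above and the roriw-based lowering by hand; x = 1 << 32 is just a convenient witness value.

// Standalone sketch, not part of the commit: models @roriw_bug and the current
// roriw-based lowering in plain C++ to show they disagree when bit 32 of x is set.
// The narrowing casts assume the usual two's-complement behaviour.
#include <cstdint>
#include <cstdio>

// What the IR computes.
uint64_t reference(uint64_t x) {
  uint64_t a = x << 31;
  uint64_t b = x & 0xFFFFFFFFFFFFFFFEull; // 64-bit mask clearing only bit 0
  uint64_t c = b >> 1;                    // bit 32 of x leaks into bit 31 here
  uint64_t d = a | c;
  int64_t f = static_cast<int64_t>(static_cast<int32_t>(d)); // shl 32 + ashr 32
  return b ^ static_cast<uint64_t>(f);
}

// What the roriw-based code computes: rotate the low 32 bits right by 1,
// sign-extend, then xor with b.
uint64_t withRoriw(uint64_t x) {
  uint64_t b = x & 0xFFFFFFFFFFFFFFFEull;
  uint32_t lo = static_cast<uint32_t>(x);
  uint32_t rot = (lo >> 1) | (lo << 31);
  int64_t f = static_cast<int64_t>(static_cast<int32_t>(rot));
  return b ^ static_cast<uint64_t>(f);
}

int main() {
  uint64_t x = 1ull << 32; // bit 32 set, low 32 bits zero
  std::printf("reference: %016llx\n", (unsigned long long)reference(x)); // fffffffe80000000
  std::printf("roriw:     %016llx\n", (unsigned long long)withRoriw(x)); // 0000000100000000
  return 0;
}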
