@@ -6,6 +6,8 @@ declare i8 @llvm.umin.i8(i8, i8)
 declare i8 @llvm.smax.i8(i8, i8)
 declare i8 @llvm.umax.i8(i8, i8)
 
+declare void @llvm.assume(i1)
+
 declare void @use.i8(i8)
 
 define i8 @xor_1(i8 %a, i1 %c, i8 %x, i8 %y) {
@@ -492,3 +494,33 @@ define i8 @smax_both_freely_invertable_always(i8 %x, i8 %y) {
   %r = call i8 @llvm.smax.i8(i8 %xx, i8 %yy)
   ret i8 %r
 }
+
+define i8 @lshr_nneg(i8 %x, i8 %y) {
+; CHECK-LABEL: @lshr_nneg(
+; CHECK-NEXT:    [[NEG:%.*]] = icmp slt i8 [[X:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NEG]])
+; CHECK-NEXT:    [[X_NOT:%.*]] = xor i8 [[X]], -1
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i8 [[X_NOT]], [[Y:%.*]]
+; CHECK-NEXT:    [[SHR_NOT:%.*]] = xor i8 [[SHR]], -1
+; CHECK-NEXT:    ret i8 [[SHR_NOT]]
+;
+  %neg = icmp slt i8 %x, 0
+  call void @llvm.assume(i1 %neg)
+  %x.not = xor i8 %x, -1
+  %shr = lshr i8 %x.not, %y
+  %shr.not = xor i8 %shr, -1
+  ret i8 %shr.not
+}
+
+define i8 @lshr_not_nneg(i8 %x, i8 %y) {
+; CHECK-LABEL: @lshr_not_nneg(
+; CHECK-NEXT:    [[X_NOT:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i8 [[X_NOT]], [[Y:%.*]]
+; CHECK-NEXT:    [[SHR_NOT:%.*]] = xor i8 [[SHR]], -1
+; CHECK-NEXT:    ret i8 [[SHR_NOT]]
+;
+  %x.not = xor i8 %x, -1
+  %shr = lshr i8 %x.not, %y
+  %shr.not = xor i8 %shr, -1
+  ret i8 %shr.not
+}