@@ -421,8 +421,8 @@ define <2 x i16> @rotate_left_commute_16bit_vec(<2 x i16> %v, <2 x i32> %shift)
 
 define i8 @rotate_right_8bit(i8 %v, i3 %shift) {
 ; CHECK-LABEL: @rotate_right_8bit(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i3 [[SHIFT:%.*]] to i8
-; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[V:%.*]], i8 [[V]], i8 [[TMP1]])
+; CHECK-NEXT:    [[SHAMT_TRUNC:%.*]] = zext i3 [[SHIFT:%.*]] to i8
+; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[V:%.*]], i8 [[V]], i8 [[SHAMT_TRUNC]])
 ; CHECK-NEXT:    ret i8 [[CONV2]]
 ;
   %and = zext i3 %shift to i32
@@ -441,10 +441,10 @@ define i8 @rotate_right_8bit(i8 %v, i3 %shift) {
 define i8 @rotate_right_commute_8bit_unmasked_shl(i32 %v, i32 %shift) {
 ; CHECK-LABEL: @rotate_right_commute_8bit_unmasked_shl(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHIFT:%.*]] to i8
-; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], 3
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[V:%.*]] to i8
-; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[V]] to i8
-; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP3]], i8 [[TMP4]], i8 [[TMP2]])
+; CHECK-NEXT:    [[SHAMT_TRUNC:%.*]] = and i8 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[V:%.*]] to i8
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[V]] to i8
+; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP2]], i8 [[TMP3]], i8 [[SHAMT_TRUNC]])
 ; CHECK-NEXT:    ret i8 [[CONV2]]
 ;
   %and = and i32 %shift, 3
@@ -462,10 +462,10 @@ define i8 @rotate_right_commute_8bit_unmasked_shl(i32 %v, i32 %shift) {
 define i8 @rotate_right_commute_8bit(i32 %v, i32 %shift) {
 ; CHECK-LABEL: @rotate_right_commute_8bit(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHIFT:%.*]] to i8
-; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], 3
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[V:%.*]] to i8
-; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[V]] to i8
-; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP3]], i8 [[TMP4]], i8 [[TMP2]])
+; CHECK-NEXT:    [[SHAMT_TRUNC:%.*]] = and i8 [[TMP1]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[V:%.*]] to i8
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[V]] to i8
+; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP2]], i8 [[TMP3]], i8 [[SHAMT_TRUNC]])
 ; CHECK-NEXT:    ret i8 [[CONV2]]
 ;
   %and = and i32 %shift, 3
@@ -483,8 +483,8 @@ define i8 @rotate_right_commute_8bit(i32 %v, i32 %shift) {
 
 define i8 @rotate8_not_safe(i8 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotate8_not_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
-; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[TMP1]])
+; CHECK-NEXT:    [[SHAMT_TRUNC:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
+; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[SHAMT_TRUNC]])
 ; CHECK-NEXT:    ret i8 [[RET]]
 ;
   %conv = zext i8 %v to i32
@@ -597,8 +597,8 @@ define i8 @rotateright_8_neg_mask_commute(i8 %v, i8 %shamt) {
 
 define i16 @rotateright_16_neg_mask_wide_amount(i16 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotateright_16_neg_mask_wide_amount(
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i16
-; CHECK-NEXT:    [[RET:%.*]] = call i16 @llvm.fshr.i16(i16 [[V:%.*]], i16 [[V]], i16 [[TMP1]])
+; CHECK-NEXT:    [[SHAMT_TRUNC:%.*]] = trunc i32 [[SHAMT:%.*]] to i16
+; CHECK-NEXT:    [[RET:%.*]] = call i16 @llvm.fshr.i16(i16 [[V:%.*]], i16 [[V]], i16 [[SHAMT_TRUNC]])
 ; CHECK-NEXT:    ret i16 [[RET]]
 ;
   %neg = sub i32 0, %shamt
@@ -614,8 +614,8 @@ define i16 @rotateright_16_neg_mask_wide_amount(i16 %v, i32 %shamt) {
 
 define i16 @rotateright_16_neg_mask_wide_amount_commute(i16 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotateright_16_neg_mask_wide_amount_commute(
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i16
-; CHECK-NEXT:    [[RET:%.*]] = call i16 @llvm.fshr.i16(i16 [[V:%.*]], i16 [[V]], i16 [[TMP1]])
+; CHECK-NEXT:    [[SHAMT_TRUNC:%.*]] = trunc i32 [[SHAMT:%.*]] to i16
+; CHECK-NEXT:    [[RET:%.*]] = call i16 @llvm.fshr.i16(i16 [[V:%.*]], i16 [[V]], i16 [[SHAMT_TRUNC]])
 ; CHECK-NEXT:    ret i16 [[RET]]
 ;
   %neg = sub i32 0, %shamt
@@ -648,8 +648,8 @@ define i64 @rotateright_64_zext_neg_mask_amount(i64 %0, i32 %1) {
 
 define i8 @rotateleft_8_neg_mask_wide_amount(i8 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotateleft_8_neg_mask_wide_amount(
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
-; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[TMP1]])
+; CHECK-NEXT:    [[SHAMT_TRUNC:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
+; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[SHAMT_TRUNC]])
 ; CHECK-NEXT:    ret i8 [[RET]]
 ;
   %neg = sub i32 0, %shamt
@@ -665,8 +665,8 @@ define i8 @rotateleft_8_neg_mask_wide_amount(i8 %v, i32 %shamt) {
 
 define i8 @rotateleft_8_neg_mask_wide_amount_commute(i8 %v, i32 %shamt) {
 ; CHECK-LABEL: @rotateleft_8_neg_mask_wide_amount_commute(
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
-; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[TMP1]])
+; CHECK-NEXT:    [[SHAMT_TRUNC:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
+; CHECK-NEXT:    [[RET:%.*]] = call i8 @llvm.fshl.i8(i8 [[V:%.*]], i8 [[V]], i8 [[SHAMT_TRUNC]])
 ; CHECK-NEXT:    ret i8 [[RET]]
 ;
   %neg = sub i32 0, %shamt
@@ -957,3 +957,24 @@ define i8 @unmasked_shlop_unmasked_shift_amount(i32 %x, i32 %shamt) {
   %t8 = trunc i32 %t7 to i8
   ret i8 %t8
 }
+
+define i1 @check_rotate_masked_16bit(i8 %0, i32 %1) {
+; CHECK-LABEL: @check_rotate_masked_16bit(
+; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP1:%.*]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 0
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = and i32 %1, 1
+  %4 = and i8 %0, 15
+  %5 = zext i8 %4 to i32
+  %6 = lshr i32 %3, %5
+  %7 = sub i8 0, %0
+  %8 = and i8 %7, 15
+  %9 = zext i8 %8 to i32
+  %10 = shl nuw nsw i32 %3, %9
+  %11 = or i32 %6, %10
+  %12 = trunc i32 %11 to i16
+  %13 = sext i16 %12 to i64
+  %14 = icmp uge i64 0, %13
+  ret i1 %14
+}