Skip to content
This repository was archived by the owner on Mar 28, 2020. It is now read-only.

Commit 27f0bb5

Browse files
committed
[SLP] Initial test for fix of PR31690.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@292631 91177308-0d34-0410-b5e6-96231b3b80d8
1 parent cfdef7c commit 27f0bb5

File tree

1 file changed

+203
-0
lines changed

1 file changed

+203
-0
lines changed

test/Transforms/SLPVectorizer/X86/horizontal-list.ll

Lines changed: 203 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -570,3 +570,206 @@ define float @f(float* nocapture readonly %x) {
570570
ret float %add.47
571571
}
572572

573+
; [SLP] Test added for PR31690 (r292631, see commit message above).
; NOTE(review): this span is a scraped GitHub diff view -- the bare "NNN+"
; lines interleaved below are diff line-number artifacts from the web page,
; not part of the original .ll file; they are preserved byte-for-byte here.
; @f1 is a fully scalar horizontal reduction: 32 consecutive float loads
; from x[0..31] are accumulated with a chain of `fadd fast` (reassociation
; legal) into a seed value sitofp(srem(a, b)). The CHECK-NEXT lines pin the
; current, still-unvectorized SLP output for this pattern.
define float @f1(float* nocapture readonly %x, i32 %a, i32 %b) {
574+
; CHECK-LABEL: @f1(
575+
; CHECK-NEXT: entry:
576+
; CHECK-NEXT: [[REM:%.*]] = srem i32 [[A:%.*]], [[B:%.*]]
577+
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[REM]] to float
578+
; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[X:%.*]], align 4
579+
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP0]], [[CONV]]
580+
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 1
581+
; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
582+
; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP1]], [[ADD]]
583+
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
584+
; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[ARRAYIDX_2]], align 4
585+
; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[TMP2]], [[ADD_1]]
586+
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
587+
; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[ARRAYIDX_3]], align 4
588+
; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[TMP3]], [[ADD_2]]
589+
; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
590+
; CHECK-NEXT: [[TMP4:%.*]] = load float, float* [[ARRAYIDX_4]], align 4
591+
; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float [[TMP4]], [[ADD_3]]
592+
; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
593+
; CHECK-NEXT: [[TMP5:%.*]] = load float, float* [[ARRAYIDX_5]], align 4
594+
; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float [[TMP5]], [[ADD_4]]
595+
; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
596+
; CHECK-NEXT: [[TMP6:%.*]] = load float, float* [[ARRAYIDX_6]], align 4
597+
; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float [[TMP6]], [[ADD_5]]
598+
; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
599+
; CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[ARRAYIDX_7]], align 4
600+
; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float [[TMP7]], [[ADD_6]]
601+
; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
602+
; CHECK-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX_8]], align 4
603+
; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float [[TMP8]], [[ADD_7]]
604+
; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
605+
; CHECK-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX_9]], align 4
606+
; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float [[TMP9]], [[ADD_8]]
607+
; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
608+
; CHECK-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX_10]], align 4
609+
; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float [[TMP10]], [[ADD_9]]
610+
; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
611+
; CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX_11]], align 4
612+
; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float [[TMP11]], [[ADD_10]]
613+
; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
614+
; CHECK-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX_12]], align 4
615+
; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float [[TMP12]], [[ADD_11]]
616+
; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
617+
; CHECK-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX_13]], align 4
618+
; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float [[TMP13]], [[ADD_12]]
619+
; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
620+
; CHECK-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX_14]], align 4
621+
; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float [[TMP14]], [[ADD_13]]
622+
; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
623+
; CHECK-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX_15]], align 4
624+
; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float [[TMP15]], [[ADD_14]]
625+
; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
626+
; CHECK-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX_16]], align 4
627+
; CHECK-NEXT: [[ADD_16:%.*]] = fadd fast float [[TMP16]], [[ADD_15]]
628+
; CHECK-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
629+
; CHECK-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX_17]], align 4
630+
; CHECK-NEXT: [[ADD_17:%.*]] = fadd fast float [[TMP17]], [[ADD_16]]
631+
; CHECK-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
632+
; CHECK-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX_18]], align 4
633+
; CHECK-NEXT: [[ADD_18:%.*]] = fadd fast float [[TMP18]], [[ADD_17]]
634+
; CHECK-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
635+
; CHECK-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX_19]], align 4
636+
; CHECK-NEXT: [[ADD_19:%.*]] = fadd fast float [[TMP19]], [[ADD_18]]
637+
; CHECK-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
638+
; CHECK-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX_20]], align 4
639+
; CHECK-NEXT: [[ADD_20:%.*]] = fadd fast float [[TMP20]], [[ADD_19]]
640+
; CHECK-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
641+
; CHECK-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX_21]], align 4
642+
; CHECK-NEXT: [[ADD_21:%.*]] = fadd fast float [[TMP21]], [[ADD_20]]
643+
; CHECK-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
644+
; CHECK-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX_22]], align 4
645+
; CHECK-NEXT: [[ADD_22:%.*]] = fadd fast float [[TMP22]], [[ADD_21]]
646+
; CHECK-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
647+
; CHECK-NEXT: [[TMP23:%.*]] = load float, float* [[ARRAYIDX_23]], align 4
648+
; CHECK-NEXT: [[ADD_23:%.*]] = fadd fast float [[TMP23]], [[ADD_22]]
649+
; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
650+
; CHECK-NEXT: [[TMP24:%.*]] = load float, float* [[ARRAYIDX_24]], align 4
651+
; CHECK-NEXT: [[ADD_24:%.*]] = fadd fast float [[TMP24]], [[ADD_23]]
652+
; CHECK-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
653+
; CHECK-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX_25]], align 4
654+
; CHECK-NEXT: [[ADD_25:%.*]] = fadd fast float [[TMP25]], [[ADD_24]]
655+
; CHECK-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
656+
; CHECK-NEXT: [[TMP26:%.*]] = load float, float* [[ARRAYIDX_26]], align 4
657+
; CHECK-NEXT: [[ADD_26:%.*]] = fadd fast float [[TMP26]], [[ADD_25]]
658+
; CHECK-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
659+
; CHECK-NEXT: [[TMP27:%.*]] = load float, float* [[ARRAYIDX_27]], align 4
660+
; CHECK-NEXT: [[ADD_27:%.*]] = fadd fast float [[TMP27]], [[ADD_26]]
661+
; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
662+
; CHECK-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX_28]], align 4
663+
; CHECK-NEXT: [[ADD_28:%.*]] = fadd fast float [[TMP28]], [[ADD_27]]
664+
; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
665+
; CHECK-NEXT: [[TMP29:%.*]] = load float, float* [[ARRAYIDX_29]], align 4
666+
; CHECK-NEXT: [[ADD_29:%.*]] = fadd fast float [[TMP29]], [[ADD_28]]
667+
; CHECK-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
668+
; CHECK-NEXT: [[TMP30:%.*]] = load float, float* [[ARRAYIDX_30]], align 4
669+
; CHECK-NEXT: [[ADD_30:%.*]] = fadd fast float [[TMP30]], [[ADD_29]]
670+
; CHECK-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
671+
; CHECK-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX_31]], align 4
672+
; CHECK-NEXT: [[ADD_31:%.*]] = fadd fast float [[TMP31]], [[ADD_30]]
673+
; CHECK-NEXT: ret float [[ADD_31]]
674+
;
675+
; Input IR below: the non-uniform seed (%conv) plus a 32-long scalar
; reduction chain of `fadd fast` over consecutive elements of %x.
entry:
676+
%rem = srem i32 %a, %b
677+
%conv = sitofp i32 %rem to float
678+
%0 = load float, float* %x, align 4
679+
%add = fadd fast float %0, %conv
680+
%arrayidx.1 = getelementptr inbounds float, float* %x, i64 1
681+
%1 = load float, float* %arrayidx.1, align 4
682+
%add.1 = fadd fast float %1, %add
683+
%arrayidx.2 = getelementptr inbounds float, float* %x, i64 2
684+
%2 = load float, float* %arrayidx.2, align 4
685+
%add.2 = fadd fast float %2, %add.1
686+
%arrayidx.3 = getelementptr inbounds float, float* %x, i64 3
687+
%3 = load float, float* %arrayidx.3, align 4
688+
%add.3 = fadd fast float %3, %add.2
689+
%arrayidx.4 = getelementptr inbounds float, float* %x, i64 4
690+
%4 = load float, float* %arrayidx.4, align 4
691+
%add.4 = fadd fast float %4, %add.3
692+
%arrayidx.5 = getelementptr inbounds float, float* %x, i64 5
693+
%5 = load float, float* %arrayidx.5, align 4
694+
%add.5 = fadd fast float %5, %add.4
695+
%arrayidx.6 = getelementptr inbounds float, float* %x, i64 6
696+
%6 = load float, float* %arrayidx.6, align 4
697+
%add.6 = fadd fast float %6, %add.5
698+
%arrayidx.7 = getelementptr inbounds float, float* %x, i64 7
699+
%7 = load float, float* %arrayidx.7, align 4
700+
%add.7 = fadd fast float %7, %add.6
701+
%arrayidx.8 = getelementptr inbounds float, float* %x, i64 8
702+
%8 = load float, float* %arrayidx.8, align 4
703+
%add.8 = fadd fast float %8, %add.7
704+
%arrayidx.9 = getelementptr inbounds float, float* %x, i64 9
705+
%9 = load float, float* %arrayidx.9, align 4
706+
%add.9 = fadd fast float %9, %add.8
707+
%arrayidx.10 = getelementptr inbounds float, float* %x, i64 10
708+
%10 = load float, float* %arrayidx.10, align 4
709+
%add.10 = fadd fast float %10, %add.9
710+
%arrayidx.11 = getelementptr inbounds float, float* %x, i64 11
711+
%11 = load float, float* %arrayidx.11, align 4
712+
%add.11 = fadd fast float %11, %add.10
713+
%arrayidx.12 = getelementptr inbounds float, float* %x, i64 12
714+
%12 = load float, float* %arrayidx.12, align 4
715+
%add.12 = fadd fast float %12, %add.11
716+
%arrayidx.13 = getelementptr inbounds float, float* %x, i64 13
717+
%13 = load float, float* %arrayidx.13, align 4
718+
%add.13 = fadd fast float %13, %add.12
719+
%arrayidx.14 = getelementptr inbounds float, float* %x, i64 14
720+
%14 = load float, float* %arrayidx.14, align 4
721+
%add.14 = fadd fast float %14, %add.13
722+
%arrayidx.15 = getelementptr inbounds float, float* %x, i64 15
723+
%15 = load float, float* %arrayidx.15, align 4
724+
%add.15 = fadd fast float %15, %add.14
725+
%arrayidx.16 = getelementptr inbounds float, float* %x, i64 16
726+
%16 = load float, float* %arrayidx.16, align 4
727+
%add.16 = fadd fast float %16, %add.15
728+
%arrayidx.17 = getelementptr inbounds float, float* %x, i64 17
729+
%17 = load float, float* %arrayidx.17, align 4
730+
%add.17 = fadd fast float %17, %add.16
731+
%arrayidx.18 = getelementptr inbounds float, float* %x, i64 18
732+
%18 = load float, float* %arrayidx.18, align 4
733+
%add.18 = fadd fast float %18, %add.17
734+
%arrayidx.19 = getelementptr inbounds float, float* %x, i64 19
735+
%19 = load float, float* %arrayidx.19, align 4
736+
%add.19 = fadd fast float %19, %add.18
737+
%arrayidx.20 = getelementptr inbounds float, float* %x, i64 20
738+
%20 = load float, float* %arrayidx.20, align 4
739+
%add.20 = fadd fast float %20, %add.19
740+
%arrayidx.21 = getelementptr inbounds float, float* %x, i64 21
741+
%21 = load float, float* %arrayidx.21, align 4
742+
%add.21 = fadd fast float %21, %add.20
743+
%arrayidx.22 = getelementptr inbounds float, float* %x, i64 22
744+
%22 = load float, float* %arrayidx.22, align 4
745+
%add.22 = fadd fast float %22, %add.21
746+
%arrayidx.23 = getelementptr inbounds float, float* %x, i64 23
747+
%23 = load float, float* %arrayidx.23, align 4
748+
%add.23 = fadd fast float %23, %add.22
749+
%arrayidx.24 = getelementptr inbounds float, float* %x, i64 24
750+
%24 = load float, float* %arrayidx.24, align 4
751+
%add.24 = fadd fast float %24, %add.23
752+
%arrayidx.25 = getelementptr inbounds float, float* %x, i64 25
753+
%25 = load float, float* %arrayidx.25, align 4
754+
%add.25 = fadd fast float %25, %add.24
755+
%arrayidx.26 = getelementptr inbounds float, float* %x, i64 26
756+
%26 = load float, float* %arrayidx.26, align 4
757+
%add.26 = fadd fast float %26, %add.25
758+
%arrayidx.27 = getelementptr inbounds float, float* %x, i64 27
759+
%27 = load float, float* %arrayidx.27, align 4
760+
%add.27 = fadd fast float %27, %add.26
761+
%arrayidx.28 = getelementptr inbounds float, float* %x, i64 28
762+
%28 = load float, float* %arrayidx.28, align 4
763+
%add.28 = fadd fast float %28, %add.27
764+
%arrayidx.29 = getelementptr inbounds float, float* %x, i64 29
765+
%29 = load float, float* %arrayidx.29, align 4
766+
%add.29 = fadd fast float %29, %add.28
767+
%arrayidx.30 = getelementptr inbounds float, float* %x, i64 30
768+
%30 = load float, float* %arrayidx.30, align 4
769+
%add.30 = fadd fast float %30, %add.29
770+
%arrayidx.31 = getelementptr inbounds float, float* %x, i64 31
771+
%31 = load float, float* %arrayidx.31, align 4
772+
%add.31 = fadd fast float %31, %add.30
773+
ret float %add.31
774+
}
775+

0 commit comments

Comments (0)