; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=slp-vectorizer -S < %s -mtriple=riscv64-unknown-linux-gnu -mcpu=sifive-p670 -pass-remarks-output=%t | FileCheck %s
; RUN: FileCheck --input-file=%t %s --check-prefix=YAML
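; Check that the fully unrolled fadd fast reduction of the 30 floats x[1..30]
; is SLP-vectorized for sifive-p670: the loads are split into <16 x float>,
; <8 x float>, and <4 x float> vector loads plus two scalar loads, each vector
; part is reduced with llvm.vector.reduce.fadd, and the partial sums are
; combined with scalar fadds. The YAML remarks below record one
; VectorizedHorizontalReduction remark per vector width (costs -35, -14, -4).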

; YAML-LABEL: --- !Passed
; YAML-NEXT: Pass: slp-vectorizer
; YAML-NEXT: Name: VectorizedHorizontalReduction
; YAML-NEXT: Function: test
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
; YAML-NEXT: - Cost: '-35'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '1'
; YAML-NEXT: ...
; YAML-NEXT: --- !Passed
; YAML-NEXT: Pass: slp-vectorizer
; YAML-NEXT: Name: VectorizedHorizontalReduction
; YAML-NEXT: Function: test
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
; YAML-NEXT: - Cost: '-14'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '1'
; YAML-NEXT: ...
; YAML-NEXT: --- !Passed
; YAML-NEXT: Pass: slp-vectorizer
; YAML-NEXT: Name: VectorizedHorizontalReduction
; YAML-NEXT: Function: test
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
; YAML-NEXT: - Cost: '-4'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '1'
; YAML-NEXT: ...
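
; The input IR below is a fully unrolled serial fadd fast chain over x[1..30].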
define float @test(ptr %x) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X:%.*]], i64 1
; CHECK-NEXT: [[TMP0:%.*]] = load <16 x float>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, ptr [[X]], i64 17
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr [[ARRAYIDX_16]], align 4
; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, ptr [[X]], i64 25
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[ARRAYIDX_24]], align 4
; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, ptr [[X]], i64 29
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX_28]], align 4
; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, ptr [[X]], i64 30
; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_29]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> [[TMP0]])
; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> [[TMP1]])
; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP2]])
; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[TMP7]]
; CHECK-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[OP_RDX1]], [[TMP3]]
; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[OP_RDX2]], [[TMP4]]
; CHECK-NEXT: ret float [[OP_RDX3]]
;
entry:
  %arrayidx = getelementptr inbounds float, ptr %x, i64 1
  %0 = load float, ptr %arrayidx, align 4
  %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 2
  %1 = load float, ptr %arrayidx.1, align 4
  %add.1 = fadd fast float %1, %0
  %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 3
  %2 = load float, ptr %arrayidx.2, align 4
  %add.2 = fadd fast float %2, %add.1
  %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 4
  %3 = load float, ptr %arrayidx.3, align 4
  %add.3 = fadd fast float %3, %add.2
  %arrayidx.4 = getelementptr inbounds float, ptr %x, i64 5
  %4 = load float, ptr %arrayidx.4, align 4
  %add.4 = fadd fast float %4, %add.3
  %arrayidx.5 = getelementptr inbounds float, ptr %x, i64 6
  %5 = load float, ptr %arrayidx.5, align 4
  %add.5 = fadd fast float %5, %add.4
  %arrayidx.6 = getelementptr inbounds float, ptr %x, i64 7
  %6 = load float, ptr %arrayidx.6, align 4
  %add.6 = fadd fast float %6, %add.5
  %arrayidx.7 = getelementptr inbounds float, ptr %x, i64 8
  %7 = load float, ptr %arrayidx.7, align 4
  %add.7 = fadd fast float %7, %add.6
  %arrayidx.8 = getelementptr inbounds float, ptr %x, i64 9
  %8 = load float, ptr %arrayidx.8, align 4
  %add.8 = fadd fast float %8, %add.7
  %arrayidx.9 = getelementptr inbounds float, ptr %x, i64 10
  %9 = load float, ptr %arrayidx.9, align 4
  %add.9 = fadd fast float %9, %add.8
  %arrayidx.10 = getelementptr inbounds float, ptr %x, i64 11
  %10 = load float, ptr %arrayidx.10, align 4
  %add.10 = fadd fast float %10, %add.9
  %arrayidx.11 = getelementptr inbounds float, ptr %x, i64 12
  %11 = load float, ptr %arrayidx.11, align 4
  %add.11 = fadd fast float %11, %add.10
  %arrayidx.12 = getelementptr inbounds float, ptr %x, i64 13
  %12 = load float, ptr %arrayidx.12, align 4
  %add.12 = fadd fast float %12, %add.11
  %arrayidx.13 = getelementptr inbounds float, ptr %x, i64 14
  %13 = load float, ptr %arrayidx.13, align 4
  %add.13 = fadd fast float %13, %add.12
  %arrayidx.14 = getelementptr inbounds float, ptr %x, i64 15
  %14 = load float, ptr %arrayidx.14, align 4
  %add.14 = fadd fast float %14, %add.13
  %arrayidx.15 = getelementptr inbounds float, ptr %x, i64 16
  %15 = load float, ptr %arrayidx.15, align 4
  %add.15 = fadd fast float %15, %add.14
  %arrayidx.16 = getelementptr inbounds float, ptr %x, i64 17
  %16 = load float, ptr %arrayidx.16, align 4
  %add.16 = fadd fast float %16, %add.15
  %arrayidx.17 = getelementptr inbounds float, ptr %x, i64 18
  %17 = load float, ptr %arrayidx.17, align 4
  %add.17 = fadd fast float %17, %add.16
  %arrayidx.18 = getelementptr inbounds float, ptr %x, i64 19
  %18 = load float, ptr %arrayidx.18, align 4
  %add.18 = fadd fast float %18, %add.17
  %arrayidx.19 = getelementptr inbounds float, ptr %x, i64 20
  %19 = load float, ptr %arrayidx.19, align 4
  %add.19 = fadd fast float %19, %add.18
  %arrayidx.20 = getelementptr inbounds float, ptr %x, i64 21
  %20 = load float, ptr %arrayidx.20, align 4
  %add.20 = fadd fast float %20, %add.19
  %arrayidx.21 = getelementptr inbounds float, ptr %x, i64 22
  %21 = load float, ptr %arrayidx.21, align 4
  %add.21 = fadd fast float %21, %add.20
  %arrayidx.22 = getelementptr inbounds float, ptr %x, i64 23
  %22 = load float, ptr %arrayidx.22, align 4
  %add.22 = fadd fast float %22, %add.21
  %arrayidx.23 = getelementptr inbounds float, ptr %x, i64 24
  %23 = load float, ptr %arrayidx.23, align 4
  %add.23 = fadd fast float %23, %add.22
  %arrayidx.24 = getelementptr inbounds float, ptr %x, i64 25
  %24 = load float, ptr %arrayidx.24, align 4
  %add.24 = fadd fast float %24, %add.23
  %arrayidx.25 = getelementptr inbounds float, ptr %x, i64 26
  %25 = load float, ptr %arrayidx.25, align 4
  %add.25 = fadd fast float %25, %add.24
  %arrayidx.26 = getelementptr inbounds float, ptr %x, i64 27
  %26 = load float, ptr %arrayidx.26, align 4
  %add.26 = fadd fast float %26, %add.25
  %arrayidx.27 = getelementptr inbounds float, ptr %x, i64 28
  %27 = load float, ptr %arrayidx.27, align 4
  %add.27 = fadd fast float %27, %add.26
  %arrayidx.28 = getelementptr inbounds float, ptr %x, i64 29
  %28 = load float, ptr %arrayidx.28, align 4
  %add.28 = fadd fast float %28, %add.27
  %arrayidx.29 = getelementptr inbounds float, ptr %x, i64 30
  %29 = load float, ptr %arrayidx.29, align 4
  %add.29 = fadd fast float %29, %add.28
  ret float %add.29
}