# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -run-pass=aarch64-prelegalizer-combiner -mtriple aarch64-unknown-unknown -mattr=+dotprod %s -o - | FileCheck %s
#
# Runs the AArch64 pre-legalizer combiner (with the +dotprod feature enabled)
# on a G_VECREDUCE_ADD whose input is produced by the
# @llvm.experimental.vector.partial.reduce.add intrinsic: a <4 x s32>
# accumulator ($q0) and a <16 x s32> input built by concatenating
# $q1..$q4 with G_CONCAT_VECTORS.
#
# The autogenerated CHECK lines record that, at this stage, the combiner
# leaves the partial-reduce intrinsic and the final G_VECREDUCE_ADD
# untouched (no combine fires on this pattern here).

---
name: vecreduce_intrinsic
body: |
  bb.0:
    liveins: $q0, $q1, $q2, $q3, $q4
    ; CHECK-LABEL: name: vecreduce_intrinsic
    ; CHECK: liveins: $q0, $q1, $q2, $q3, $q4
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $q2
    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $q3
    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $q4
    ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[COPY1]](<4 x s32>), [[COPY2]](<4 x s32>), [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>)
    ; CHECK-NEXT: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.experimental.vector.partial.reduce.add), [[COPY]](<4 x s32>), [[CONCAT_VECTORS]](<16 x s32>)
    ; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:_(s32) = G_VECREDUCE_ADD [[INT]](<4 x s32>)
    ; CHECK-NEXT: $w0 = COPY [[VECREDUCE_ADD]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(<4 x s32>) = COPY $q0
    %2:_(<4 x s32>) = COPY $q1
    %3:_(<4 x s32>) = COPY $q2
    %4:_(<4 x s32>) = COPY $q3
    %5:_(<4 x s32>) = COPY $q4
    %1:_(<16 x s32>) = G_CONCAT_VECTORS %2:_(<4 x s32>), %3:_(<4 x s32>), %4:_(<4 x s32>), %5:_(<4 x s32>)
    %6:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.experimental.vector.partial.reduce.add), %0:_(<4 x s32>), %1:_(<16 x s32>)
    %7:_(s32) = G_VECREDUCE_ADD %6:_(<4 x s32>)
    $w0 = COPY %7:_(s32)
    RET_ReallyLR implicit $w0

...