|
| 1 | +; RUN: llc < %s -debug-only=legalize-types 2>&1 | FileCheck %s --check-prefix=CHECK-LEGALIZATION |
| 2 | +; RUN: llc < %s | FileCheck %s |
| 3 | +; REQUIRES: asserts |
| 4 | + |
| 5 | +target triple = "aarch64-unknown-linux-gnu" |
| 6 | +attributes #0 = {"target-features"="+sve"} |
| 7 | + |
| 8 | +declare <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64>, <8 x i64>, i64) |
| 9 | +declare <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double>, <8 x double>, i64) |
| 10 | + |
; Insert a fixed <8 x i64> into a scalable nxv2i64 at index 0. Legalization
; splits the fixed operand into four <2 x i64> subvectors inserted at element
; offsets 0, 2, 4 and 6 (the CHECK-LEGALIZATION lines below pin that chain of
; insert_subvector nodes). The lowering goes through the stack: each nxv2i64
; partial result is spilled with st1d, the next 128-bit chunk is stored on top
; with str q<n> at an offset clamped to the runtime vector length (cntd/csel),
; then reloaded with ld1d.
define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2i64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0>
; CHECK-LEGALIZATION: Legally typed node: [[T2:t[0-9]+]]: nxv2i64 = insert_subvector [[T1]], {{t[0-9]+}}, Constant:i64<2>
; CHECK-LEGALIZATION: Legally typed node: [[T3:t[0-9]+]]: nxv2i64 = insert_subvector [[T2]], {{t[0-9]+}}, Constant:i64<4>
; CHECK-LEGALIZATION: Legally typed node: [[T4:t[0-9]+]]: nxv2i64 = insert_subvector [[T3]], {{t[0-9]+}}, Constant:i64<6>

; CHECK-LABEL: test_nxv2i64_v8i64:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cntd x8
; CHECK-NEXT: sub x8, x8, #1 // =1
; CHECK-NEXT: cmp x8, #0 // =0
; CHECK-NEXT: csel x10, x8, xzr, lo
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x9, sp
; CHECK-NEXT: lsl x10, x10, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: str q1, [x9, x10]
; CHECK-NEXT: addvl x10, sp, #1
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x8, #2 // =2
; CHECK-NEXT: csel x9, x8, x9, lo
; CHECK-NEXT: lsl x9, x9, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: str q2, [x10, x9]
; CHECK-NEXT: addvl x10, sp, #2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x8, #4 // =4
; CHECK-NEXT: csel x9, x8, x9, lo
; CHECK-NEXT: lsl x9, x9, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
; CHECK-NEXT: str q3, [x10, x9]
; CHECK-NEXT: addvl x10, sp, #3
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT: mov w9, #6
; CHECK-NEXT: cmp x8, #6 // =6
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #3, mul vl]
; CHECK-NEXT: str q4, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #3, mul vl]
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %r = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> %a, <8 x i64> %b, i64 0)
  ret <vscale x 2 x i64> %r
}
| 63 | + |
; Floating-point counterpart of the test above: insert a fixed <8 x double>
; into a scalable nxv2f64 at index 0. Legalization produces the same
; four-step insert_subvector chain (offsets 0, 2, 4, 6) and the same
; stack-based spill/str/reload lowering with vector-length-clamped offsets.
define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x double> %b) #0 {
; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2f64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0>
; CHECK-LEGALIZATION: Legally typed node: [[T2:t[0-9]+]]: nxv2f64 = insert_subvector [[T1]], {{t[0-9]+}}, Constant:i64<2>
; CHECK-LEGALIZATION: Legally typed node: [[T3:t[0-9]+]]: nxv2f64 = insert_subvector [[T2]], {{t[0-9]+}}, Constant:i64<4>
; CHECK-LEGALIZATION: Legally typed node: [[T4:t[0-9]+]]: nxv2f64 = insert_subvector [[T3]], {{t[0-9]+}}, Constant:i64<6>

; CHECK-LABEL: test_nxv2f64_v8f64:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cntd x8
; CHECK-NEXT: sub x8, x8, #1 // =1
; CHECK-NEXT: cmp x8, #0 // =0
; CHECK-NEXT: csel x10, x8, xzr, lo
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x9, sp
; CHECK-NEXT: lsl x10, x10, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: str q1, [x9, x10]
; CHECK-NEXT: addvl x10, sp, #1
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x8, #2 // =2
; CHECK-NEXT: csel x9, x8, x9, lo
; CHECK-NEXT: lsl x9, x9, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: str q2, [x10, x9]
; CHECK-NEXT: addvl x10, sp, #2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x8, #4 // =4
; CHECK-NEXT: csel x9, x8, x9, lo
; CHECK-NEXT: lsl x9, x9, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
; CHECK-NEXT: str q3, [x10, x9]
; CHECK-NEXT: addvl x10, sp, #3
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT: mov w9, #6
; CHECK-NEXT: cmp x8, #6 // =6
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #3, mul vl]
; CHECK-NEXT: str q4, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #3, mul vl]
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %r = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> %a, <8 x double> %b, i64 0)
  ret <vscale x 2 x double> %r
}