|
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Ensure that a no-op 'and' gets removed when one operand is a vector splat of
; 1 or a ptrue with the proper constant.
; ANDing an all-true ptrue.b with a ptrue.s widened via convert.to.svbool is a
; no-op: only the ptrue.s should survive, with no 'and' in the output.
define <vscale x 16 x i1> @fold_away_ptrue_and_ptrue() #0 {
; CHECK-LABEL: fold_away_ptrue_and_ptrue:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ret
entry:
  %0 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %0)
  %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %and = and <vscale x 16 x i1> %2, %1
  ret <vscale x 16 x i1> %and
}
| 20 | + |
; ANDing a widened splat-of-true <vscale x 4 x i1> predicate with an all-true
; ptrue.b is likewise a no-op: expect a single ptrue.s and no 'and'.
define <vscale x 16 x i1> @fold_away_ptrue_and_splat_predicate() #0 {
; CHECK-LABEL: fold_away_ptrue_and_splat_predicate:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ret
entry:
  %ins = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
  %splat = shufflevector <vscale x 4 x i1> %ins, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %splat)
  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %and = and <vscale x 16 x i1> %0, %1
  ret <vscale x 16 x i1> %and
}
| 34 | + |
; Ensure that one AND operation remains when inactive-lane zeroing is required
; by the 2 x i1 round-trip (llvm.aarch64.sve.convert.to.svbool.nxv2i1).
define <vscale x 16 x i1> @fold_away_ptrue_and_convert_to() #0 {
; CHECK-LABEL: fold_away_ptrue_and_convert_to:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: and p0.b, p1/z, p1.b, p0.b
; CHECK-NEXT: ret
entry:
  %0 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %0)
  %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %3 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %2)
  %4 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %3)
  %and = and <vscale x 16 x i1> %4, %1
  ret <vscale x 16 x i1> %and
}
| 52 | + |
; Two identical all-true ptrue.b predicates ANDed together fold to a single
; ptrue.b with no 'and'.
define <vscale x 16 x i1> @fold_away_two_similar() #0 {
; CHECK-LABEL: fold_away_two_similar:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ret
entry:
  %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %and = and <vscale x 16 x i1> %0, %1
  ret <vscale x 16 x i1> %and
}
| 64 | + |
; SVE intrinsic declarations used by the tests above.
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 immarg)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)

attributes #0 = { "target-features"="+sve" }