
[AArch64] replace SVE intrinsics with no active lanes with zero #107413


Merged: 1 commit merged into llvm:main on Sep 9, 2024

Conversation

@Lukacma (Contributor) commented on Sep 5, 2024

This patch extends #73964 and folds further SVE intrinsics to zero constants when the governing predicate has no active lanes.
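
For illustration, a minimal before/after sketch of the fold, mirroring the eorv test added in this patch: with an all-false governing predicate, the call folds to a zero constant of its return type.

Before instcombine:

  %out = call i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %a)
  ret i32 %out

After instcombine:

  ret i32 0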

@Lukacma (Contributor, Author) commented on Sep 5, 2024

This patch comes from splitting #86651 into multiple smaller patches, as requested in the review of that patch.

@llvmbot (Member) commented on Sep 5, 2024

@llvm/pr-subscribers-backend-aarch64

@llvm/pr-subscribers-llvm-transforms

Author: None (Lukacma)

Changes

This patch extends #73964 and folds further SVE intrinsics to zero constants when the governing predicate has no active lanes.


Full diff: https://github.com/llvm/llvm-project/pull/107413.diff

2 Files Affected:

  • (modified) llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp (+34-10)
  • (added) llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-comb-no-active-lanes-to-zero.ll (+232)
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 58c267f1ce4bd6..9a887b832c90f6 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1110,10 +1110,10 @@ instCombineSVENoActiveUnaryErase(InstCombiner &IC, IntrinsicInst &II,
   return std::nullopt;
 }
 
-// Simplify unary operation where predicate has all inactive lanes by replacing
+// Simplify operation where predicate has all inactive lanes by replacing
 // instruction with zeroed object
 static std::optional<Instruction *>
-instCombineSVENoActiveUnaryZero(InstCombiner &IC, IntrinsicInst &II) {
+instCombineSVENoActiveZero(InstCombiner &IC, IntrinsicInst &II) {
   if (match(II.getOperand(0), m_ZeroInt())) {
     Constant *Node;
     Type *RetTy = II.getType();
@@ -1126,10 +1126,9 @@ instCombineSVENoActiveUnaryZero(InstCombiner &IC, IntrinsicInst &II) {
                                                   : ConstantInt::get(VecT, 0));
       }
       Node = ConstantStruct::get(StructT, ZerVec);
-    } else if (RetTy->isFPOrFPVectorTy())
-      Node = ConstantFP::get(RetTy, 0.0);
-    else
-      Node = ConstantInt::get(II.getType(), 0);
+    } else
+      Node = RetTy->isFPOrFPVectorTy() ? ConstantFP::get(RetTy, 0.0)
+                                       : ConstantInt::get(II.getType(), 0);
 
     IC.replaceInstUsesWith(II, Node);
     return IC.eraseInstFromFunction(II);
@@ -1188,7 +1187,7 @@ static std::optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
   LLVMContext &Ctx = II.getContext();
 
   // Replace by zero constant when all lanes are inactive
-  if (auto II_NA = instCombineSVENoActiveUnaryZero(IC, II))
+  if (auto II_NA = instCombineSVENoActiveZero(IC, II))
     return II_NA;
 
   // Check that the predicate is all active
@@ -1556,7 +1555,7 @@ instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
   Type *VecTy = II.getType();
 
   // Replace by zero constant when all lanes are inactive
-  if (auto II_NA = instCombineSVENoActiveUnaryZero(IC, II))
+  if (auto II_NA = instCombineSVENoActiveZero(IC, II))
     return II_NA;
 
   if (isAllActivePredicate(Pred)) {
@@ -1907,7 +1906,7 @@ instCombineLD1GatherIndex(InstCombiner &IC, IntrinsicInst &II) {
   Value *PassThru = ConstantAggregateZero::get(Ty);
 
   // Replace by zero constant when all lanes are inactive
-  if (auto II_NA = instCombineSVENoActiveUnaryZero(IC, II))
+  if (auto II_NA = instCombineSVENoActiveZero(IC, II))
     return II_NA;
 
   // Contiguous gather => masked load.
@@ -2197,6 +2196,31 @@ AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
   case Intrinsic::aarch64_sve_st4:
   case Intrinsic::aarch64_sve_st4q:
     return instCombineSVENoActiveUnaryErase(IC, II, 4);
+  case Intrinsic::aarch64_sve_addqv:
+  case Intrinsic::aarch64_sve_and_z:
+  case Intrinsic::aarch64_sve_bic_z:
+  case Intrinsic::aarch64_sve_brka_z:
+  case Intrinsic::aarch64_sve_brkb_z:
+  case Intrinsic::aarch64_sve_brkn_z:
+  case Intrinsic::aarch64_sve_brkpa_z:
+  case Intrinsic::aarch64_sve_brkpb_z:
+  case Intrinsic::aarch64_sve_cntp:
+  case Intrinsic::aarch64_sve_compact:
+  case Intrinsic::aarch64_sve_eor_z:
+  case Intrinsic::aarch64_sve_eorv:
+  case Intrinsic::aarch64_sve_eorqv:
+  case Intrinsic::aarch64_sve_nand_z:
+  case Intrinsic::aarch64_sve_nor_z:
+  case Intrinsic::aarch64_sve_orn_z:
+  case Intrinsic::aarch64_sve_orr_z:
+  case Intrinsic::aarch64_sve_orv:
+  case Intrinsic::aarch64_sve_orqv:
+  case Intrinsic::aarch64_sve_pnext:
+  case Intrinsic::aarch64_sve_rdffr_z:
+  case Intrinsic::aarch64_sve_saddv:
+  case Intrinsic::aarch64_sve_uaddv:
+  case Intrinsic::aarch64_sve_umaxv:
+  case Intrinsic::aarch64_sve_umaxqv:
   case Intrinsic::aarch64_sve_cmpeq:
   case Intrinsic::aarch64_sve_cmpeq_wide:
   case Intrinsic::aarch64_sve_cmpge:
@@ -2251,7 +2275,7 @@ AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
   case Intrinsic::aarch64_sve_ldnt1_gather_index:
   case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
   case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
-    return instCombineSVENoActiveUnaryZero(IC, II);
+    return instCombineSVENoActiveZero(IC, II);
   case Intrinsic::aarch64_sve_prf:
   case Intrinsic::aarch64_sve_prfb_gather_index:
   case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-comb-no-active-lanes-to-zero.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-comb-no-active-lanes-to-zero.ll
new file mode 100644
index 00000000000000..e58aa2eeefa8d3
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-comb-no-active-lanes-to-zero.ll
@@ -0,0 +1,232 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+;RUN: opt -S -passes=instcombine < %s | FileCheck %s
+target triple = "aarch64-unknown-linux-gnu"
+
+
+define <16 x i8> @addqv_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: define <16 x i8> @addqv_i8(
+; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]]) {
+; CHECK-NEXT:    ret <16 x i8> zeroinitializer
+;
+  %res = call <16 x i8> @llvm.aarch64.sve.addqv.v16i8.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a);
+  ret <16 x i8> %res
+}
+
+define <vscale x 4 x i1> @and_4(<vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
+; CHECK-LABEL: define <vscale x 4 x i1> @and_4(
+; CHECK-SAME: <vscale x 4 x i1> [[PN:%.*]], <vscale x 4 x i1> [[PD:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 4 x i1> zeroinitializer
+;
+  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
+  ret <vscale x 4 x i1> %res;
+}
+
+define <vscale x 16 x i1> @bic_16(<vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
+; CHECK-LABEL: define <vscale x 16 x i1> @bic_16(
+; CHECK-SAME: <vscale x 16 x i1> [[PN:%.*]], <vscale x 16 x i1> [[PD:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
+;
+  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.bic.z.nxv16i1(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
+  ret <vscale x 16 x i1> %res;
+}
+
+define <vscale x 16 x i1> @brka_z_b8(<vscale x 16 x i1> %a) {
+; CHECK-LABEL: define <vscale x 16 x i1> @brka_z_b8(
+; CHECK-SAME: <vscale x 16 x i1> [[A:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
+;
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brka.z.nxv16i1(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> %a)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @brkb_z_b8(<vscale x 16 x i1> %a) {
+; CHECK-LABEL: define <vscale x 16 x i1> @brkb_z_b8(
+; CHECK-SAME: <vscale x 16 x i1> [[A:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
+;
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkb.z.nxv16i1(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> %a)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @brkn_b8(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: define <vscale x 16 x i1> @brkn_b8(
+; CHECK-SAME: <vscale x 16 x i1> [[A:%.*]], <vscale x 16 x i1> [[B:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
+;
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkn.z.nxv16i1(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @brkpa_b8(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: define <vscale x 16 x i1> @brkpa_b8(
+; CHECK-SAME: <vscale x 16 x i1> [[A:%.*]], <vscale x 16 x i1> [[B:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
+;
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkpa.z.nxv16i1(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @brkpb_b8(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: define <vscale x 16 x i1> @brkpb_b8(
+; CHECK-SAME: <vscale x 16 x i1> [[A:%.*]], <vscale x 16 x i1> [[B:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
+;
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkpb.z.nxv16i1(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define i64 @cntp_b64(<vscale x 2 x i1> %a) {
+; CHECK-LABEL: define i64 @cntp_b64(
+; CHECK-SAME: <vscale x 2 x i1> [[A:%.*]]) {
+; CHECK-NEXT:    ret i64 0
+;
+; USE_SCALAR_INC-LABEL: cntp_b64:
+; USE_SCALAR_INC:       // %bb.0:
+; USE_SCALAR_INC-NEXT:    cntp x0, p0, p1.d
+; USE_SCALAR_INC-NEXT:    ret
+  %out = call i64 @llvm.aarch64.sve.cntp.nxv2i1(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> %a)
+  ret i64 %out
+}
+
+define <vscale x 4 x i32> @compact_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: define <vscale x 4 x i32> @compact_i32(
+; CHECK-SAME: <vscale x 4 x i32> [[A:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 4 x i32> zeroinitializer
+;
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.compact.nxv4i32(<vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %a)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 16 x i1> @eor_16(<vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
+; CHECK-LABEL: define <vscale x 16 x i1> @eor_16(
+; CHECK-SAME: <vscale x 16 x i1> [[PN:%.*]], <vscale x 16 x i1> [[PD:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
+;
+  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
+  ret <vscale x 16 x i1> %res;
+}
+
+define i32 @eorv_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: define i32 @eorv_i32(
+; CHECK-SAME: <vscale x 4 x i32> [[A:%.*]]) {
+; CHECK-NEXT:    ret i32 0
+;
+  %out = call i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %a)
+  ret i32 %out
+}
+
+define <4 x i32> @eorqv_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: define <4 x i32> @eorqv_i32(
+; CHECK-SAME: <vscale x 4 x i32> [[A:%.*]]) {
+; CHECK-NEXT:    ret <4 x i32> zeroinitializer
+;
+  %res = call <4 x i32> @llvm.aarch64.sve.eorqv.v4i32.nxv4i32(<vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %a);
+  ret <4 x i32> %res
+}
+
+define <vscale x 8 x i1> @nand_8(<vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
+; CHECK-LABEL: define <vscale x 8 x i1> @nand_8(
+; CHECK-SAME: <vscale x 8 x i1> [[PN:%.*]], <vscale x 8 x i1> [[PD:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 8 x i1> zeroinitializer
+;
+  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.nand.z.nxv8i1(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
+  ret <vscale x 8 x i1> %res;
+}
+
+define <vscale x 4 x i1> @nor_4(<vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
+; CHECK-LABEL: define <vscale x 4 x i1> @nor_4(
+; CHECK-SAME: <vscale x 4 x i1> [[PN:%.*]], <vscale x 4 x i1> [[PD:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 4 x i1> zeroinitializer
+;
+  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.nor.z.nxv4i1(<vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
+  ret <vscale x 4 x i1> %res;
+}
+
+define <vscale x 4 x i1> @orn_4(<vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
+; CHECK-LABEL: define <vscale x 4 x i1> @orn_4(
+; CHECK-SAME: <vscale x 4 x i1> [[PN:%.*]], <vscale x 4 x i1> [[PD:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 4 x i1> zeroinitializer
+;
+  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.orn.z.nxv4i1(<vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
+  ret <vscale x 4 x i1> %res;
+}
+
+define <vscale x 2 x i1> @orr_2(<vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
+; CHECK-LABEL: define <vscale x 2 x i1> @orr_2(
+; CHECK-SAME: <vscale x 2 x i1> [[PN:%.*]], <vscale x 2 x i1> [[PD:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 2 x i1> zeroinitializer
+;
+  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.orr.z.nxv2i1(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
+  ret <vscale x 2 x i1> %res;
+}
+
+define i8 @orv_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: define i8 @orv_i8(
+; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]]) {
+; CHECK-NEXT:    ret i8 0
+;
+  %out = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a)
+  ret i8 %out
+}
+
+define <8 x i16> @orqv_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: define <8 x i16> @orqv_i16(
+; CHECK-SAME: <vscale x 8 x i16> [[A:%.*]]) {
+; CHECK-NEXT:    ret <8 x i16> zeroinitializer
+;
+  %res = call <8 x i16> @llvm.aarch64.sve.orqv.v8i16.nxv8i16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
+
+define <vscale x 4 x i1> @pnext_b32(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: define <vscale x 4 x i1> @pnext_b32(
+; CHECK-SAME: <vscale x 4 x i1> [[A:%.*]]) {
+; CHECK-NEXT:    ret <vscale x 4 x i1> zeroinitializer
+;
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.pnext.nxv4i1(<vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> %a)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 16 x i1> @rdffr_z() {
+; CHECK-LABEL: define <vscale x 16 x i1> @rdffr_z() {
+; CHECK-NEXT:    ret <vscale x 16 x i1> zeroinitializer
+;
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.rdffr.z(<vscale x 16 x i1> zeroinitializer)
+  ret <vscale x 16 x i1> %out
+}
+
+define i64 @saddv_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: define i64 @saddv_i64(
+; CHECK-SAME: <vscale x 2 x i64> [[A:%.*]]) {
+; CHECK-NEXT:    ret i64 0
+;
+  %out = call i64 @llvm.aarch64.sve.saddv.nxv2i64(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x i64> %a)
+  ret i64 %out
+}
+
+define i64 @uaddv_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: define i64 @uaddv_i8(
+; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]]) {
+; CHECK-NEXT:    ret i64 0
+;
+  %out = call i64 @llvm.aarch64.sve.uaddv.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a)
+  ret i64 %out
+}
+
+define i8 @umaxv_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: define i8 @umaxv_i8(
+; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]]) {
+; CHECK-NEXT:    ret i8 0
+;
+  %out = call i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a)
+  ret i8 %out
+}
+
+define <8 x i16> @umaxqv_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: define <8 x i16> @umaxqv_i16(
+; CHECK-SAME: <vscale x 8 x i16> [[A:%.*]]) {
+; CHECK-NEXT:    ret <8 x i16> zeroinitializer
+;
+  %res = call <8 x i16> @llvm.aarch64.sve.umaxqv.v8i16.nxv8i16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x i16> %a);
+  ret <8 x i16> %res
+}
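
As a usage note, the new test follows the RUN line shown above; assuming a local build of opt, it can be exercised with the standard lit/FileCheck idiom (the path is taken from the diff):

  opt -S -passes=instcombine llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-comb-no-active-lanes-to-zero.ll | FileCheck llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-comb-no-active-lanes-to-zero.ll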

@Lukacma merged commit d57be19 into llvm:main on Sep 9, 2024. 11 checks passed.
@Lukacma deleted the sve-falseopt-zero branch on September 9, 2024 at 09:28.