[InstCombine][NFC] Precommit a test for folding a binary op of reductions. #121568

Merged 1 commit on Jan 9, 2025.
215 changes: 215 additions & 0 deletions llvm/test/Transforms/InstCombine/fold-binop-of-reductions.ll

Contributor:
Please also add negative tests where the intrinsic IDs are not the same or the vector types don't match, and add a test that shows that flags are not preserved.


Contributor Author:
done.


Member:

> Please also add negative tests where intrinsic IDs are not the same

I cannot find this test.


Contributor Author:

reduction_does_not_match_binop


Member:

I guess nikic means to add a test like reduce.mul + reduce.add.


Contributor Author:

Right, just realized that and pushed.
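
For reference, the fold these tests precommit for rewrites a scalar binary op of two like reductions into a single reduction of an element-wise op; the existing handling of sub in sub_of_reduce_add below already shows the target shape. A minimal sketch in LLVM IR (value names are illustrative, and the exact conditions are up to the follow-up patch):

; Before: reduce each vector, then add the scalar results.
%v0_red = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v0)
%v1_red = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v1)
%res = add i32 %v0_red, %v1_red

; After: one element-wise add, then a single reduction.
%sum = add <16 x i32> %v0, %v1
%res = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %sum)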

@@ -0,0 +1,215 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
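; Covers add/sub/mul/and/or/xor of two matching vector reductions, plus
; negative tests (mismatched binop, intrinsic, or element count), extra
; uses, overflow/disjoint flag handling, and scalable-vector variants.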

define i32 @add_of_reduce_add(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @add_of_reduce_add(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v1)
%res = add i32 %v0_red, %v1_red
ret i32 %res
}

define i32 @sub_of_reduce_add(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @sub_of_reduce_add(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = sub <16 x i32> [[V0]], [[V1]]
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP1]])
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v1)
%res = sub i32 %v0_red, %v1_red
ret i32 %res
}

define i32 @mul_of_reduce_mul(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @mul_of_reduce_mul(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v1)
%res = mul i32 %v0_red, %v1_red
ret i32 %res
}

define i32 @and_of_reduce_and(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @and_of_reduce_and(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = and i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %v1)
%res = and i32 %v0_red, %v1_red
ret i32 %res
}

define i32 @or_of_reduce_or(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @or_of_reduce_or(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = or i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v1)
%res = or i32 %v0_red, %v1_red
ret i32 %res
}

define i32 @xor_of_reduce_xor(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @xor_of_reduce_xor(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.xor.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.xor.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = xor i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.xor.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.xor.v16i32(<16 x i32> %v1)
%res = xor i32 %v0_red, %v1_red
ret i32 %res
}
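
; Negative test: the scalar binary op (add) does not match the reduction
; kind (mul), so the fold should not apply.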

define i32 @reduction_does_not_match_binop(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @reduction_does_not_match_binop(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v1)
%res = add i32 %v0_red, %v1_red
ret i32 %res
}
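
; Negative test: the two reduction intrinsics differ (add vs. mul).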

define i32 @intrinsics_do_not_match(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @intrinsics_do_not_match(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v1)
%res = add i32 %v0_red, %v1_red
ret i32 %res
}
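
; Negative test: the reduced vectors have different element counts, so
; there is no common vector type to combine them in.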

define i32 @element_counts_do_not_match(<16 x i32> %v0, <8 x i32> %v1) {
; CHECK-LABEL: define i32 @element_counts_do_not_match(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <8 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %v1)
%res = add i32 %v0_red, %v1_red
ret i32 %res
}
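
; The reductions below have an additional use; presumably a fold would
; require the reductions to be one-use, so these should stay as-is.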

define i32 @multiple_use_of_reduction_0(<16 x i32> %v0, <16 x i32> %v1, ptr %p) {
; CHECK-LABEL: define i32 @multiple_use_of_reduction_0(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], ptr [[P:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: store i32 [[V0_RED]], ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v1)
%res = add i32 %v0_red, %v1_red
store i32 %v0_red, ptr %p
ret i32 %res
}

define i32 @multiple_use_of_reduction_1(<16 x i32> %v0, <16 x i32> %v1, ptr %p) {
; CHECK-LABEL: define i32 @multiple_use_of_reduction_1(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], ptr [[P:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: store i32 [[V1_RED]], ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v1)
%res = add i32 %v0_red, %v1_red
store i32 %v1_red, ptr %p
ret i32 %res
}
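
; nuw/nsw on the scalar add do not carry over to the element-wise vector
; add, so a fold must drop them.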

define i32 @do_not_preserve_overflow_flags(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @do_not_preserve_overflow_flags(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add nuw nsw i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v1)
%res = add nsw nuw i32 %v0_red, %v1_red
ret i32 %res
}
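
; A disjoint scalar or of two or-reductions implies the element-wise ors
; are disjoint too, so this flag is expected to survive a fold.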

define i32 @preserve_disjoint_flags(<16 x i32> %v0, <16 x i32> %v1) {
; CHECK-LABEL: define i32 @preserve_disjoint_flags(
; CHECK-SAME: <16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = or disjoint i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v1)
%res = or disjoint i32 %v0_red, %v1_red
ret i32 %res
}
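
; Scalable-vector variant of the basic add case.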

define i32 @add_of_reduce_add_vscale(<vscale x 16 x i32> %v0, <vscale x 16 x i32> %v1) {
; CHECK-LABEL: define i32 @add_of_reduce_add_vscale(
; CHECK-SAME: <vscale x 16 x i32> [[V0:%.*]], <vscale x 16 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %v1)
%res = add i32 %v0_red, %v1_red
ret i32 %res
}
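
; Negative test: scalable vectors with mismatched element counts.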

define i32 @element_counts_do_not_match_vscale(<vscale x 16 x i32> %v0, <vscale x 8 x i32> %v1) {
; CHECK-LABEL: define i32 @element_counts_do_not_match_vscale(
; CHECK-SAME: <vscale x 16 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]]) {
; CHECK-NEXT: [[V0_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> [[V0]])
; CHECK-NEXT: [[V1_RED:%.*]] = tail call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[V1]])
; CHECK-NEXT: [[RES:%.*]] = add i32 [[V0_RED]], [[V1_RED]]
; CHECK-NEXT: ret i32 [[RES]]
;
%v0_red = tail call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %v0)
%v1_red = tail call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> %v1)
%res = add i32 %v0_red, %v1_red
ret i32 %res
}