; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -early-cse < %s | FileCheck %s

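; Store-then-load: a masked store followed by a masked load of the same
; pointer with the same mask. The autogenerated checks record that both
; calls are currently kept.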
define <128 x i8> @f0(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
; CHECK-LABEL: @f0(
; CHECK-NEXT:    [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[A1]], <128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]])
; CHECK-NEXT:    [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
; CHECK-NEXT:    ret <128 x i8> [[V1]]
;
  %v0 = icmp eq <128 x i8> %a1, %a2
  call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %a1, <128 x i8>* %a0, i32 4, <128 x i1> %v0)
  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
  ret <128 x i8> %v1
}

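; Load-then-store: a masked load followed by a masked store of the loaded
; value to the same pointer with the same mask. The checks record that both
; calls are currently kept.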
define <128 x i8> @f1(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
; CHECK-LABEL: @f1(
; CHECK-NEXT:    [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
; CHECK-NEXT:    [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[V1]], <128 x i8>* [[A0]], i32 4, <128 x i1> [[V0]])
; CHECK-NEXT:    ret <128 x i8> [[V1]]
;
  %v0 = icmp eq <128 x i8> %a1, %a2
  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
  call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %v1, <128 x i8>* %a0, i32 4, <128 x i1> %v0)
  ret <128 x i8> %v1
}

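; Load-then-load: two identical masked loads from the same pointer with the
; same mask. The checks expect the second load to be replaced by the first,
; so the add uses the first loaded value twice.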
define <128 x i8> @f2(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
; CHECK-LABEL: @f2(
; CHECK-NEXT:    [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
; CHECK-NEXT:    [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
; CHECK-NEXT:    [[V3:%.*]] = add <128 x i8> [[V1]], [[V1]]
; CHECK-NEXT:    ret <128 x i8> [[V3]]
;
  %v0 = icmp eq <128 x i8> %a1, %a2
  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
  %v2 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
  %v3 = add <128 x i8> %v1, %v2
  ret <128 x i8> %v3
}

declare <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>*, i32, <128 x i1>, <128 x i8>)
declare void @llvm.masked.store.v128i8.p0v128i8(<128 x i8>, <128 x i8>*, i32, <128 x i1>)