; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

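; Check codegen for masked loads and stores carrying !nontemporal metadata.
; As the assertions below show, the hint is not reflected in the emitted
; instructions: the masked forms use ordinary SVE predicated ld1w/st1w rather
; than non-temporal variants, and all-true masks fold to plain ldr/str.
;
; A fixed-length <4 x i1> mask arrives in a NEON register, so it is first
; converted into an SVE predicate: widened (ushll), sign-extended to lane-wide
; 0/-1 values (shl + cmlt), then compared against zero (cmpne).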
define <4 x i32> @masked_load_v4i32(ptr %a, <4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    shl v0.4s, v0.4s, #31
; CHECK-NEXT:    cmlt v0.4s, v0.4s, #0
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 1, <4 x i1> %mask, <4 x i32> undef), !nontemporal !0
  ret <4 x i32> %load
}

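; The store side materializes the predicate the same way (ushll, shl, cmlt,
; cmpne) and then issues a predicated st1w.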
define void @masked_store_v4i32(<4 x i32> %x, ptr %a, <4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushll v1.4s, v1.4h, #0
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    shl v1.4s, v1.4s, #31
; CHECK-NEXT:    cmlt v1.4s, v1.4s, #0
; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %x, ptr %a, i32 1, <4 x i1> %mask), !nontemporal !0
  ret void
}

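; An all-true mask lets the masked load fold away entirely; only an unmasked
; ldr remains.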
define <4 x i32> @load_v4i32(ptr %a) nounwind {
; CHECK-LABEL: load_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    ret
  %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 1, <4 x i1> <i1 1, i1 1, i1 1, i1 1>, <4 x i32> undef), !nontemporal !0
  ret <4 x i32> %load
}

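; Same folding for the store: an all-true mask leaves a plain str.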
define void @store_v4i32(<4 x i32> %x, ptr %a) nounwind {
; CHECK-LABEL: store_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %x, ptr %a, i32 1, <4 x i1> <i1 1, i1 1, i1 1, i1 1>), !nontemporal !0
  ret void
}

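; Scalable vectors take <vscale x 4 x i1> masks that are already SVE
; predicates in p0, so the load and store below map straight onto predicated
; ld1w/st1w with no mask conversion.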
define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef), !nontemporal !0
  ret <vscale x 4 x i32> %load
}

define void @masked_store_nxv4i32(<vscale x 4 x i32> %x, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> %x, ptr %a, i32 1, <vscale x 4 x i1> %mask), !nontemporal !0
  ret void
}

declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)
declare <4 x i32> @llvm.masked.load.v4i32(ptr, i32, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)

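; Per the LLVM LangRef, !nontemporal must reference a metadata node holding a
; single i32 with value 1.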
!0 = !{i32 1}