; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -stop-after=legalizer -o - %s | FileCheck %s
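; The RUN line legalizes with GlobalISel (-global-isel) for gfx7 (hawaii) and stops
; after the legalizer, so FileCheck matches the legalized MIR rather than final ISA.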

; Test the legalizer's behavior when vector loads carry range metadata and are
; lowered by bitcasting to a scalar integer, in which case the range metadata
; has to be dropped.
| 7 | + |
| 8 | +define <4 x i8> @global_load_v4i8_align4__rangemd(ptr addrspace(1) %ptr) { |
| 9 | + ; CHECK-LABEL: name: global_load_v4i8_align4__rangemd |
| 10 | + ; CHECK: bb.1 (%ir-block.0): |
| 11 | + ; CHECK-NEXT: liveins: $vgpr0, $vgpr1 |
| 12 | + ; CHECK-NEXT: {{ $}} |
| 13 | + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 |
| 14 | + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 |
| 15 | + ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) |
| 16 | + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p1) :: (load (s32) from %ir.ptr, addrspace 1) |
| 17 | + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 |
| 18 | + ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C]](s32) |
| 19 | + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 |
| 20 | + ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) |
| 21 | + ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 |
| 22 | + ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C2]](s32) |
| 23 | + ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32) |
| 24 | + ; CHECK-NEXT: $vgpr1 = COPY [[LSHR]](s32) |
| 25 | + ; CHECK-NEXT: $vgpr2 = COPY [[LSHR1]](s32) |
| 26 | + ; CHECK-NEXT: $vgpr3 = COPY [[LSHR2]](s32) |
| 27 | + ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 |
| 28 | + %load = load <4 x i8>, ptr addrspace(1) %ptr, align 4, !range !0, !noundef !1 |
| 29 | + ret <4 x i8> %load |
| 30 | +} |

; This <3 x i8> load is also widened to a 32-bit load.
define <3 x i8> @global_load_v3i8_align4__rangemd(ptr addrspace(1) %ptr) {
  ; CHECK-LABEL: name: global_load_v3i8_align4__rangemd
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p1) :: (load (s32) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
  ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C]](s32)
  ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
  ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
  ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK-NEXT: $vgpr1 = COPY [[LSHR]](s32)
  ; CHECK-NEXT: $vgpr2 = COPY [[LSHR1]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %load = load <3 x i8>, ptr addrspace(1) %ptr, align 4, !range !0, !noundef !1
  ret <3 x i8> %load
}

define <2 x i8> @global_load_v2i8_align2__rangemd(ptr addrspace(1) %ptr) {
  ; CHECK-LABEL: name: global_load_v2i8_align2__rangemd
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p1) :: (load (s16) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
  ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C]](s32)
  ; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK-NEXT: $vgpr1 = COPY [[LSHR]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
  %load = load <2 x i8>, ptr addrspace(1) %ptr, align 2, !range !0, !noundef !1
  ret <2 x i8> %load
}

define <2 x i64> @global_load_v2i64_align16__rangemd(ptr addrspace(1) %ptr) {
  ; CHECK-LABEL: name: global_load_v2i64_align16__rangemd
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[MV]](p1) :: (load (<2 x s64>) from %ir.ptr, !range !2, addrspace 1)
  ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
  ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %load = load <2 x i64>, ptr addrspace(1) %ptr, align 16, !range !2, !noundef !1
  ret <2 x i64> %load
}
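
; The <2 x s64> load above keeps its vector type through legalization, so the
; !range is preserved on its memory operand, unlike the i8-vector cases above
; where the load is bitcast to a scalar and the metadata is dropped.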

; This goes the other direction and converts a scalar load to a vector.
define i128 @global_load_i128_align16__rangemd(ptr addrspace(1) %ptr) {
  ; CHECK-LABEL: name: global_load_i128_align16__rangemd
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[MV]](p1) :: (load (<4 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[LOAD]](<4 x s32>)
  ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]](s128)
  ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT: $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %load = load i128, ptr addrspace(1) %ptr, align 16, !range !3, !noundef !1
  ret i128 %load
}

; The loads below are extended, so we should be able to extend the range
; metadata as well.
define i32 @global_sextload_i8_align1__rangemd(ptr addrspace(1) %ptr) {
  ; CHECK-LABEL: name: global_sextload_i8_align1__rangemd
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[MV]](p1) :: (load (s8) from %ir.ptr, !range !0, addrspace 1)
  ; CHECK-NEXT: $vgpr0 = COPY [[SEXTLOAD]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
  %load = load i8, ptr addrspace(1) %ptr, align 1, !range !0, !noundef !1
  %ext = sext i8 %load to i32
  ret i32 %ext
}

define i32 @global_zextload_i8_align1__rangemd(ptr addrspace(1) %ptr) {
  ; CHECK-LABEL: name: global_zextload_i8_align1__rangemd
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[MV]](p1) :: (load (s8) from %ir.ptr, !range !4, addrspace 1)
  ; CHECK-NEXT: $vgpr0 = COPY [[SEXTLOAD]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
  %load = load i8, ptr addrspace(1) %ptr, align 1, !range !4, !noundef !1
  %ext = sext i8 %load to i32
  ret i32 %ext
}

!0 = !{i8 -32, i8 64}
!1 = !{}
!2 = !{i64 -2048, i64 1024}
!3 = !{i128 -2048, i128 1024}
!4 = !{i8 8, i8 64}
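
; !0 constrains each loaded i8 to the half-open range [-32, 64), !2 and !3 are
; the analogous i64/i128 ranges [-2048, 1024), and !4 is the non-negative i8
; range [8, 64). !1 is the empty node used as the !noundef operand.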