; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s

define amdgpu_ps float @_amdgpu_ps_main() #0 {
; Exercises s_clause bundling of image_sample / s_buffer_load runs on gfx1010.
; CHECK lines below are autogenerated; regenerate with update_llc_test_checks.py
; rather than editing by hand.
; GCN-LABEL: _amdgpu_ps_main:
; GCN: ; %bb.0: ; %.entry
; GCN-NEXT: s_mov_b32 s0, 0
; GCN-NEXT: s_mov_b32 s1, s0
; GCN-NEXT: s_mov_b32 s2, s0
; GCN-NEXT: s_mov_b32 s3, s0
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_mov_b32 s5, s0
; GCN-NEXT: s_mov_b32 s6, s0
; GCN-NEXT: s_mov_b32 s7, s0
; GCN-NEXT: image_sample v[0:1], v[0:1], s[0:7], s[0:3] dmask:0x3 dim:SQ_RSRC_IMG_2D
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_clause 0x2
; GCN-NEXT: image_sample v2, v[0:1], s[0:7], s[0:3] dmask:0x4 dim:SQ_RSRC_IMG_2D
; GCN-NEXT: image_sample v3, v[0:1], s[0:7], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GCN-NEXT: image_load v4, v[0:1], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
; GCN-NEXT: s_clause 0x3
; GCN-NEXT: s_buffer_load_dword s24, s[0:3], 0x5c
; GCN-NEXT: s_buffer_load_dword s28, s[0:3], 0x7c
; GCN-NEXT: s_buffer_load_dword s29, s[0:3], 0xc0
; GCN-NEXT: s_waitcnt_depctr 0xffe3
; GCN-NEXT: s_nop 0
; GCN-NEXT: s_buffer_load_dwordx4 s[0:3], s[0:3], 0x40
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_sub_f32_e64 v5, s24, s28
; GCN-NEXT: v_add_f32_e64 v7, s29, -1.0
; GCN-NEXT: s_clause 0x1
; GCN-NEXT: s_buffer_load_dwordx4 s[4:7], s[0:3], 0x50
; GCN-NEXT: s_nop 0
; GCN-NEXT: s_buffer_load_dword s0, s[0:3], 0x2c
; GCN-NEXT: v_fma_f32 v1, v1, v5, s28
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_clause 0x3
; GCN-NEXT: s_buffer_load_dwordx4 s[8:11], s[0:3], 0x60
; GCN-NEXT: s_buffer_load_dwordx4 s[12:15], s[0:3], 0x20
; GCN-NEXT: s_buffer_load_dwordx4 s[16:19], s[0:3], 0x0
; GCN-NEXT: s_buffer_load_dwordx4 s[20:23], s[0:3], 0x70
; GCN-NEXT: v_max_f32_e64 v6, s0, s0 clamp
; GCN-NEXT: s_buffer_load_dwordx4 s[24:27], s[0:3], 0x10
; GCN-NEXT: v_sub_f32_e32 v9, s0, v1
; GCN-NEXT: s_mov_b32 s0, 0x3c23d70a
; GCN-NEXT: v_mul_f32_e32 v5, s2, v6
; GCN-NEXT: v_fma_f32 v8, -s2, v6, s6
; GCN-NEXT: v_fmac_f32_e32 v1, v6, v9
; GCN-NEXT: v_fma_f32 v7, v6, v7, 1.0
; GCN-NEXT: v_fmac_f32_e32 v5, v8, v6
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v8, s10, v0
; GCN-NEXT: v_fma_f32 v0, -v0, s10, s14
; GCN-NEXT: v_fmac_f32_e32 v8, v0, v6
; GCN-NEXT: v_sub_f32_e32 v0, v1, v7
; GCN-NEXT: v_fmac_f32_e32 v7, v0, v6
; GCN-NEXT: s_waitcnt vmcnt(2)
; GCN-NEXT: v_mul_f32_e32 v9, s18, v2
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_mul_f32_e32 v3, s22, v3
; GCN-NEXT: v_add_f32_e32 v5, v2, v5
; GCN-NEXT: v_mul_f32_e32 v1, v9, v6
; GCN-NEXT: v_mul_f32_e32 v9, v6, v3
; GCN-NEXT: v_fmac_f32_e64 v8, -v6, v3
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_add_f32_e32 v4, v4, v5
; GCN-NEXT: v_fma_f32 v0, v2, s26, -v1
; GCN-NEXT: v_fmac_f32_e32 v9, v8, v6
; GCN-NEXT: v_mul_f32_e32 v3, v4, v6
; GCN-NEXT: v_fma_f32 v4, v7, s0, 0x3ca3d70a
; GCN-NEXT: v_fmac_f32_e32 v1, v0, v6
; GCN-NEXT: v_mul_f32_e32 v0, v2, v6
; GCN-NEXT: v_mul_f32_e32 v2, v9, v4
; GCN-NEXT: v_mul_f32_e32 v1, v3, v1
; GCN-NEXT: v_fmac_f32_e32 v1, v2, v0
; GCN-NEXT: v_max_f32_e32 v0, 0, v1
; GCN-NEXT: ; return to shader part epilog
.entry:
  %0 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i2243 = extractelement <3 x float> %0, i32 2
  %1 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 0, i32 0)
  %2 = shufflevector <3 x i32> %1, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %3 = bitcast <4 x i32> %2 to <4 x float>
  %.i2248 = extractelement <4 x float> %3, i32 2
  %.i2249 = fmul reassoc nnan nsz arcp contract afn float %.i2243, %.i2248
  %4 = call reassoc nnan nsz arcp contract afn float @llvm.amdgcn.fmed3.f32(float undef, float 0.000000e+00, float 1.000000e+00)
  %5 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i2333 = extractelement <3 x float> %5, i32 2
  %6 = call reassoc nnan nsz arcp contract afn float @llvm.amdgcn.fmed3.f32(float undef, float 0.000000e+00, float 1.000000e+00)
  %7 = call <2 x float> @llvm.amdgcn.image.sample.2d.v2f32.f32(i32 3, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i1408 = extractelement <2 x float> %7, i32 1
  %.i0364 = extractelement <2 x float> %7, i32 0
  %8 = call float @llvm.amdgcn.image.sample.2d.f32.f32(i32 1, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %9 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 112, i32 0)
  %10 = shufflevector <3 x i32> %9, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %11 = bitcast <4 x i32> %10 to <4 x float>
  %.i2360 = extractelement <4 x float> %11, i32 2
  %.i2363 = fmul reassoc nnan nsz arcp contract afn float %.i2360, %8
  %12 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 96, i32 0)
  %13 = shufflevector <3 x i32> %12, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %14 = bitcast <4 x i32> %13 to <4 x float>
  %.i2367 = extractelement <4 x float> %14, i32 2
  %.i2370 = fmul reassoc nnan nsz arcp contract afn float %.i0364, %.i2367
  %15 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 32, i32 0)
  %16 = shufflevector <3 x i32> %15, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %17 = bitcast <4 x i32> %16 to <4 x float>
  %.i2373 = extractelement <4 x float> %17, i32 2
  %.i2376 = fsub reassoc nnan nsz arcp contract afn float %.i2373, %.i2370
  %.i2383 = fmul reassoc nnan nsz arcp contract afn float %.i2376, %6
  %.i2386 = fadd reassoc nnan nsz arcp contract afn float %.i2370, %.i2383
  %18 = call reassoc nnan nsz arcp contract afn float @llvm.amdgcn.fmed3.f32(float undef, float 0.000000e+00, float 1.000000e+00)
  %19 = fmul reassoc nnan nsz arcp contract afn float %18, %.i2363
  %.i2394 = fsub reassoc nnan nsz arcp contract afn float %.i2386, %19
  %.i2397 = fmul reassoc nnan nsz arcp contract afn float %.i2363, %18
  %.i2404 = fmul reassoc nnan nsz arcp contract afn float %.i2394, %4
  %.i2407 = fadd reassoc nnan nsz arcp contract afn float %.i2397, %.i2404
  %20 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> undef, i32 92, i32 0)
  %21 = bitcast i32 %20 to float
  %22 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> undef, i32 124, i32 0)
  %23 = bitcast i32 %22 to float
  %24 = fsub reassoc nnan nsz arcp contract afn float %21, %23
  %25 = fmul reassoc nnan nsz arcp contract afn float %.i1408, %24
  %26 = fadd reassoc nnan nsz arcp contract afn float %25, %23
  %27 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> undef, i32 44, i32 0)
  %28 = bitcast i32 %27 to float
  %29 = fsub reassoc nnan nsz arcp contract afn float %28, %26
  %30 = fmul reassoc nnan nsz arcp contract afn float %6, %29
  %31 = fadd reassoc nnan nsz arcp contract afn float %26, %30
  %32 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> undef, i32 192, i32 0)
  %33 = bitcast i32 %32 to float
  %34 = fadd reassoc nnan nsz arcp contract afn float %33, -1.000000e+00
  %35 = fmul reassoc nnan nsz arcp contract afn float %18, %34
  %36 = fadd reassoc nnan nsz arcp contract afn float %35, 1.000000e+00
  %37 = fsub reassoc nnan nsz arcp contract afn float %31, %36
  %38 = fmul reassoc nnan nsz arcp contract afn float %37, %4
  %39 = fadd reassoc nnan nsz arcp contract afn float %36, %38
  %40 = fmul reassoc nnan nsz arcp contract afn float %39, 0x3F847AE140000000
  %41 = fadd reassoc nnan nsz arcp contract afn float %40, 0x3F947AE140000000
  %.i2415 = fmul reassoc nnan nsz arcp contract afn float %.i2407, %41
  %42 = call <3 x float> @llvm.amdgcn.image.load.mip.2d.v3f32.i32(i32 7, i32 undef, i32 undef, i32 0, <8 x i32> undef, i32 0, i32 0)
  %.i2521 = extractelement <3 x float> %42, i32 2
  %43 = call reassoc nnan nsz arcp contract afn float @llvm.amdgcn.fmed3.f32(float undef, float 0.000000e+00, float 1.000000e+00)
  %44 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i2465 = extractelement <3 x float> %44, i32 2
  %.i2466 = fmul reassoc nnan nsz arcp contract afn float %.i2465, %43
  %.i2469 = fmul reassoc nnan nsz arcp contract afn float %.i2415, %.i2466
  %45 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 64, i32 0)
  %46 = shufflevector <3 x i32> %45, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %47 = bitcast <4 x i32> %46 to <4 x float>
  %.i2476 = extractelement <4 x float> %47, i32 2
  %.i2479 = fmul reassoc nnan nsz arcp contract afn float %.i2476, %18
  %48 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 80, i32 0)
  %49 = shufflevector <3 x i32> %48, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %50 = bitcast <4 x i32> %49 to <4 x float>
  %.i2482 = extractelement <4 x float> %50, i32 2
  %.i2485 = fsub reassoc nnan nsz arcp contract afn float %.i2482, %.i2479
  %.i2488 = fmul reassoc nnan nsz arcp contract afn float %.i2249, %18
  %.i2491 = fmul reassoc nnan nsz arcp contract afn float %.i2485, %4
  %.i2494 = fadd reassoc nnan nsz arcp contract afn float %.i2479, %.i2491
  %51 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i2515 = extractelement <3 x float> %51, i32 2
  %.i2516 = fadd reassoc nnan nsz arcp contract afn float %.i2515, %.i2494
  %.i2522 = fadd reassoc nnan nsz arcp contract afn float %.i2521, %.i2516
  %.i2525 = fmul reassoc nnan nsz arcp contract afn float %.i2522, %43
  %52 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 16, i32 0)
  %53 = shufflevector <3 x i32> %52, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %54 = bitcast <4 x i32> %53 to <4 x float>
  %.i2530 = extractelement <4 x float> %54, i32 2
  %.i2531 = fmul reassoc nnan nsz arcp contract afn float %.i2333, %.i2530
  %.i2536 = fsub reassoc nnan nsz arcp contract afn float %.i2531, %.i2488
  %.i2539 = fmul reassoc nnan nsz arcp contract afn float %.i2536, %4
  %.i2542 = fadd reassoc nnan nsz arcp contract afn float %.i2488, %.i2539
  %.i2545 = fmul reassoc nnan nsz arcp contract afn float %.i2525, %.i2542
  %.i2548 = fadd reassoc nnan nsz arcp contract afn float %.i2469, %.i2545
  %.i2551 = call reassoc nnan nsz arcp contract afn float @llvm.maxnum.f32(float %.i2548, float 0.000000e+00)
  ret float %.i2551
}
; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
declare float @llvm.maxnum.f32(float, float) #1

; Function Attrs: nounwind readnone speculatable willreturn
declare float @llvm.amdgcn.fmed3.f32(float, float, float) #2

; Function Attrs: nounwind readonly willreturn
declare <2 x float> @llvm.amdgcn.image.sample.2d.v2f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #3

; Function Attrs: nounwind readonly willreturn
declare float @llvm.amdgcn.image.sample.2d.f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #3

; Function Attrs: nounwind readonly willreturn
declare <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #3

; Function Attrs: nounwind readonly willreturn
declare <3 x float> @llvm.amdgcn.image.load.mip.2d.v3f32.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #3

; Function Attrs: nounwind readnone willreturn
; NOTE(review): the autogenerated comment above says "readnone" but group #3
; below is "readonly" — the attribute group, not the comment, is authoritative;
; confirm whether #3 was intended here.
declare i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32>, i32, i32 immarg) #3

; Function Attrs: nounwind readnone willreturn
declare <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32>, i32, i32 immarg) #3

attributes #0 = { "denormal-fp-math-f32"="preserve-sign" }
attributes #1 = { nofree nosync nounwind readnone speculatable willreturn }
attributes #2 = { nounwind readnone speculatable willreturn }
attributes #3 = { nounwind readonly willreturn }