; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 %s


; DAGCombiner will transform:
; (fabsf (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
; unless isFabsFree returns true
; fabsf() libcall on a value bitcast from i32. The checked output uses the
; |PV.W| source modifier (no integer AND with 0x7FFFFFFF), showing the
; DAGCombiner transform described above was not applied for R600.
define amdgpu_kernel void @s_fabsf_fn_free(ptr addrspace(1) %out, i32 %in) {
; R600-LABEL: s_fabsf_fn_free:
; R600: ; %bb.0:
; R600-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: MOV * T0.W, KC0[2].Z,
; R600-NEXT: MOV T0.X, |PV.W|,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %bc= bitcast i32 %in to float
  %fabs = call float @fabsf(float %bc)
  store float %fabs, ptr addrspace(1) %out
  ret void
}
| 25 | + |
; Same as @s_fabsf_fn_free, but using the llvm.fabs.f32 intrinsic instead of
; the fabsf libcall; expected codegen is identical (|PV.W| modifier).
define amdgpu_kernel void @s_fabsf_free(ptr addrspace(1) %out, i32 %in) {
; R600-LABEL: s_fabsf_free:
; R600: ; %bb.0:
; R600-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: MOV * T0.W, KC0[2].Z,
; R600-NEXT: MOV T0.X, |PV.W|,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %bc= bitcast i32 %in to float
  %fabs = call float @llvm.fabs.f32(float %bc)
  store float %fabs, ptr addrspace(1) %out
  ret void
}
| 43 | + |
; Plain scalar fabs of a float kernel argument (no bitcast involved).
define amdgpu_kernel void @s_fabsf_f32(ptr addrspace(1) %out, float %in) {
; R600-LABEL: s_fabsf_f32:
; R600: ; %bb.0:
; R600-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: MOV * T0.W, KC0[2].Z,
; R600-NEXT: MOV T0.X, |PV.W|,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %fabs = call float @llvm.fabs.f32(float %in)
  store float %fabs, ptr addrspace(1) %out
  ret void
}
| 60 | + |
; Vector fabs, <2 x float>: each lane is handled with its own MOV using the
; |src| modifier, then both lanes are stored with a single STORE_RAW T0.XY.
define amdgpu_kernel void @fabs_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
; R600-LABEL: fabs_v2f32:
; R600: ; %bb.0:
; R600-NEXT: ALU 5, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: MOV * T0.W, KC0[3].X,
; R600-NEXT: MOV T0.Y, |PV.W|,
; R600-NEXT: MOV * T0.W, KC0[2].W,
; R600-NEXT: MOV T0.X, |PV.W|,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
  store <2 x float> %fabs, ptr addrspace(1) %out
  ret void
}
| 79 | + |
; Vector fabs, <4 x float>: four per-lane MOVs with the |src| modifier feeding
; one STORE_RAW T0.XYZW.
define amdgpu_kernel void @fabsf_v4f32(ptr addrspace(1) %out, <4 x float> %in) {
; R600-LABEL: fabsf_v4f32:
; R600: ; %bb.0:
; R600-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: MOV T0.W, KC0[4].X,
; R600-NEXT: MOV * T1.W, KC0[3].W,
; R600-NEXT: MOV * T0.W, |PV.W|,
; R600-NEXT: MOV T0.Z, |T1.W|,
; R600-NEXT: MOV * T1.W, KC0[3].Z,
; R600-NEXT: MOV T0.Y, |PV.W|,
; R600-NEXT: MOV * T1.W, KC0[3].Y,
; R600-NEXT: MOV T0.X, |PV.W|,
; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %fabs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %in)
  store <4 x float> %fabs, ptr addrspace(1) %out
  ret void
}
| 102 | + |
; fabsf libcall whose result feeds an fmul: the checked output folds the abs
; into the multiply as a source modifier (MUL_IEEE ... |KC0[2].Z| ...), so no
; separate fabs instruction is emitted.
define amdgpu_kernel void @fabsf_fn_fold(ptr addrspace(1) %out, float %in0, float %in1) {
; R600-LABEL: fabsf_fn_fold:
; R600: ; %bb.0:
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: MUL_IEEE * T1.X, |KC0[2].Z|, KC0[2].W,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %fabs = call float @fabsf(float %in0)
  %fmul = fmul float %fabs, %in1
  store float %fmul, ptr addrspace(1) %out
  ret void
}
| 119 | + |
; Same fold as @fabsf_fn_fold, but via the llvm.fabs.f32 intrinsic; expected
; codegen is identical (abs folded into MUL_IEEE as a |src| modifier).
define amdgpu_kernel void @fabs_fold(ptr addrspace(1) %out, float %in0, float %in1) {
; R600-LABEL: fabs_fold:
; R600: ; %bb.0:
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: MUL_IEEE * T1.X, |KC0[2].Z|, KC0[2].W,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %fabs = call float @llvm.fabs.f32(float %in0)
  %fmul = fmul float %fabs, %in1
  store float %fmul, ptr addrspace(1) %out
  ret void
}
| 136 | + |
; Integer-pattern abs: (bitcast (and (bitcast f32), 0x7FFFFFFF)) used by an
; fadd. The checked output shows it is recognized as fabs and folded into the
; ADD as a |src| modifier rather than lowered as an integer AND.
define amdgpu_kernel void @bitpreserve_fabsf_f32(ptr addrspace(1) %out, float %in) {
; R600-LABEL: bitpreserve_fabsf_f32:
; R600: ; %bb.0:
; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 4:
; R600-NEXT: LSHR T0.X, KC0[2].Y, literal.x,
; R600-NEXT: ADD * T1.X, |KC0[2].Z|, 1.0,
; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %in.bc = bitcast float %in to i32
  %int.abs = and i32 %in.bc, 2147483647
  %bc = bitcast i32 %int.abs to float
  %fadd = fadd float %bc, 1.0
  store float %fadd, ptr addrspace(1) %out
  ret void
}
| 155 | + |
; External declarations: the fabsf libcall and the scalar/vector llvm.fabs
; intrinsics exercised by the tests above.
declare float @fabsf(float) readnone
declare float @llvm.fabs.f32(float) readnone
declare <2 x float> @llvm.fabs.v2f32(<2 x float>) readnone
declare <4 x float> @llvm.fabs.v4f32(<4 x float>) readnone