/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#version 450 core

#define PRECISION ${PRECISION}

#include "indexing_utils.h"

layout(std430) buffer;

layout(set = 0, binding = 0, ${IMAGE_FORMAT[DTYPE]}) uniform PRECISION restrict writeonly ${IMAGE_T[2][DTYPE]} image_out;
layout(set = 0, binding = 1) buffer PRECISION restrict readonly Buffer {
  ${T[DTYPE]} data[];
}
buffer_in;

// Corresponds to {1,4,9,24} in the example below.
layout(set = 0, binding = 2) uniform PRECISION restrict GpuSizes {
  ivec4 data;
}
gpu_sizes;

// Corresponds to {3,3,7,10} in the example below.
layout(set = 0, binding = 3) uniform PRECISION restrict OriginalSizes {
  ivec4 data;
}
original_sizes;

// Corresponds to {8,12} in the example below.
layout(set = 0, binding = 4) uniform PRECISION restrict PaddedSizes {
  ivec2 data;
}
padded_sizes;

layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;

/*
 * Computes special prepacking for a 2D convolution. Each shader invocation
 * calculates the input buffer location to read into the desired texel. This
 * packing was originally developed on the CPU, and that approach is described
 * in the rest of this comment. Refer to the code-level comments for how we
 * translate it to the GPU by reversing the steps.
 *
 * Consider an example weight tensor of size {10,7,3,3}. The following
 * transformations will be applied.
 *
 * 1. Pad the N and C dims so that both are a multiple of 4. In this case, 2
 * batches and 1 channel of padding are added, producing a tensor of size
 * {12,8,3,3}.
 *    at::pad(x, {0,0,0,0,0,1,0,2}, "constant", 0);
 *
 * 2. Split the tensor along the C dim so that each split has 4 channels.
 *    x.reshape({12,2,4,3,3});
 *
 * 3. For each split, "fold" the C dim into the W dim. Suppose the first rows
 * at H=0 of the split have values
 *    0,1,2 | 10,11,12 | 20,21,22 | 30,31,32
 *
 * where | denotes a channel boundary. Then, the goal is to combine those rows
 * into one row with the values
 *    0, 10, 20, 30, 1, 11, 21, 31, 2, 12, 22, 32
 *
 *    x.permute({0,1,3,4,2}).reshape({12,2,3,12});
 *
 * 4. Stack the splits belonging to the same batch horizontally by swapping the
 * C and H dims.
 *    x.permute({0,2,1,3}).reshape({12,3,24});
 *
 * 5. Repeat a similar process to "fold" the N dim into the C dim. Split along
 * the N dim so that each split has 4 batches.
 *    x.reshape({3,4,3,24});
 *
 * 6. Stack the batches on each other vertically by swapping the N and C dims.
 *    x.permute({1,0,2,3}).reshape({4,9,24});
 */
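/*
 * For reference, below is a consolidated sketch of steps 1-6 in PyTorch
 * (Python). It is illustrative only and is not executed by this shader; the
 * torch import and the example tensor are assumptions used to demonstrate the
 * CPU-side packing end to end.
 *
 *    import torch
 *    import torch.nn.functional as F
 *
 *    x = torch.randn(10, 7, 3, 3)
 *    # 1. Pad N and C up to multiples of 4: {10,7,3,3} -> {12,8,3,3}
 *    x = F.pad(x, (0, 0, 0, 0, 0, 1, 0, 2))
 *    # 2. Split C into groups of 4: -> {12,2,4,3,3}
 *    x = x.reshape(12, 2, 4, 3, 3)
 *    # 3. Fold C into W: -> {12,2,3,12}
 *    x = x.permute(0, 1, 3, 4, 2).reshape(12, 2, 3, 12)
 *    # 4. Stack the C splits horizontally: -> {12,3,24}
 *    x = x.permute(0, 2, 1, 3).reshape(12, 3, 24)
 *    # 5. Split N into groups of 4: -> {3,4,3,24}
 *    x = x.reshape(3, 4, 3, 24)
 *    # 6. Stack the batches vertically: -> {4,9,24}
 *    x = x.permute(1, 0, 2, 3).reshape(4, 9, 24)
 */
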
void main() {
  const ivec3 pos = ivec3(gl_GlobalInvocationID);
  const ivec4 coord = POS_TO_COORD_CHANNELS_PACKED(pos, gpu_sizes.data);

  if (any(greaterThanEqual(coord, gpu_sizes.data))) {
    return;
  }

  // As in usual staging shaders, map from GPU texel position to normal CPU
  // buffer indices: (24,9) -> (4,9,24)
  const int base_index = COORD_TO_BUFFER_IDX(coord, gpu_sizes.data);
  const ivec4 p0 =
      base_index + ivec4(0, 1, 2, 3) * STRIDE_CHANNELS_PACKED(gpu_sizes.data);
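  // p0 now holds the flat indices, within the packed {4,9,24} arrangement, of
  // the four values that land in this texel; STRIDE_CHANNELS_PACKED is assumed
  // to return the per-channel plane size (H * W of the packed sizes) that
  // separates consecutive channels in the channels-packed layout.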

  // Re-map those normal buffer indices to locations in the original weight
  // buffer by undoing the packing through a series of index mappings: reshape
  // is a no-op on the underlying indices, so only the pad and permute steps
  // need to be reversed.
  const int Np = padded_sizes.data.y;
  const int Cp = padded_sizes.data.x;
  const int N = original_sizes.data.w;
  const int C = original_sizes.data.z;
  const int H = original_sizes.data.y;
  const int W = original_sizes.data.x;
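  // In the running example above: W = 3, H = 3, C = 7, N = 10, with padded
  // channel and batch counts Cp = 8 and Np = 12.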

  // Undo step 6 permute: (4,3,3,24) -> (3,4,3,24)
  // Undo step 4 permute: (12,3,2,12) -> (12,2,3,12)
  // Undo step 3 permute, part 1: (12,2,3h,3w,4) -> (12,2,3h,4,3w)
  // Undo step 3 permute, part 2: (12,2,3h,4,3w) -> (12,2,4,3h,3w)
  const ivec4 p1 = SWAP_ADJ_DIMS(p0, 4, (Np / 4), (H * Cp * W));
  const ivec4 p2 = SWAP_ADJ_DIMS(p1, H, (Cp / 4), (W * 4));
  const ivec4 p3 = SWAP_ADJ_DIMS(p2, W, 4, 1);
  const ivec4 p4 = SWAP_ADJ_DIMS(p3, H, 4, W);
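  // SWAP_ADJ_DIMS(idx, X, Y, Z), defined in indexing_utils.h, is assumed to
  // remap a flat index as if two adjacent dimensions of extents X (outer) and
  // Y (inner), followed by a contiguous block of size Z, were transposed; each
  // call above therefore inverts exactly one of the permutes described in the
  // packing comment.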

  // Undo step 1 pad: (12,8,3,3) -> (10,7,3,3)
  // For values in the padded region, write zero instead of buffer data.
  const ivec4 c = p4 % (Cp * H * W) / (H * W);
  const ivec4 n = p4 / (Cp * H * W);
  const ivec4 p5 = p4 - n * (Cp - C) * H * W;
  const ivec4 mask = ivec4(greaterThanEqual(c, ivec4(C))) |
      ivec4(greaterThanEqual(n, ivec4(N)));
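  // Here c and n are the channel and batch coordinates of each index in the
  // padded {12,8,3,3} tensor, and mask marks components that fall inside the
  // added padding. p5 rebases each index into the unpadded buffer by removing
  // the (Cp - C) padded channels contributed by every preceding batch.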

  ${T[DTYPE]} val_x = mix(buffer_in.data[p5.x], 0, mask.x);
  ${T[DTYPE]} val_y = mix(buffer_in.data[p5.y], 0, mask.y);
  ${T[DTYPE]} val_z = mix(buffer_in.data[p5.z], 0, mask.z);
  ${T[DTYPE]} val_w = mix(buffer_in.data[p5.w], 0, mask.w);
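  // mix(a, 0, m) above yields a when m == 0 and 0 when m == 1, so padded
  // positions are written as zero.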

  ${VEC4_T[DTYPE]} texel = ${VEC4_T[DTYPE]}(val_x, val_y, val_z, val_w);

  imageStore(image_out, pos.xy, texel);
}