/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#version 450 core

#define PRECISION ${PRECISION}

#include "indexing_utils.h"

layout(std430) buffer;

layout(set = 0, binding = 0, ${IMAGE_FORMAT[DTYPE]}) uniform PRECISION restrict writeonly ${IMAGE_T[2][DTYPE]} image_out;
layout(set = 0, binding = 1) buffer PRECISION restrict readonly Buffer {
  ${T[DTYPE]} data[];
}
buffer_in;

// Corresponds to {1,4,3,9} in the example below.
layout(set = 0, binding = 2) uniform PRECISION restrict GpuSizes {
  ivec4 data;
}
gpu_sizes;

// Corresponds to {3,3,1,11} in the example below.
layout(set = 0, binding = 3) uniform PRECISION restrict OriginalSizes {
  ivec4 data;
}
original_sizes;

// Corresponds to {1,12} in the example below.
layout(set = 0, binding = 4) uniform PRECISION restrict PaddedSizes {
  ivec2 data;
}
padded_sizes;

layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;

/*
 * Computes special prepacking for a depthwise convolution. Each shader
 * invocation calculates the input buffer locations to read into its output
 * texel. This packing was originally developed on CPU and that approach is
 * described in the rest of this comment. Refer to the code-level comments
 * below for how we translate it to the GPU by reversing the steps.
 *
 * Consider an example weight tensor of size {11,1,3,3}. The following
 * transformations will be applied.
 *
 * 1. Pad the N dim so that it is a multiple of 4. In this case, 1
 *    batch of padding is added, producing a tensor of size {12,1,3,3}.
 *    at::pad(x, {0,0,0,0,0,0,0,1}, "constant", 0);
 *
 * 2. Flatten the last two dims by reshaping the tensor:
 *    x.reshape({12,1,9});
 *
 * 3. "Fold" the N dim into the C dim. Split the tensor along the N dim so that
 *    each split has 4 channels.
 *    x.reshape({3,4,1,9});
 *
 * 4. Stack the batches on each other vertically by permuting the N and C dims
 *    and reshaping the tensor.
 *    x.permute({1,0,2,3}).reshape({4,3,9});
 */
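/*
 * Putting the steps together, a CPU reference sketch of the whole prepack
 * (using the ATen calls quoted above; illustrative only, not executed by this
 * shader) is:
 *
 *   x = at::pad(x, {0,0,0,0,0,0,0,1}, "constant", 0); // {11,1,3,3} -> {12,1,3,3}
 *   x = x.reshape({12,1,9});                          // flatten H and W
 *   x = x.reshape({3,4,1,9});                         // split N into 3 groups of 4
 *   x = x.permute({1,0,2,3}).reshape({4,3,9});        // fold N into C and stack
 */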
void main() {
  const ivec3 pos = ivec3(gl_GlobalInvocationID);
  const ivec4 coord = POS_TO_COORD_CHANNELS_PACKED(pos, gpu_sizes.data);

  if (any(greaterThanEqual(coord, gpu_sizes.data))) {
    return;
  }

  // As in the usual staging shaders, map from GPU texel position to normal CPU
  // buffer indices: (9,3) -> (4,3,9)
  const int base_index = COORD_TO_BUFFER_IDX(coord, gpu_sizes.data);
  const ivec4 p0 =
      base_index + ivec4(0, 1, 2, 3) * STRIDE_CHANNELS_PACKED(gpu_sizes.data);
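  // p0 holds the buffer indices of the 4 consecutive channels packed into this
  // texel. With the example sizes, STRIDE_CHANNELS_PACKED(gpu_sizes.data) is
  // expected to be the per-channel plane size H * W = 3 * 9 = 27 (see
  // indexing_utils.h for the exact definition).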

  // Re-map the normal CPU buffer indices to the special prepacked indices
  // through a series of mappings: reshape is a no-op on the underlying indices,
  // so we only need to map for the pad and the permute.
  const int Np = padded_sizes.data.x;
  const int N = original_sizes.data.w;
  const int C = original_sizes.data.z;
  const int H = original_sizes.data.y;
  const int W = original_sizes.data.x;
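  // For the example tensors: N = 11, C = 1, H = 3, W = 3, and Np = 12 (N
  // rounded up to the next multiple of 4).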

  // Undo the step 4 permute: (4,3,1,9) -> (3,4,1,9)
  const ivec4 p1 = SWAP_ADJ_DIMS(p0, 4, (Np / 4), (C * H * W));
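  // Assuming SWAP_ADJ_DIMS(i, d0, d1, s) (from indexing_utils.h) transposes two
  // adjacent dims of sizes d0 and d1 over an inner stride s, i.e. it decomposes
  // i = a * d1 * s + b * s + r and returns b * d0 * s + a * s + r, then p1
  // indexes the flattened padded {12,1,3,3} weight tensor.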

  // Undo step 1 pad: (12,1,3,3) -> (11,1,3,3)
  // For values in the padded region, write zero instead of buffer data.
  const ivec4 n = p1 / (C * H * W);
  const ivec4 mask = ivec4(greaterThanEqual(n, ivec4(N)));

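  // mix(a, b, m) with m in {0, 1} returns a when m == 0 and b when m == 1, so
  // lanes whose batch index n lands in the padded region contribute 0 to the
  // texel rather than the fetched buffer value. The int literal and mask
  // components rely on GLSL's implicit int -> float conversion (assuming
  // ${T[DTYPE]} is a floating-point type).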
  ${T[DTYPE]} val_x = mix(buffer_in.data[p1.x], 0, mask.x);
  ${T[DTYPE]} val_y = mix(buffer_in.data[p1.y], 0, mask.y);
  ${T[DTYPE]} val_z = mix(buffer_in.data[p1.z], 0, mask.z);
  ${T[DTYPE]} val_w = mix(buffer_in.data[p1.w], 0, mask.w);

  ${VEC4_T[DTYPE]} texel = ${VEC4_T[DTYPE]}(val_x, val_y, val_z, val_w);

  imageStore(image_out, pos.xy, texel);
}