Skip to content

Commit 90d5191

Browse files
SS-JIA authored and facebook-github-bot committed
vTensor cleanup 7/N - Blanket replacement of packed_dim_whcn_idx with packed_dim (#5484)
Summary: Pull Request resolved: #5484 ## Context `packed_dim_whcn_idx` is a bit too verbose. Replace it with `packed_dim` for brevity. ghstack-source-id: 243563524 Reviewed By: jorgep31415 Differential Revision: D63032323 fbshipit-source-id: 523492534ae9905c4888bd150e22875110d6c64b
1 parent 7c6d58a commit 90d5191

File tree

18 files changed

+80
-88
lines changed

18 files changed

+80
-88
lines changed

backends/vulkan/runtime/api/containers/Tensor.cpp

Lines changed: 24 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,13 @@ namespace api {
1515

1616
std::vector<int64_t> calculate_dim_order(
1717
const size_t ndim,
18-
const int32_t packed_dim_whcn_idx) {
18+
const int32_t packed_dim) {
1919
// Special case for zero dim tensors
2020
if (ndim == 0) {
2121
return {0};
2222
}
2323
std::vector<int64_t> dim_order(ndim);
24-
int64_t last_dim = ndim - 1 - packed_dim_whcn_idx;
24+
int64_t last_dim = ndim - 1 - packed_dim;
2525

2626
int64_t cur_dim = 0;
2727
for (int d = 0; d < ndim; ++d) {
@@ -131,7 +131,7 @@ std::vector<int64_t> unsqueeze_strides(
131131

132132
std::vector<int64_t> calculate_padded_sizes(
133133
const std::vector<int64_t>& sizes,
134-
const int32_t packed_dim_whcn_idx) {
134+
const int32_t packed_dim) {
135135
int64_t ndim = sizes.size();
136136
if (ndim == 0) {
137137
ndim = 1;
@@ -145,7 +145,7 @@ std::vector<int64_t> calculate_padded_sizes(
145145
}
146146

147147
// Pad the packed dim to the next multiple of 4.
148-
const int64_t dim_offset = packed_dim_whcn_idx + 1;
148+
const int64_t dim_offset = packed_dim + 1;
149149
const int64_t padded_dim_size = utils::val_at(-dim_offset, sizes);
150150
padded_sizes.at(ndim_up4 - dim_offset) = utils::align_up_4(padded_dim_size);
151151

@@ -155,7 +155,7 @@ std::vector<int64_t> calculate_padded_sizes(
155155
utils::uvec3 calculate_image_extents(
156156
const std::vector<int64_t>& padded_sizes,
157157
const std::vector<int64_t>& axis_map,
158-
const int32_t packed_dim_whcn_idx) {
158+
const int32_t packed_dim) {
159159
VK_CHECK_COND(padded_sizes.size() == 4);
160160
VK_CHECK_COND(axis_map.size() == 4);
161161

@@ -176,8 +176,8 @@ utils::uvec3 calculate_image_extents(
176176
// Multiply the extents of the batch axis by the batch size.
177177
extents[batch_axis] *= padded_sizes.at(0);
178178

179-
VK_CHECK_COND(extents[axis_map.at(packed_dim_whcn_idx)] % 4 == 0);
180-
extents[axis_map.at(packed_dim_whcn_idx)] /= 4;
179+
VK_CHECK_COND(extents[axis_map.at(packed_dim)] % 4 == 0);
180+
extents[axis_map.at(packed_dim)] /= 4;
181181
return extents;
182182
}
183183

@@ -254,14 +254,14 @@ vTensorStorage::vTensorStorage(
254254
Context* const context,
255255
const utils::StorageType storage_type,
256256
const std::vector<int64_t>& axis_map,
257-
const int32_t packed_dim_whcn_idx,
257+
const int32_t packed_dim,
258258
const std::vector<int64_t>& padded_sizes,
259259
const vkapi::ScalarType dtype,
260260
const bool allocate_memory)
261261
: context_(context),
262262
storage_type_{storage_type},
263263
image_extents_(
264-
calculate_image_extents(padded_sizes, axis_map, packed_dim_whcn_idx)),
264+
calculate_image_extents(padded_sizes, axis_map, packed_dim)),
265265
buffer_length_{utils::multiply_integers(padded_sizes)},
266266
buffer_offset_{0},
267267
image_(allocate_image(
@@ -378,13 +378,12 @@ vTensor::vTensor(
378378
: dtype_(dtype),
379379
// Calculate tensor metadata
380380
sizes_(sizes.begin(), sizes.end()),
381-
packed_dim_whcn_idx_(
382-
utils::to_packed_dim_whcn_idx<int32_t>(memory_layout)),
383-
dim_order_(calculate_dim_order(sizes_.size(), packed_dim_whcn_idx_)),
381+
packed_dim_(utils::to_packed_dim<int32_t>(memory_layout)),
382+
dim_order_(calculate_dim_order(sizes_.size(), packed_dim_)),
384383
axis_map_(default_axis_map()),
385384
strides_(calculate_strides(sizes, dim_order_)),
386385
numel_(utils::multiply_integers(sizes_)),
387-
padded_sizes_{calculate_padded_sizes(sizes, packed_dim_whcn_idx_)},
386+
padded_sizes_{calculate_padded_sizes(sizes, packed_dim_)},
388387
unsqueezed_strides_{unsqueeze_strides(strides_, numel_)},
389388
padded_numel_(utils::multiply_integers(padded_sizes_)),
390389
logical_limits_{{0, 0, 0}},
@@ -399,7 +398,7 @@ vTensor::vTensor(
399398
context,
400399
storage_type,
401400
axis_map_,
402-
packed_dim_whcn_idx_,
401+
packed_dim_,
403402
padded_sizes_,
404403
dtype_,
405404
allocate_memory) {
@@ -422,7 +421,7 @@ vTensor::vTensor(const vTensor& other)
422421
: dtype_(other.dtype_),
423422
// Copy tensor size metadata
424423
sizes_(other.sizes_.begin(), other.sizes_.end()),
425-
packed_dim_whcn_idx_{other.packed_dim_whcn_idx_},
424+
packed_dim_{other.packed_dim_},
426425
dim_order_(other.dim_order_.begin(), other.dim_order_.end()),
427426
axis_map_(other.axis_map_.begin(), other.axis_map_.end()),
428427
strides_(other.strides_.begin(), other.strides_.end()),
@@ -450,12 +449,12 @@ vTensor::vTensor(
450449
: dtype_(other.dtype_),
451450
// Copy tensor size metadata
452451
sizes_(sizes.begin(), sizes.end()),
453-
packed_dim_whcn_idx_(other.packed_dim_whcn_idx_),
452+
packed_dim_(other.packed_dim_),
454453
dim_order_(dim_order.begin(), dim_order.end()),
455454
axis_map_(default_axis_map()),
456455
strides_(calculate_strides(sizes_, dim_order_)),
457456
numel_(utils::multiply_integers(sizes_)),
458-
padded_sizes_{calculate_padded_sizes(sizes, packed_dim_whcn_idx_)},
457+
padded_sizes_{calculate_padded_sizes(sizes, packed_dim_)},
459458
unsqueezed_strides_{unsqueeze_strides(strides_, numel_)},
460459
padded_numel_(utils::multiply_integers(padded_sizes_)),
461460
logical_limits_(other.logical_limits_),
@@ -512,7 +511,7 @@ void vTensor::set_logical_limits(const utils::uvec3& image_extents) {
512511
}
513512

514513
utils::GPUMemoryLayout vTensor::estimate_memory_layout() const {
515-
switch (packed_dim_whcn_idx_) {
514+
switch (packed_dim_) {
516515
case WHCN::kWidthDim:
517516
return utils::kWidthPacked;
518517
case WHCN::kHeightDim:
@@ -602,14 +601,14 @@ void vTensor::update_metadata() {
602601
strides_ = calculate_strides(sizes_, dim_order_);
603602
numel_ = utils::multiply_integers(sizes_);
604603

605-
padded_sizes_ = calculate_padded_sizes(sizes_, packed_dim_whcn_idx_);
604+
padded_sizes_ = calculate_padded_sizes(sizes_, packed_dim_);
606605
unsqueezed_strides_ = unsqueeze_strides(strides_, numel_);
607606
padded_numel_ = utils::multiply_integers(padded_sizes_);
608607

609608
// Calculate the image extents that would have been used to allocate a texture
610609
// with the current sizes, and use that to set the logical limits.
611610
set_logical_limits(
612-
calculate_image_extents(padded_sizes_, axis_map_, packed_dim_whcn_idx_));
611+
calculate_image_extents(padded_sizes_, axis_map_, packed_dim_));
613612

614613
if (sizes_uniform_.buffer()) {
615614
sizes_uniform_.update(utils::make_whcn_ivec4(sizes_));
@@ -633,7 +632,7 @@ void vTensor::check_sizes(const std::vector<int64_t>& sizes) const {
633632
// For texture storage check that the current texture is large enough for
634633
// the new sizes of the tensor.
635634
utils::uvec3 virtual_extents =
636-
calculate_image_extents(padded_sizes_, axis_map_, packed_dim_whcn_idx_);
635+
calculate_image_extents(padded_sizes_, axis_map_, packed_dim_);
637636

638637
bool valid_resize = virtual_extents[0] <= storage_.image_extents_[0];
639638
valid_resize =
@@ -705,11 +704,11 @@ void vTensor::virtual_transpose(const int64_t dim0, const int64_t dim1) {
705704

706705
const int dim0_whcn = sizes_.size() - 1 - dim0;
707706
const int dim1_whcn = sizes_.size() - 1 - dim1;
708-
if (packed_dim_whcn_idx_ == dim0_whcn) {
709-
packed_dim_whcn_idx_ = dim1_whcn;
707+
if (packed_dim_ == dim0_whcn) {
708+
packed_dim_ = dim1_whcn;
710709
}
711-
if (packed_dim_whcn_idx_ == dim1_whcn) {
712-
packed_dim_whcn_idx_ = dim0_whcn;
710+
if (packed_dim_ == dim1_whcn) {
711+
packed_dim_ = dim0_whcn;
713712
}
714713

715714
if (storage_type() == utils::kBuffer) {

backends/vulkan/runtime/api/containers/Tensor.h

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ namespace api {
2626
*/
2727
std::vector<int64_t> calculate_dim_order(
2828
const size_t ndim,
29-
const int32_t packed_dim_whcn_idx);
29+
const int32_t packed_dim);
3030

3131
/*
3232
* Given the sizes of a tensor and the dim order of the tensor (both in NCHW)
@@ -57,15 +57,15 @@ std::vector<int64_t> unsqueeze_strides(
5757
*/
5858
std::vector<int64_t> calculate_padded_sizes(
5959
const std::vector<int64_t>& sizes,
60-
const int32_t packed_dim_whcn_idx);
60+
const int32_t packed_dim);
6161

6262
/*
6363
* Calculate the image extents required of a texture backed tensor.
6464
*/
6565
utils::uvec3 calculate_image_extents(
6666
const std::vector<int64_t>& padded_sizes,
6767
const std::vector<int64_t>& axis_map,
68-
const int32_t packed_dim_whcn_idx);
68+
const int32_t packed_dim);
6969

7070
struct LastAccess {
7171
vkapi::PipelineStageFlags stage;
@@ -90,7 +90,7 @@ class vTensorStorage final {
9090
Context* context,
9191
const utils::StorageType storage_type,
9292
const std::vector<int64_t>& axis_map,
93-
const int32_t packed_dim_whcn_idx,
93+
const int32_t packed_dim,
9494
const std::vector<int64_t>& padded_sizes,
9595
const vkapi::ScalarType dtype,
9696
const bool allocate_memory = true);
@@ -228,7 +228,7 @@ class vTensor final {
228228
// which dimension is packed along a texel. For buffer backed tensors, this
229229
// describes which dimension has a stride of 1 (i.e. is last in the dim
230230
// order).
231-
int32_t packed_dim_whcn_idx_;
231+
int32_t packed_dim_;
232232

233233
/*
234234
* "Layout" metadata. These describe with further detail how tensor data is
@@ -378,12 +378,12 @@ class vTensor final {
378378
* tensor. In some scenarios, the exact layout of the tensor may not be able
379379
* to be replicated due to calling `virtual_*()` functions after construction;
380380
* however, this function will provide a memory layout that will produce the
381-
* same `packed_dim_whcn_idx` as this tensor.
381+
* same `packed_dim_` as this tensor.
382382
*/
383383
utils::GPUMemoryLayout estimate_memory_layout() const;
384384

385-
inline int32_t packed_dim_whcn_idx() const {
386-
return packed_dim_whcn_idx_;
385+
inline int32_t packed_dim() const {
386+
return packed_dim_;
387387
}
388388

389389
inline const std::vector<int64_t>& sizes() const {

backends/vulkan/runtime/graph/ComputeGraph.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -312,8 +312,8 @@ class ComputeGraph final {
312312
return values_.at(idx).toConstTensor().estimate_memory_layout();
313313
}
314314

315-
inline int32_t packed_dim_whcn_idx_of(const ValueRef idx) const {
316-
return values_.at(idx).toConstTensor().packed_dim_whcn_idx();
315+
inline int32_t packed_dim_of(const ValueRef idx) const {
316+
return values_.at(idx).toConstTensor().packed_dim();
317317
}
318318

319319
inline vkapi::BufferBindInfo sizes_ubo(const ValueRef idx) {

backends/vulkan/runtime/graph/ops/impl/BinaryOp.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ void add_binary_op_node(
9393
graph.create_params_buffer(broadcast_params),
9494
graph.create_params_buffer(alpha_val)},
9595
// Specialization Constants
96-
{SV(t_out->packed_dim_whcn_idx())},
96+
{SV(t_out->packed_dim())},
9797
// Resizing Logic
9898
resize_binary_op_node,
9999
{}));

backends/vulkan/runtime/graph/ops/impl/Convolution.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@ ValueRef prepack_biases(
108108
v,
109109
{t->sizes_ubo(), t->axis_map_ubo()},
110110
// Specialization constants
111-
{SV(t->packed_dim_whcn_idx())}));
111+
{SV(t->packed_dim())}));
112112

113113
return v;
114114
}
@@ -216,7 +216,7 @@ ValueRef prepack_weights(
216216
graph.create_params_buffer(
217217
utils::make_ivec4(original_sizes, /*reverse = */ true))},
218218
// Specialization constants
219-
{SV(t->packed_dim_whcn_idx())}));
219+
{SV(t->packed_dim())}));
220220

221221
return v;
222222
}

backends/vulkan/runtime/graph/ops/impl/Full.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ void add_full_node(
5454
// Shader params buffers
5555
{t_out->sizes_ubo(), graph.create_params_buffer(fill_value_val)},
5656
// Specialization Constants
57-
{SV(t_out->packed_dim_whcn_idx())},
57+
{SV(t_out->packed_dim())},
5858
// Resizing Logic
5959
resize_full_node,
6060
{size_or_in}));

backends/vulkan/runtime/graph/ops/impl/Linear.cpp

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,7 @@ void check_addmm_args(
3636
VK_CHECK_COND(mat1_sizes.size() == 2 || mat1_sizes.size() == 3);
3737
VK_CHECK_COND(mat1_sizes.size() == mat2_sizes.size());
3838

39-
VK_CHECK_COND(
40-
graph.packed_dim_whcn_idx_of(mat1) == graph.packed_dim_whcn_idx_of(out));
39+
VK_CHECK_COND(graph.packed_dim_of(mat1) == graph.packed_dim_of(out));
4140

4241
VK_CHECK_COND(utils::val_at(-1, mat1_sizes) == utils::val_at(-2, mat2_sizes));
4342

@@ -127,10 +126,10 @@ void add_addmm_naive_node(
127126
graph.create_params_buffer(params),
128127
},
129128
// Specialization Constants
130-
{graph.packed_dim_whcn_idx_of(out),
131-
graph.packed_dim_whcn_idx_of(mat1),
132-
graph.packed_dim_whcn_idx_of(mat2),
133-
graph.packed_dim_whcn_idx_of(self)},
129+
{graph.packed_dim_of(out),
130+
graph.packed_dim_of(mat1),
131+
graph.packed_dim_of(mat2),
132+
graph.packed_dim_of(self)},
134133
// Resizing Logic
135134
resize_addmm_node,
136135
{mat2_is_transposed}));
@@ -221,7 +220,7 @@ void add_addmm_optimized_node(
221220
graph.create_params_buffer(params),
222221
},
223222
// Specialization Constants
224-
{graph.packed_dim_whcn_idx_of(out)},
223+
{graph.packed_dim_of(out)},
225224
// Resizing Logic
226225
resize_addmm_node,
227226
{mat2_is_transposed}));
@@ -247,10 +246,10 @@ void add_addmm_node(
247246
}
248247

249248
Params params = {alpha_val, beta_val};
250-
if (graph.packed_dim_whcn_idx_of(mat1) == WHCN::kChannelsDim) {
249+
if (graph.packed_dim_of(mat1) == WHCN::kChannelsDim) {
251250
add_addmm_optimized_node(
252251
graph, self, mat1, mat2, beta, alpha, out, params, mat2_is_transposed);
253-
} else if (graph.packed_dim_whcn_idx_of(mat1) == WHCN::kWidthDim) {
252+
} else if (graph.packed_dim_of(mat1) == WHCN::kWidthDim) {
254253
add_addmm_naive_node(
255254
graph, self, mat1, mat2, beta, alpha, out, params, mat2_is_transposed);
256255
} else {

backends/vulkan/runtime/graph/ops/impl/MatMul.cpp

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,7 @@ void check_matmul_args(
2929
VK_CHECK_COND(mat1_sizes.size() == 2 || mat1_sizes.size() == 3);
3030
VK_CHECK_COND(mat1_sizes.size() == mat2_sizes.size());
3131

32-
VK_CHECK_COND(
33-
graph.packed_dim_whcn_idx_of(mat1) == graph.packed_dim_whcn_idx_of(out));
32+
VK_CHECK_COND(graph.packed_dim_of(mat1) == graph.packed_dim_of(out));
3433

3534
VK_CHECK_COND(utils::val_at(-1, mat1_sizes) == utils::val_at(-2, mat2_sizes));
3635
}
@@ -139,9 +138,9 @@ void add_matmul_naive_texture3d_node(
139138
graph.axis_map_ubo(mat2),
140139
},
141140
// Specialization Constants
142-
{graph.packed_dim_whcn_idx_of(out),
143-
graph.packed_dim_whcn_idx_of(mat1),
144-
graph.packed_dim_whcn_idx_of(mat2)},
141+
{graph.packed_dim_of(out),
142+
graph.packed_dim_of(mat1),
143+
graph.packed_dim_of(mat2)},
145144
// Resizing Logic
146145
resize_matmul_node,
147146
{mat2_is_transposed}));
@@ -223,7 +222,7 @@ void add_matmul_optimized_node(
223222
graph.axis_map_ubo(mat2_packed),
224223
},
225224
// Specialization Constants
226-
{graph.packed_dim_whcn_idx_of(out)},
225+
{graph.packed_dim_of(out)},
227226
// Resizing Logic
228227
resize_matmul_node,
229228
{mat2_is_transposed}));
@@ -238,9 +237,9 @@ void add_matmul_node(
238237
if (graph.is_buffer_storage(out)) {
239238
add_matmul_naive_buffer_node(
240239
graph, mat1, mat2_data, out, mat2_is_transposed);
241-
} else if (graph.packed_dim_whcn_idx_of(mat1) == WHCN::kChannelsDim) {
240+
} else if (graph.packed_dim_of(mat1) == WHCN::kChannelsDim) {
242241
add_matmul_optimized_node(graph, mat1, mat2_data, out, mat2_is_transposed);
243-
} else if (graph.packed_dim_whcn_idx_of(mat1) == WHCN::kWidthDim) {
242+
} else if (graph.packed_dim_of(mat1) == WHCN::kWidthDim) {
244243
add_matmul_naive_texture3d_node(
245244
graph, mat1, mat2_data, out, mat2_is_transposed);
246245
} else {

backends/vulkan/runtime/graph/ops/impl/QuantizedLinear.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,7 @@ void check_qlinear_args(
3030
VK_CHECK_COND(qmat2_sizes.size() == 2);
3131
VK_CHECK_COND(scales_sizes.size() == 1);
3232

33-
VK_CHECK_COND(
34-
graph.packed_dim_whcn_idx_of(mat1) == graph.packed_dim_whcn_idx_of(out));
33+
VK_CHECK_COND(graph.packed_dim_of(mat1) == graph.packed_dim_of(out));
3534

3635
VK_CHECK_COND(
3736
utils::val_at(-1, mat1_sizes) == utils::val_at(-1, qmat2_sizes));
@@ -79,8 +78,8 @@ void add_q_8w_linear_node(
7978

8079
std::string kernel_name = "q_8w_linear";
8180
kernel_name.reserve(kShaderNameReserve);
82-
add_packed_dim_suffix(kernel_name, graph.packed_dim_whcn_idx_of(mat1));
83-
add_packed_dim_suffix(kernel_name, graph.packed_dim_whcn_idx_of(q_mat2));
81+
add_packed_dim_suffix(kernel_name, graph.packed_dim_of(mat1));
82+
add_packed_dim_suffix(kernel_name, graph.packed_dim_of(q_mat2));
8483
add_dtype_suffix(kernel_name, graph.dtype_of(out));
8584
add_storage_type_suffix(kernel_name, graph.storage_type_of(out));
8685

0 commit comments

Comments
 (0)