 #include <executorch/backends/vulkan/runtime/api/Types.h>
 
 namespace vkcompute {
+namespace api {
 
 /*
  * Given the sizes of a tensor and the GPU memory layout, calculate the strides
@@ -29,7 +30,7 @@ namespace vkcompute {
  */
 std::vector<int64_t> calculate_strides(
     const std::vector<int64_t>& sizes,
-    const api::GPUMemoryLayout memory_layout,
+    const GPUMemoryLayout memory_layout,
     const bool texel_strides = true);
 
 /*
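For intuition about what `calculate_strides` returns, a minimal sketch of the contiguous (row-major) case follows. This is an illustration only, not the ExecuTorch implementation; the real function also orders dimensions according to the `GPUMemoryLayout` and, when `texel_strides` is true, expresses strides in terms of 4-element texels.

```cpp
#include <cstdint>
#include <vector>

// Illustrative sketch: strides of a contiguous row-major tensor.
// stride[i] is the number of elements stepped over when index i
// increments, i.e. the product of all sizes to the right of dim i.
std::vector<int64_t> contiguous_strides(const std::vector<int64_t>& sizes) {
  if (sizes.empty()) {
    return {};
  }
  std::vector<int64_t> strides(sizes.size(), 1);
  for (size_t i = sizes.size() - 1; i > 0; --i) {
    strides[i - 1] = strides[i] * sizes[i];
  }
  return strides;
}
// e.g. sizes {2, 3, 4} -> strides {12, 4, 1}
```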
@@ -48,27 +49,24 @@ std::vector<int64_t> calculate_strides(
  */
 std::vector<int64_t> calculate_padded_sizes(
     const std::vector<int64_t>& sizes,
-    const api::GPUMemoryLayout memory_layout);
+    const GPUMemoryLayout memory_layout);
 
 /*
  * Given the padded sizes of a tensor and the GPU memory layout, calculate the
  * 3D image extents required to store the tensor data as an image texture.
  */
 utils::uvec3 calculate_image_extents(
     const std::vector<int64_t>& padded_sizes,
-    const api::GPUMemoryLayout memory_layout);
+    const GPUMemoryLayout memory_layout);
 
 struct LastAccess {
-  api::PipelineStageFlags stage;
-  api::MemoryAccessFlags access;
+  PipelineStageFlags stage;
+  MemoryAccessFlags access;
 
   LastAccess()
-      : stage{api::PipelineStage::NO_STAGE},
-        access{api::MemoryAccessType::NONE} {}
+      : stage{PipelineStage::NO_STAGE}, access{MemoryAccessType::NONE} {}
 
-  LastAccess(
-      api::PipelineStageFlags stage_flags,
-      api::MemoryAccessFlags access_flags)
+  LastAccess(PipelineStageFlags stage_flags, MemoryAccessFlags access_flags)
       : stage{stage_flags}, access{access_flags} {}
 };
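`LastAccess` records the pipeline stage(s) and memory access type(s) of the most recent use of the resource, which is what makes barrier insertion decidable later. The underlying hazard rule, sketched below with a hypothetical helper (the actual logic lives in `vTensorStorage::transition()`), is that only read-after-read is safe without a barrier.

```cpp
// Hypothetical helper illustrating the read/write hazard rule that
// last-access tracking enables; not the actual transition() body.
inline bool needs_memory_barrier(
    const bool last_access_was_write,
    const bool next_access_is_write) {
  // write->read, write->write, and read->write all need a barrier;
  // read->read does not.
  return last_access_was_write || next_access_is_write;
}
```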
@@ -78,11 +76,11 @@ class vTensorStorage final {
   vTensorStorage() = default;
 
   vTensorStorage(
-      api::Context* context,
-      const api::StorageType storage_type,
-      const api::GPUMemoryLayout gpu_memory_layout,
+      Context* context,
+      const StorageType storage_type,
+      const GPUMemoryLayout gpu_memory_layout,
       const std::vector<int64_t>& sizes,
-      const api::ScalarType dtype,
+      const ScalarType dtype,
       const bool allocate_memory = true);
 
   vTensorStorage(const vTensorStorage& other) = delete;
@@ -97,17 +95,17 @@ class vTensorStorage final {
 
  private:
   // Context
-  api::Context* context_{};
+  Context* context_{};
 
-  api::StorageType storage_type_;
+  StorageType storage_type_;
 
   // Resource sizings
   utils::uvec3 image_extents_{};
   int64_t buffer_length_{};
 
   // GPU Storage
-  mutable api::VulkanImage image_;
-  mutable api::VulkanBuffer buffer_;
+  mutable VulkanImage image_;
+  mutable VulkanBuffer buffer_;
 
   // Last Access - used to insert memory barriers
   LastAccess last_access_;
@@ -118,9 +116,9 @@ class vTensorStorage final {
 
   // Memory barrier insertion
   void transition(
-      api::PipelineBarrier&,
-      const api::PipelineStageFlags,
-      const api::MemoryAccessFlags);
+      PipelineBarrier&,
+      const PipelineStageFlags,
+      const MemoryAccessFlags);
 
   // Validation
   void verify() const;
@@ -132,8 +130,8 @@ class vTensorStorage final {
 
   void discard_and_reallocate(
       const std::vector<int64_t>& padded_sizes,
-      const api::GPUMemoryLayout gpu_memory_layout,
-      const api::ScalarType dtype);
+      const GPUMemoryLayout gpu_memory_layout,
+      const ScalarType dtype);
 };
 
 class vTensor final {
@@ -146,11 +144,11 @@ class vTensor final {
 
  public:
   explicit vTensor(
-      api::Context* context,
+      Context* context,
       const std::vector<int64_t>& sizes,
-      const api::ScalarType dtype,
-      const api::StorageType storage_type = api::kTexture3D,
-      const api::GPUMemoryLayout memory_layout = api::kChannelsPacked,
+      const ScalarType dtype,
+      const StorageType storage_type = kTexture3D,
+      const GPUMemoryLayout memory_layout = kChannelsPacked,
       const bool allocate_memory = true);
 
   vTensor(const vTensor& other) = delete;
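A hypothetical call site for the constructor above; `ctx` and the `kFloat` scalar-type constant are assumed to be in scope. The last three arguments match the declared defaults and could be omitted:

```cpp
// Hypothetical usage sketch; ctx is a valid Context*.
vTensor t(
    ctx,
    /*sizes=*/{1, 3, 64, 64},
    kFloat, // assumed ScalarType constant
    kTexture3D, // default storage type
    kChannelsPacked, // default memory layout
    /*allocate_memory=*/true);
```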
@@ -160,8 +158,8 @@ class vTensor final {
   vTensor& operator=(vTensor&& other) = default;
 
  private:
-  api::ScalarType dtype_;
-  api::GPUMemoryLayout memory_layout_;
+  ScalarType dtype_;
+  GPUMemoryLayout memory_layout_;
 
   // sizes of the tensor in NCHW dimension order
   std::vector<int64_t> sizes_;
@@ -181,10 +179,10 @@ class vTensor final {
    * Refer to the comments for the corresponding *_ubo() functions for more
    * context about the data contained in each buffer.
    */
-  api::ParamsBuffer sizes_uniform_;
-  api::ParamsBuffer texture_limits_uniform_;
-  api::ParamsBuffer texel_strides_uniform_;
-  api::ParamsBuffer ntexels_uniform_;
+  ParamsBuffer sizes_uniform_;
+  ParamsBuffer texture_limits_uniform_;
+  ParamsBuffer texel_strides_uniform_;
+  ParamsBuffer ntexels_uniform_;
 
   vTensorStorage storage_;
 
@@ -193,56 +191,48 @@ class vTensor final {
    Texture Access
   */
 
-  inline api::VulkanImage& image() const& {
+  inline VulkanImage& image() const& {
     return storage_.image_;
   }
 
-  api::VulkanImage& image(
-      api::PipelineBarrier&,
-      const api::PipelineStageFlags) &;
+  VulkanImage& image(PipelineBarrier&, const PipelineStageFlags) &;
 
-  api::VulkanImage& image(
-      api::PipelineBarrier&,
-      const api::PipelineStageFlags,
-      const api::MemoryAccessFlags) &;
+  VulkanImage&
+  image(PipelineBarrier&, const PipelineStageFlags, const MemoryAccessFlags) &;
 
-  inline api::VulkanBuffer& buffer() const& {
+  inline VulkanBuffer& buffer() const& {
     return storage_.buffer_;
   }
 
-  api::VulkanBuffer& buffer(
-      api::PipelineBarrier&,
-      const api::PipelineStageFlags) &;
+  VulkanBuffer& buffer(PipelineBarrier&, const PipelineStageFlags) &;
 
-  api::VulkanBuffer& buffer(
-      api::PipelineBarrier&,
-      const api::PipelineStageFlags,
-      const api::MemoryAccessFlags) &;
+  VulkanBuffer&
+  buffer(PipelineBarrier&, const PipelineStageFlags, const MemoryAccessFlags) &;
 
   /*
     Metadata
   */
 
-  inline api::StorageType storage_type() const {
+  inline StorageType storage_type() const {
     return storage_.storage_type_;
   }
 
   inline bool has_buffer_storage() const {
-    return storage_.storage_type_ == api::kBuffer;
+    return storage_.storage_type_ == kBuffer;
   }
 
   inline const utils::uvec3& image_extents() const {
     return storage_.image_extents_;
   }
 
   /*
-   * Extract an `api::ScalarType` from the TensorOptions member
+   * Extract a `ScalarType` from the TensorOptions member
    */
-  inline api::ScalarType dtype() const {
+  inline ScalarType dtype() const {
     return dtype_;
   }
 
-  inline api::GPUMemoryLayout gpu_memory_layout() const {
+  inline GPUMemoryLayout gpu_memory_layout() const {
     return memory_layout_;
   }
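A hypothetical sketch of how the barrier-taking overloads above would be used around a dispatch. The two-argument overload presumably implies read-only access; `PipelineStage::COMPUTE` and `MemoryAccessType::WRITE` are assumed flag values:

```cpp
// Hypothetical usage: request resources while accumulating any
// required transitions into a PipelineBarrier before recording a
// compute dispatch.
PipelineBarrier pipeline_barrier{};
VulkanImage& out_image = out_tensor.image(
    pipeline_barrier, PipelineStage::COMPUTE, MemoryAccessType::WRITE);
VulkanImage& in_image = in_tensor.image(
    pipeline_barrier, PipelineStage::COMPUTE); // read-only request
// pipeline_barrier now holds the barriers implied by these accesses.
```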
@@ -267,7 +257,7 @@ class vTensor final {
    * Note that dimensions that are not present in the tensor's sizes are set to
    * a size of 1.
    */
-  const api::BufferBindInfo sizes_ubo();
+  const BufferBindInfo sizes_ubo();
 
   /*
    * Returns a GPU buffer containing the virtual image extents of the tensor.
@@ -278,18 +268,18 @@ class vTensor final {
    *
    * This buffer should only be used to
    */
-  const api::BufferBindInfo texture_limits_ubo();
+  const BufferBindInfo texture_limits_ubo();
 
   /*
    * Returns the strides of the texel buffer used to store the tensor, as
    * calculated by calculate_strides().
    */
-  const api::BufferBindInfo texel_strides_ubo();
+  const BufferBindInfo texel_strides_ubo();
 
   /*
    * Returns the number of texels in the texel buffer used to store the tensor.
    */
-  const api::BufferBindInfo ntexels_ubo();
+  const BufferBindInfo ntexels_ubo();
 
   inline const utils::ivec3 texture_limits() const {
     return texture_limits_.limits;
@@ -300,7 +290,7 @@ class vTensor final {
   }
 
   inline size_t nbytes() const {
-    return api::element_size(dtype()) * numel();
+    return element_size(dtype()) * numel();
   }
 
   /*
@@ -322,7 +312,7 @@ class vTensor final {
    * Return nbytes but based on padded_sizes_ instead of sizes_
    */
   inline VkDeviceSize gpu_nbytes() const {
-    return api::element_size(dtype()) * gpu_numel();
+    return element_size(dtype()) * gpu_numel();
   }
 
   /*
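A worked example of the two byte counts, assuming `element_size(kFloat) == 4` and, purely for illustration, that padding expanded `sizes_ = {2, 3, 5}` to `padded_sizes_ = {2, 4, 5}`: `nbytes()` is 4 × (2 · 3 · 5) = 120 bytes, while `gpu_nbytes()` is 4 × (2 · 4 · 5) = 160 bytes.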
@@ -338,7 +328,7 @@ class vTensor final {
   /*
    * Binds the underlying resource to the given memory allocation
    */
-  void bind_allocation(const api::Allocation& allocation);
+  void bind_allocation(const Allocation& allocation);
 
  private:
   /*
@@ -362,4 +352,5 @@ class vTensor final {
   void virtual_resize(const std::vector<int64_t>& new_sizes);
 };
 
+} // namespace api
 } // namespace vkcompute
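Net effect of the new nested namespace for a hypothetical external caller: names that previously lived directly under `vkcompute` are now reached through `vkcompute::api`, which is also why the `api::` qualifiers inside the header become redundant and are dropped throughout this diff.

```cpp
// Hypothetical caller outside vkcompute::api (ctx and kFloat assumed).
// Before this change: vkcompute::vTensor t(...);
vkcompute::api::vTensor t(ctx, {1, 3, 64, 64}, vkcompute::api::kFloat);
```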