@@ -15,10 +15,10 @@ namespace api {

 std::vector<int64_t> calculate_strides(
     const std::vector<int64_t>& sizes,
-    const vkapi::GPUMemoryLayout memory_layout,
+    const utils::GPUMemoryLayout memory_layout,
     const bool texel_strides) {
   const int64_t dim_offset =
-      vkapi::to_packed_dim_nchw_offset<int64_t>(memory_layout);
+      utils::to_packed_dim_nchw_offset<int64_t>(memory_layout);
   const int64_t last_dim = sizes.size() - dim_offset;
   VK_CHECK_COND(last_dim >= 0);

@@ -45,7 +45,7 @@ std::vector<int64_t> calculate_strides(

 std::vector<int64_t> calculate_padded_sizes(
     const std::vector<int64_t>& sizes,
-    const vkapi::GPUMemoryLayout memory_layout) {
+    const utils::GPUMemoryLayout memory_layout) {
   int64_t ndim = sizes.size();
   if (ndim == 0) {
     ndim = 1;
@@ -60,7 +60,7 @@ std::vector<int64_t> calculate_padded_sizes(

   // Pad the packed dim to the next multiple of 4.
   const int64_t dim_offset =
-      vkapi::to_packed_dim_nchw_offset<int64_t>(memory_layout);
+      utils::to_packed_dim_nchw_offset<int64_t>(memory_layout);
   const int64_t padded_dim_size = utils::val_at(-dim_offset, sizes);
   padded_sizes.at(ndim_up4 - dim_offset) = utils::align_up_4(padded_dim_size);

@@ -69,7 +69,7 @@ std::vector<int64_t> calculate_padded_sizes(

 utils::uvec3 calculate_image_extents(
     const std::vector<int64_t>& padded_sizes,
-    const vkapi::GPUMemoryLayout memory_layout) {
+    const utils::GPUMemoryLayout memory_layout) {
   VK_CHECK_COND(padded_sizes.size() == 4);

   uint32_t N = utils::safe_downcast<uint32_t>(padded_sizes.at(0));
@@ -78,15 +78,15 @@ utils::uvec3 calculate_image_extents(
   uint32_t W = utils::safe_downcast<uint32_t>(padded_sizes.at(3));

   switch (memory_layout) {
-    case vkapi::kWidthPacked:
+    case utils::kWidthPacked:
       VK_CHECK_COND(W % 4 == 0);
       W /= 4;
       break;
-    case vkapi::kHeightPacked:
+    case utils::kHeightPacked:
       VK_CHECK_COND(H % 4 == 0);
       H /= 4;
       break;
-    case vkapi::kChannelsPacked:
+    case utils::kChannelsPacked:
       VK_CHECK_COND(C % 4 == 0);
       C /= 4;
       break;
@@ -103,8 +103,8 @@ vTensor::vTensor(
     Context* const context,
     const std::vector<int64_t>& sizes,
     const vkapi::ScalarType dtype,
-    const vkapi::StorageType storage_type,
-    const vkapi::GPUMemoryLayout memory_layout,
+    const utils::StorageType storage_type,
+    const utils::GPUMemoryLayout memory_layout,
     const bool allocate_memory)
     : dtype_(dtype),
       memory_layout_(memory_layout),
@@ -125,7 +125,7 @@ vTensor::vTensor(
           padded_sizes_,
           dtype_,
           allocate_memory) {
-  if (storage_type != vkapi::kBuffer) {
+  if (storage_type != utils::kBuffer) {
     texture_limits_.limits = utils::ivec3{
         utils::safe_downcast<int32_t>(storage_.image_extents_.data[0]),
         utils::safe_downcast<int32_t>(storage_.image_extents_.data[1]),
@@ -204,33 +204,33 @@ const vkapi::BufferBindInfo vTensor::ntexels_ubo() {

 VmaAllocationCreateInfo vTensor::get_allocation_create_info() const {
   switch (storage_type()) {
-    case vkapi::kBuffer:
+    case utils::kBuffer:
       return storage_.buffer_.allocation_create_info();
-    case vkapi::kTexture2D:
-    case vkapi::kTexture3D:
+    case utils::kTexture2D:
+    case utils::kTexture3D:
       return storage_.image_.allocation_create_info();
   }
   return {};
 }

 VkMemoryRequirements vTensor::get_memory_requirements() const {
   switch (storage_type()) {
-    case vkapi::kBuffer:
+    case utils::kBuffer:
       return storage_.buffer_.get_memory_requirements();
-    case vkapi::kTexture2D:
-    case vkapi::kTexture3D:
+    case utils::kTexture2D:
+    case utils::kTexture3D:
       return storage_.image_.get_memory_requirements();
   }
   return {};
 }

 void vTensor::bind_allocation(const vkapi::Allocation& allocation) {
   switch (storage_type()) {
-    case vkapi::kBuffer:
+    case utils::kBuffer:
       storage_.buffer_.bind_allocation(allocation);
       break;
-    case vkapi::kTexture2D:
-    case vkapi::kTexture3D:
+    case utils::kTexture2D:
+    case utils::kTexture3D:
       storage_.image_.bind_allocation(allocation);
       break;
   }
@@ -275,7 +275,7 @@ void vTensor::reallocate(const std::vector<int64_t>& new_sizes) {
 }

 void vTensor::virtual_resize(const std::vector<int64_t>& new_sizes) {
-  if (storage_type() != vkapi::kBuffer) {
+  if (storage_type() != utils::kBuffer) {
     // For texture storage check that the current texture is large enough for
     // the new sizes of the tensor.
     utils::uvec3 virtual_extents =
@@ -302,7 +302,7 @@ void vTensor::virtual_resize(const std::vector<int64_t>& new_sizes) {
 vkapi::VulkanImage allocate_image(
     Context* const context_ptr,
     utils::uvec3& image_extents,
-    const vkapi::StorageType storage_type,
+    const utils::StorageType storage_type,
     const VkFormat image_format,
     const bool allocate_memory) {
   vkapi::Adapter* adapter_ptr = context_ptr->adapter_ptr();
@@ -318,11 +318,11 @@ vkapi::VulkanImage allocate_image(
   VkImageViewType image_view_type;

   switch (storage_type) {
-    case vkapi::kTexture3D:
+    case utils::kTexture3D:
       image_type = VK_IMAGE_TYPE_3D;
       image_view_type = VK_IMAGE_VIEW_TYPE_3D;
       break;
-    case vkapi::kTexture2D:
+    case utils::kTexture2D:
       image_type = VK_IMAGE_TYPE_2D;
       image_view_type = VK_IMAGE_VIEW_TYPE_2D;
       break;
@@ -347,13 +347,13 @@ vkapi::VulkanImage allocate_image(
 vkapi::VulkanBuffer allocate_buffer(
     Context* const context_ptr,
     const int64_t numel,
-    const vkapi::StorageType storage_type,
+    const utils::StorageType storage_type,
     const vkapi::ScalarType dtype,
     const bool allocate_memory) {
   vkapi::Adapter* adapter_ptr = context_ptr->adapter_ptr();

   switch (storage_type) {
-    case vkapi::kBuffer:
+    case utils::kBuffer:
       break;
     default:
       // Return an empty VulkanBuffer if Buffer storage is not used
@@ -366,8 +366,8 @@ vkapi::VulkanBuffer allocate_buffer(

 vTensorStorage::vTensorStorage(
     Context* const context,
-    const vkapi::StorageType storage_type,
-    const vkapi::GPUMemoryLayout gpu_memory_layout,
+    const utils::StorageType storage_type,
+    const utils::GPUMemoryLayout gpu_memory_layout,
     const std::vector<int64_t>& padded_sizes,
     const vkapi::ScalarType dtype,
     const bool allocate_memory)
@@ -458,7 +458,7 @@ void vTensorStorage::transition(

 void vTensorStorage::discard_and_reallocate(
     const std::vector<int64_t>& padded_sizes,
-    const vkapi::GPUMemoryLayout gpu_memory_layout,
+    const utils::GPUMemoryLayout gpu_memory_layout,
     const vkapi::ScalarType dtype) {
   const bool image_owns_memory = image_.owns_memory();
   const bool buffer_owns_memory = buffer_.owns_memory();
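
For readers following the `vkapi::` → `utils::` move: the arithmetic in these helpers is unchanged, only the namespace of the layout/storage enums differs. Below is a minimal, self-contained sketch of the packing math visible in this diff (pad the packed dim to a multiple of 4, then divide it by 4 since four elements share one texel). It deliberately uses a stand-in enum and a reimplemented `align_up_4` instead of the real ExecuTorch headers, so every name here is an illustrative stand-in, and the `{W, H, C * N}` extent order is an assumption following the usual (width, height, depth) texture convention:

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for utils::GPUMemoryLayout; values mirror the cases in the diff.
enum class GPUMemoryLayout { kWidthPacked, kHeightPacked, kChannelsPacked };

// Stand-in for utils::align_up_4: round up to the next multiple of 4.
int64_t align_up_4(int64_t v) {
  return (v + 3) & ~int64_t(3);
}

// Mirrors the switch in calculate_image_extents for padded {N, C, H, W}
// sizes: the packed dim must already be a multiple of 4, and it shrinks
// by 4x because four of its elements are stored per texel.
std::vector<uint32_t> image_extents(
    const std::vector<int64_t>& padded, // {N, C, H, W}
    GPUMemoryLayout layout) {
  assert(padded.size() == 4);
  auto N = static_cast<uint32_t>(padded[0]);
  auto C = static_cast<uint32_t>(padded[1]);
  auto H = static_cast<uint32_t>(padded[2]);
  auto W = static_cast<uint32_t>(padded[3]);
  switch (layout) {
    case GPUMemoryLayout::kWidthPacked:
      assert(W % 4 == 0);
      W /= 4;
      break;
    case GPUMemoryLayout::kHeightPacked:
      assert(H % 4 == 0);
      H /= 4;
      break;
    case GPUMemoryLayout::kChannelsPacked:
      assert(C % 4 == 0);
      C /= 4;
      break;
  }
  return {W, H, C * N}; // assumed (width, height, depth) ordering
}

int main() {
  // A {1, 3, 5, 8} tensor, channels-packed: C pads 3 -> 4, then C /= 4.
  std::vector<int64_t> padded = {1, align_up_4(3), 5, 8}; // {1, 4, 5, 8}
  auto ext = image_extents(padded, GPUMemoryLayout::kChannelsPacked);
  std::cout << ext[0] << "x" << ext[1] << "x" << ext[2] << "\n"; // 8x5x1
}
```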