@@ -55,6 +55,14 @@ umf_result_t umfCUDAMemoryProviderParamsSetMemoryType(
55
55
return UMF_RESULT_ERROR_NOT_SUPPORTED ;
56
56
}
57
57
58
+ umf_result_t umfCUDAMemoryProviderParamsSetAllocFlags (
59
+ umf_cuda_memory_provider_params_handle_t hParams , unsigned int flags ) {
60
+ (void )hParams ;
61
+ (void )flags ;
62
+ LOG_ERR ("CUDA provider is disabled (UMF_BUILD_CUDA_PROVIDER is OFF)!" );
63
+ return UMF_RESULT_ERROR_NOT_SUPPORTED ;
64
+ }
65
+
58
66
umf_memory_provider_ops_t * umfCUDAMemoryProviderOps (void ) {
59
67
// not supported
60
68
LOG_ERR ("CUDA provider is disabled (UMF_BUILD_CUDA_PROVIDER is OFF)!" );
@@ -89,13 +97,22 @@ typedef struct cu_memory_provider_t {
89
97
CUdevice device ;
90
98
umf_usm_memory_type_t memory_type ;
91
99
size_t min_alignment ;
100
+ unsigned int alloc_flags ;
92
101
} cu_memory_provider_t ;
93
102
94
103
// CUDA Memory Provider settings struct
typedef struct umf_cuda_memory_provider_params_t {
    // Handle to the CUDA context
    void *cuda_context_handle;

    // Handle to the CUDA device
    int cuda_device_handle;

    // Allocation memory type
    umf_usm_memory_type_t memory_type;

    // Allocation flags for cuMemHostAlloc/cuMemAllocManaged
    // (0 means "use the provider's default for the chosen memory type")
    unsigned int alloc_flags;
} umf_cuda_memory_provider_params_t;
100
117
101
118
typedef struct cu_ops_t {
@@ -104,6 +121,7 @@ typedef struct cu_ops_t {
104
121
CUmemAllocationGranularity_flags option );
105
122
CUresult (* cuMemAlloc )(CUdeviceptr * dptr , size_t bytesize );
106
123
CUresult (* cuMemAllocHost )(void * * pp , size_t bytesize );
124
+ CUresult (* cuMemHostAlloc )(void * * pp , size_t bytesize , unsigned int flags );
107
125
CUresult (* cuMemAllocManaged )(CUdeviceptr * dptr , size_t bytesize ,
108
126
unsigned int flags );
109
127
CUresult (* cuMemFree )(CUdeviceptr dptr );
@@ -174,6 +192,8 @@ static void init_cu_global_state(void) {
174
192
utils_get_symbol_addr (0 , "cuMemAlloc_v2" , lib_name );
175
193
* (void * * )& g_cu_ops .cuMemAllocHost =
176
194
utils_get_symbol_addr (0 , "cuMemAllocHost_v2" , lib_name );
195
+ * (void * * )& g_cu_ops .cuMemHostAlloc =
196
+ utils_get_symbol_addr (0 , "cuMemHostAlloc" , lib_name );
177
197
* (void * * )& g_cu_ops .cuMemAllocManaged =
178
198
utils_get_symbol_addr (0 , "cuMemAllocManaged" , lib_name );
179
199
* (void * * )& g_cu_ops .cuMemFree =
@@ -196,12 +216,12 @@ static void init_cu_global_state(void) {
196
216
utils_get_symbol_addr (0 , "cuIpcCloseMemHandle" , lib_name );
197
217
198
218
if (!g_cu_ops .cuMemGetAllocationGranularity || !g_cu_ops .cuMemAlloc ||
199
- !g_cu_ops .cuMemAllocHost || !g_cu_ops .cuMemAllocManaged ||
200
- !g_cu_ops .cuMemFree || !g_cu_ops .cuMemFreeHost ||
201
- !g_cu_ops .cuGetErrorName || !g_cu_ops .cuGetErrorString ||
202
- !g_cu_ops .cuCtxGetCurrent || !g_cu_ops .cuCtxSetCurrent ||
203
- !g_cu_ops .cuIpcGetMemHandle || !g_cu_ops .cuIpcOpenMemHandle ||
204
- !g_cu_ops .cuIpcCloseMemHandle ) {
219
+ !g_cu_ops .cuMemAllocHost || !g_cu_ops .cuMemHostAlloc ||
220
+ !g_cu_ops .cuMemAllocManaged || !g_cu_ops .cuMemFree ||
221
+ !g_cu_ops .cuMemFreeHost || !g_cu_ops .cuGetErrorName ||
222
+ !g_cu_ops .cuGetErrorString || !g_cu_ops .cuCtxGetCurrent ||
223
+ !g_cu_ops .cuCtxSetCurrent || !g_cu_ops .cuIpcGetMemHandle ||
224
+ !g_cu_ops .cuIpcOpenMemHandle || ! g_cu_ops . cuIpcCloseMemHandle ) {
205
225
LOG_ERR ("Required CUDA symbols not found." );
206
226
Init_cu_global_state_failed = true;
207
227
}
@@ -225,6 +245,7 @@ umf_result_t umfCUDAMemoryProviderParamsCreate(
225
245
params_data -> cuda_context_handle = NULL ;
226
246
params_data -> cuda_device_handle = -1 ;
227
247
params_data -> memory_type = UMF_MEMORY_TYPE_UNKNOWN ;
248
+ params_data -> alloc_flags = 0 ;
228
249
229
250
* hParams = params_data ;
230
251
@@ -275,6 +296,18 @@ umf_result_t umfCUDAMemoryProviderParamsSetMemoryType(
275
296
return UMF_RESULT_SUCCESS ;
276
297
}
277
298
299
+ umf_result_t umfCUDAMemoryProviderParamsSetAllocFlags (
300
+ umf_cuda_memory_provider_params_handle_t hParams , unsigned int flags ) {
301
+ if (!hParams ) {
302
+ LOG_ERR ("CUDA Memory Provider params handle is NULL" );
303
+ return UMF_RESULT_ERROR_INVALID_ARGUMENT ;
304
+ }
305
+
306
+ hParams -> alloc_flags = flags ;
307
+
308
+ return UMF_RESULT_SUCCESS ;
309
+ }
310
+
278
311
static umf_result_t cu_memory_provider_initialize (void * params ,
279
312
void * * provider ) {
280
313
if (params == NULL ) {
@@ -294,6 +327,24 @@ static umf_result_t cu_memory_provider_initialize(void *params,
294
327
return UMF_RESULT_ERROR_INVALID_ARGUMENT ;
295
328
}
296
329
330
+ if (cu_params -> memory_type == UMF_MEMORY_TYPE_SHARED ) {
331
+ if (cu_params -> alloc_flags == 0 ) {
332
+ // if flags are not set, the default setting is CU_MEM_ATTACH_GLOBAL
333
+ cu_params -> alloc_flags = CU_MEM_ATTACH_GLOBAL ;
334
+ } else if (cu_params -> alloc_flags != CU_MEM_ATTACH_GLOBAL &&
335
+ cu_params -> alloc_flags != CU_MEM_ATTACH_HOST ) {
336
+ LOG_ERR ("Invalid shared allocation flags" );
337
+ return UMF_RESULT_ERROR_INVALID_ARGUMENT ;
338
+ }
339
+ } else if (cu_params -> memory_type == UMF_MEMORY_TYPE_HOST ) {
340
+ if (cu_params -> alloc_flags &
341
+ ~(CU_MEMHOSTALLOC_PORTABLE | CU_MEMHOSTALLOC_DEVICEMAP |
342
+ CU_MEMHOSTALLOC_WRITECOMBINED )) {
343
+ LOG_ERR ("Invalid host allocation flags" );
344
+ return UMF_RESULT_ERROR_INVALID_ARGUMENT ;
345
+ }
346
+ }
347
+
297
348
utils_init_once (& cu_is_initialized , init_cu_global_state );
298
349
if (Init_cu_global_state_failed ) {
299
350
LOG_ERR ("Loading CUDA symbols failed" );
@@ -324,6 +375,7 @@ static umf_result_t cu_memory_provider_initialize(void *params,
324
375
cu_provider -> device = cu_params -> cuda_device_handle ;
325
376
cu_provider -> memory_type = cu_params -> memory_type ;
326
377
cu_provider -> min_alignment = min_alignment ;
378
+ cu_provider -> alloc_flags = cu_params -> alloc_flags ;
327
379
328
380
* provider = cu_provider ;
329
381
@@ -381,7 +433,8 @@ static umf_result_t cu_memory_provider_alloc(void *provider, size_t size,
381
433
CUresult cu_result = CUDA_SUCCESS ;
382
434
switch (cu_provider -> memory_type ) {
383
435
case UMF_MEMORY_TYPE_HOST : {
384
- cu_result = g_cu_ops .cuMemAllocHost (resultPtr , size );
436
+ cu_result =
437
+ g_cu_ops .cuMemHostAlloc (resultPtr , size , cu_provider -> alloc_flags );
385
438
break ;
386
439
}
387
440
case UMF_MEMORY_TYPE_DEVICE : {
@@ -390,7 +443,7 @@ static umf_result_t cu_memory_provider_alloc(void *provider, size_t size,
390
443
}
391
444
case UMF_MEMORY_TYPE_SHARED : {
392
445
cu_result = g_cu_ops .cuMemAllocManaged ((CUdeviceptr * )resultPtr , size ,
393
- CU_MEM_ATTACH_GLOBAL );
446
+ cu_provider -> alloc_flags );
394
447
break ;
395
448
}
396
449
default :
0 commit comments