9
9
#include <stdio.h>
10
10
#include <stdlib.h>
11
11
#include <string.h>
12
- #include <sys/mman.h>
13
-
14
- #include <jemalloc/jemalloc.h>
15
12
16
13
#include "base_alloc_global.h"
17
14
#include "utils_common.h"
22
19
#include <umf/memory_pool_ops.h>
23
20
#include <umf/pools/pool_jemalloc.h>
24
21
22
+ #include <jemalloc/jemalloc.h>
23
+
24
+ // The Windows version of jemalloc uses API with je_ prefix,
25
+ // while the Linux one does not.
26
+ #ifndef _WIN32
27
+ #define je_mallocx mallocx
28
+ #define je_dallocx dallocx
29
+ #define je_rallocx rallocx
30
+ #define je_mallctl mallctl
31
+ #define je_malloc_usable_size malloc_usable_size
32
+ #endif
33
+
25
34
#define MALLOCX_ARENA_MAX (MALLCTL_ARENAS_ALL - 1)
26
35
27
36
typedef struct jemalloc_memory_pool_t {
@@ -279,13 +288,13 @@ static extent_hooks_t arena_extent_hooks = {
279
288
.merge = arena_extent_merge ,
280
289
};
281
290
282
- static void * je_malloc (void * pool , size_t size ) {
291
+ static void * op_malloc (void * pool , size_t size ) {
283
292
assert (pool );
284
293
jemalloc_memory_pool_t * je_pool = (jemalloc_memory_pool_t * )pool ;
285
294
// MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
286
295
// the tcache, so we wouldn't be able to guarantee isolation of different providers.
287
296
int flags = MALLOCX_ARENA (je_pool -> arena_index ) | MALLOCX_TCACHE_NONE ;
288
- void * ptr = mallocx (size , flags );
297
+ void * ptr = je_mallocx (size , flags );
289
298
if (ptr == NULL ) {
290
299
TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY ;
291
300
return NULL ;
@@ -296,24 +305,24 @@ static void *je_malloc(void *pool, size_t size) {
296
305
return ptr ;
297
306
}
298
307
299
- static umf_result_t je_free (void * pool , void * ptr ) {
308
+ static umf_result_t op_free (void * pool , void * ptr ) {
300
309
(void )pool ; // unused
301
310
assert (pool );
302
311
303
312
if (ptr != NULL ) {
304
313
VALGRIND_DO_MEMPOOL_FREE (pool , ptr );
305
- dallocx (ptr , MALLOCX_TCACHE_NONE );
314
+ je_dallocx (ptr , MALLOCX_TCACHE_NONE );
306
315
}
307
316
308
317
return UMF_RESULT_SUCCESS ;
309
318
}
310
319
311
- static void * je_calloc (void * pool , size_t num , size_t size ) {
320
+ static void * op_calloc (void * pool , size_t num , size_t size ) {
312
321
assert (pool );
313
322
size_t csize = num * size ;
314
- void * ptr = je_malloc (pool , csize );
323
+ void * ptr = op_malloc (pool , csize );
315
324
if (ptr == NULL ) {
316
- // TLS_last_allocation_error is set by je_malloc ()
325
+ // TLS_last_allocation_error is set by op_malloc ()
317
326
return NULL ;
318
327
}
319
328
@@ -323,22 +332,22 @@ static void *je_calloc(void *pool, size_t num, size_t size) {
323
332
return ptr ;
324
333
}
325
334
326
- static void * je_realloc (void * pool , void * ptr , size_t size ) {
335
+ static void * op_realloc (void * pool , void * ptr , size_t size ) {
327
336
assert (pool );
328
337
if (size == 0 && ptr != NULL ) {
329
- dallocx (ptr , MALLOCX_TCACHE_NONE );
338
+ je_dallocx (ptr , MALLOCX_TCACHE_NONE );
330
339
TLS_last_allocation_error = UMF_RESULT_SUCCESS ;
331
340
VALGRIND_DO_MEMPOOL_FREE (pool , ptr );
332
341
return NULL ;
333
342
} else if (ptr == NULL ) {
334
- return je_malloc (pool , size );
343
+ return op_malloc (pool , size );
335
344
}
336
345
337
346
jemalloc_memory_pool_t * je_pool = (jemalloc_memory_pool_t * )pool ;
338
347
// MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
339
348
// the tcache, so we wouldn't be able to guarantee isolation of different providers.
340
349
int flags = MALLOCX_ARENA (je_pool -> arena_index ) | MALLOCX_TCACHE_NONE ;
341
- void * new_ptr = rallocx (ptr , size , flags );
350
+ void * new_ptr = je_rallocx (ptr , size , flags );
342
351
if (new_ptr == NULL ) {
343
352
TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY ;
344
353
return NULL ;
@@ -355,15 +364,15 @@ static void *je_realloc(void *pool, void *ptr, size_t size) {
355
364
return new_ptr ;
356
365
}
357
366
358
- static void * je_aligned_alloc (void * pool , size_t size , size_t alignment ) {
367
+ static void * op_aligned_alloc (void * pool , size_t size , size_t alignment ) {
359
368
assert (pool );
360
369
jemalloc_memory_pool_t * je_pool = (jemalloc_memory_pool_t * )pool ;
361
370
unsigned arena = je_pool -> arena_index ;
362
371
// MALLOCX_TCACHE_NONE is set, because jemalloc can mix objects from different arenas inside
363
372
// the tcache, so we wouldn't be able to guarantee isolation of different providers.
364
373
int flags =
365
374
MALLOCX_ALIGN (alignment ) | MALLOCX_ARENA (arena ) | MALLOCX_TCACHE_NONE ;
366
- void * ptr = mallocx (size , flags );
375
+ void * ptr = je_mallocx (size , flags );
367
376
if (ptr == NULL ) {
368
377
TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY ;
369
378
return NULL ;
@@ -374,7 +383,7 @@ static void *je_aligned_alloc(void *pool, size_t size, size_t alignment) {
374
383
return ptr ;
375
384
}
376
385
377
- static umf_result_t je_initialize (umf_memory_provider_handle_t provider ,
386
+ static umf_result_t op_initialize (umf_memory_provider_handle_t provider ,
378
387
void * params , void * * out_pool ) {
379
388
assert (provider );
380
389
assert (out_pool );
@@ -393,8 +402,8 @@ static umf_result_t je_initialize(umf_memory_provider_handle_t provider,
393
402
pool -> provider = provider ;
394
403
395
404
unsigned arena_index ;
396
- err =
397
- mallctl ( "arenas.create" , ( void * ) & arena_index , & unsigned_size , NULL , 0 );
405
+ err = je_mallctl ( "arenas.create" , ( void * ) & arena_index , & unsigned_size ,
406
+ NULL , 0 );
398
407
if (err ) {
399
408
fprintf (stderr , "Could not create arena.\n" );
400
409
goto err_free_pool ;
@@ -403,10 +412,10 @@ static umf_result_t je_initialize(umf_memory_provider_handle_t provider,
403
412
// setup extent_hooks for newly created arena
404
413
char cmd [64 ];
405
414
snprintf (cmd , sizeof (cmd ), "arena.%u.extent_hooks" , arena_index );
406
- err = mallctl (cmd , NULL , NULL , (void * )& pHooks , sizeof (void * ));
415
+ err = je_mallctl (cmd , NULL , NULL , (void * )& pHooks , sizeof (void * ));
407
416
if (err ) {
408
417
snprintf (cmd , sizeof (cmd ), "arena.%u.destroy" , arena_index );
409
- mallctl (cmd , NULL , 0 , NULL , 0 );
418
+ je_mallctl (cmd , NULL , 0 , NULL , 0 );
410
419
fprintf (stderr ,
411
420
"Could not setup extent_hooks for newly created arena.\n" );
412
421
goto err_free_pool ;
@@ -426,39 +435,39 @@ static umf_result_t je_initialize(umf_memory_provider_handle_t provider,
426
435
return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC ;
427
436
}
428
437
429
- static void je_finalize (void * pool ) {
438
+ static void op_finalize (void * pool ) {
430
439
assert (pool );
431
440
jemalloc_memory_pool_t * je_pool = (jemalloc_memory_pool_t * )pool ;
432
441
char cmd [64 ];
433
442
snprintf (cmd , sizeof (cmd ), "arena.%u.destroy" , je_pool -> arena_index );
434
- mallctl (cmd , NULL , 0 , NULL , 0 );
443
+ je_mallctl (cmd , NULL , 0 , NULL , 0 );
435
444
pool_by_arena_index [je_pool -> arena_index ] = NULL ;
436
445
umf_ba_global_free (je_pool );
437
446
438
447
VALGRIND_DO_DESTROY_MEMPOOL (pool );
439
448
}
440
449
441
/// Report the usable size of an allocation, as seen by jemalloc.
///
/// \param pool  unused; usable size is a property of \p ptr alone
/// \param ptr   allocation to query
/// \return number of usable bytes backing \p ptr
static size_t op_malloc_usable_size(void *pool, void *ptr) {
    (void)pool; // not needed to answer the query
    size_t usable = je_malloc_usable_size(ptr);
    return usable;
}
445
454
446
- static umf_result_t je_get_last_allocation_error (void * pool ) {
455
+ static umf_result_t op_get_last_allocation_error (void * pool ) {
447
456
(void )pool ; // not used
448
457
return TLS_last_allocation_error ;
449
458
}
450
459
451
460
static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = {
452
461
.version = UMF_VERSION_CURRENT ,
453
- .initialize = je_initialize ,
454
- .finalize = je_finalize ,
455
- .malloc = je_malloc ,
456
- .calloc = je_calloc ,
457
- .realloc = je_realloc ,
458
- .aligned_malloc = je_aligned_alloc ,
459
- .malloc_usable_size = je_malloc_usable_size ,
460
- .free = je_free ,
461
- .get_last_allocation_error = je_get_last_allocation_error ,
462
+ .initialize = op_initialize ,
463
+ .finalize = op_finalize ,
464
+ .malloc = op_malloc ,
465
+ .calloc = op_calloc ,
466
+ .realloc = op_realloc ,
467
+ .aligned_malloc = op_aligned_alloc ,
468
+ .malloc_usable_size = op_malloc_usable_size ,
469
+ .free = op_free ,
470
+ .get_last_allocation_error = op_get_last_allocation_error ,
462
471
};
463
472
464
473
umf_memory_pool_ops_t * umfJemallocPoolOps (void ) {
0 commit comments