@@ -20,6 +20,7 @@
 #include "provider/provider_tracking.h"
 #include "uthash/utlist.h"
 #include "utils_common.h"
+#include "utils_concurrency.h"
 #include "utils_log.h"
 #include "utils_math.h"

@@ -34,7 +35,6 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static void bucket_decrement_pool(bucket_t *bucket);
 static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
                                                bool *from_pool);

@@ -316,6 +316,7 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
             assert(slab_it->val != NULL);
             pool_unregister_slab(bucket->pool, slab_it->val);
             DL_DELETE(bucket->available_slabs, slab_it);
+            assert(bucket->available_slabs_num > 0);
             bucket->available_slabs_num--;
             destroy_slab(slab_it->val);
         }
@@ -381,10 +382,20 @@ static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
         // Allocation from existing slab is treated as from pool for statistics.
         *from_pool = true;
         if (slab->num_chunks_allocated == 0) {
+            assert(bucket->chunked_slabs_in_pool > 0);
+#ifndef NDEBUG
+            uint64_t total_size_check;
+            utils_atomic_load_acquire_u64(&bucket->shared_limits->total_size,
+                                          &total_size_check);
+            assert(total_size_check >= bucket_slab_alloc_size(bucket));
+#endif
             // If this was an empty slab, it was in the pool.
             // Now it is no longer in the pool, so update count.
             --bucket->chunked_slabs_in_pool;
-            bucket_decrement_pool(bucket);
+            uint64_t size_to_add = bucket_slab_alloc_size(bucket);
+            utils_fetch_and_sub_u64(&bucket->shared_limits->total_size,
+                                    size_to_add);
+            bucket_update_stats(bucket, 1, -1);
         }
     }

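In the hunk above, the old bucket_decrement_pool() helper is inlined at its only call site: the shared total_size counter is now reduced with an explicit unsigned utils_fetch_and_sub_u64() rather than by adding a negated value, and debug builds first assert that the counter still accounts for the slab leaving the pool. A minimal standalone sketch of the same pattern using C11 <stdatomic.h> (the names release_from_limit and total_size are illustrative stand-ins, not UMF's utils_concurrency API):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t total_size; // shared pool-size counter, in bytes

    // Release one slab's worth of bytes from the shared limit.
    static void release_from_limit(uint64_t slab_size) {
    #ifndef NDEBUG
        // Debug-only sanity check: the counter must already account for this
        // slab, otherwise the unsigned subtraction below would wrap around.
        uint64_t current =
            atomic_load_explicit(&total_size, memory_order_acquire);
        assert(current >= slab_size);
    #endif
        // An explicit atomic subtraction states the intent directly, instead
        // of adding a negated value to an unsigned counter.
        atomic_fetch_sub_explicit(&total_size, slab_size, memory_order_acq_rel);
    }

    int main(void) {
        atomic_store_explicit(&total_size, 2u * 1024 * 1024,
                              memory_order_release);
        release_from_limit(1024 * 1024); // counter drops to 1 MiB
        return 0;
    }
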
@@ -420,36 +431,25 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         in_pool * bucket_slab_alloc_size(bucket);
 }

-static void bucket_decrement_pool(bucket_t *bucket) {
-    bucket_update_stats(bucket, 1, -1);
-    utils_fetch_and_add64(&bucket->shared_limits->total_size,
-                          -(long long)bucket_slab_alloc_size(bucket));
-}
-
 static bool bucket_can_pool(bucket_t *bucket) {
     size_t new_free_slabs_in_bucket;

     new_free_slabs_in_bucket = bucket->chunked_slabs_in_pool + 1;

     // we keep at most params.capacity slabs in the pool
     if (bucket_max_pooled_slabs(bucket) >= new_free_slabs_in_bucket) {
-        size_t pool_size = 0;
-        utils_atomic_load_acquire(&bucket->shared_limits->total_size,
-                                  &pool_size);
-        while (true) {
-            size_t new_pool_size = pool_size + bucket_slab_alloc_size(bucket);
-
-            if (bucket->shared_limits->max_size < new_pool_size) {
-                break;
-            }
-
-            if (utils_compare_exchange(&bucket->shared_limits->total_size,
-                                       &pool_size, &new_pool_size)) {
-                ++bucket->chunked_slabs_in_pool;
-
-                bucket_update_stats(bucket, -1, 1);
-                return true;
-            }
+
+        uint64_t size_to_add = bucket_slab_alloc_size(bucket);
+        size_t previous_size = utils_fetch_and_add_u64(
+            &bucket->shared_limits->total_size, size_to_add);
+
+        if (previous_size + size_to_add <= bucket->shared_limits->max_size) {
+            ++bucket->chunked_slabs_in_pool;
+            bucket_update_stats(bucket, -1, 1);
+            return true;
+        } else {
+            utils_fetch_and_sub_u64(&bucket->shared_limits->total_size,
+                                    size_to_add);
         }
     }

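This hunk drops the compare-exchange retry loop from bucket_can_pool(): the slab size is optimistically added to the shared total_size, and if the new total exceeds max_size the size is subtracted back and the slab is not pooled. The success path is a single atomic add; the trade-off is a brief overshoot of the limit that concurrent callers may observe before the rollback completes. A minimal sketch of this reserve-then-roll-back idiom with C11 atomics (try_reserve, total_size, and max_size are illustrative names, not the UMF API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t total_size; // bytes currently kept in the pool
    static uint64_t max_size;           // pool-wide limit, set once at startup

    // Try to reserve `size` bytes under the shared limit.
    static bool try_reserve(uint64_t size) {
        // Optimistically claim the bytes with a single atomic add.
        uint64_t previous = atomic_fetch_add_explicit(&total_size, size,
                                                      memory_order_acq_rel);
        if (previous + size <= max_size) {
            return true; // the reservation fits under the limit
        }
        // Over the limit: roll the reservation back and report failure.
        atomic_fetch_sub_explicit(&total_size, size, memory_order_acq_rel);
        return false;
    }

    int main(void) {
        max_size = 4u * 1024 * 1024; // 4 MiB shared limit
        return try_reserve(1024 * 1024) ? 0 : 1;
    }
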
@@ -523,8 +523,8 @@ static void disjoint_pool_print_stats(disjoint_pool_t *pool) {
         utils_mutex_unlock(&bucket->bucket_lock);
     }

-    LOG_DEBUG("current pool size: %zu",
-              disjoint_pool_get_limits(pool)->total_size);
+    LOG_DEBUG("current pool size: %llu",
+              (unsigned long long)disjoint_pool_get_limits(pool)->total_size);
     LOG_DEBUG("suggested setting=;%c%s:%zu,%zu,64K", (char)tolower(name[0]),
               (name + 1), high_bucket_size, high_peak_slabs_in_use);
 }
@@ -864,11 +864,12 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {

     if (disjoint_pool->params.pool_trace > 2) {
         const char *name = disjoint_pool->params.name;
-        LOG_DEBUG("freed %s %p to %s, current total pool size: %zu, current "
+        LOG_DEBUG("freed %s %p to %s, current total pool size: %llu, current "
                   "pool size for %s: %zu",
                   name, ptr, (to_pool ? "pool" : "provider"),
-                  disjoint_pool_get_limits(disjoint_pool)->total_size, name,
-                  disjoint_pool->params.cur_pool_size);
+                  (unsigned long long)disjoint_pool_get_limits(disjoint_pool)
+                      ->total_size,
+                  name, disjoint_pool->params.cur_pool_size);
     }

     return UMF_RESULT_SUCCESS;
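The two logging hunks above fix the format specifier: total_size is a 64-bit counter, and passing it to %zu relies on size_t also being 64 bits wide. Casting to unsigned long long and printing with %llu keeps the trace output well-defined on 32-bit builds too. For reference, a small standalone example of two portable ways to print a uint64_t (the value is made up):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t total = 12345678901234ULL;
        // <inttypes.h> macro expands to the right conversion for uint64_t.
        printf("current pool size: %" PRIu64 "\n", total);
        // Equivalent cast-to-unsigned-long-long form, as used in the diff.
        printf("current pool size: %llu\n", (unsigned long long)total);
        return 0;
    }
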
@@ -920,7 +921,8 @@ umf_memory_pool_ops_t *umfDisjointPoolOps(void) {

 umf_disjoint_pool_shared_limits_t *
 umfDisjointPoolSharedLimitsCreate(size_t max_size) {
-    umf_disjoint_pool_shared_limits_t *ptr = umf_ba_global_alloc(sizeof(*ptr));
+    umf_disjoint_pool_shared_limits_t *ptr =
+        umf_ba_global_aligned_alloc(sizeof(*ptr), 8);
     if (ptr == NULL) {
         LOG_ERR("cannot allocate memory for disjoint pool shared limits");
         return NULL;
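The shared-limits structure is now allocated with an explicit 8-byte alignment. It holds the 64-bit total_size that the pool updates atomically, and 64-bit atomic operations generally require natural 8-byte alignment, which a plain allocation need not guarantee on 32-bit targets. A sketch of the same requirement expressed in portable C11 (the shared_limits_t layout below is illustrative, not UMF's definition):

    #include <stdalign.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct shared_limits {
        uint64_t max_size;
        _Atomic uint64_t total_size; // updated with 64-bit atomics
    } shared_limits_t;

    int main(void) {
        // Request the alignment explicitly instead of relying on the
        // allocator's default. aligned_alloc() requires the size to be a
        // multiple of the alignment, which holds for this 16-byte struct.
        shared_limits_t *limits =
            aligned_alloc(alignof(_Atomic uint64_t), sizeof(*limits));
        if (limits == NULL) {
            return 1;
        }
        atomic_store_explicit(&limits->total_size, 0, memory_order_release);
        free(limits);
        return 0;
    }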