
Commit eedb2ae

Make it possible to allocate more than initial pool size
Make it possible to allocate more than the initial pool size of the linear allocator.

Signed-off-by: Lukasz Dorau <[email protected]>
1 parent 84559ef commit eedb2ae
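
For context, a minimal usage sketch of what this change enables. The type and function names (umf_ba_linear_pool_t, umf_ba_linear_create, umf_ba_linear_alloc, umf_ba_linear_destroy) are taken from the diff below; the include path and the concrete sizes are assumptions for illustration, not part of this commit.

// Hypothetical usage sketch: include path and sizes are assumptions.
#include "base_alloc_linear.h"

#include <assert.h>

int main(void) {
    // Create a linear base allocator with a small initial pool.
    umf_ba_linear_pool_t *pool = umf_ba_linear_create(4096);
    assert(pool);

    // A request larger than the initial pool size can now be satisfied:
    // the allocator sizes the next pool to fit the request.
    void *big = umf_ba_linear_alloc(pool, 1024 * 1024);
    assert(big);

    umf_ba_linear_destroy(pool);
    return 0;
}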

File tree

1 file changed, +23 -9 lines changed


src/base_alloc/base_alloc_linear.c

Lines changed: 23 additions & 9 deletions
@@ -23,7 +23,7 @@ typedef struct umf_ba_next_linear_pool_t umf_ba_next_linear_pool_t;
 
 // metadata is set and used only in the main (the first) pool
 typedef struct umf_ba_main_linear_pool_meta_t {
-    size_t pool_size; // size of each pool (argument of each ba_os_alloc() call)
+    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
     os_mutex_t lock;
     char *data_ptr;
     size_t size_left;
@@ -52,6 +52,8 @@ struct umf_ba_next_linear_pool_t {
     // to be freed in umf_ba_linear_destroy())
     umf_ba_next_linear_pool_t *next_pool;
 
+    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
+
     // data area of all pools except of the main (the first one) starts here
     char data[];
 };
@@ -70,8 +72,8 @@ static void ba_debug_checks(umf_ba_linear_pool_t *pool) {
 #endif /* NDEBUG */
 
 umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
-    size_t metadata_size = sizeof(umf_ba_main_linear_pool_meta_t);
-    pool_size = pool_size + metadata_size;
+    pool_size += sizeof(umf_ba_next_linear_pool_t *) +
+                 sizeof(umf_ba_main_linear_pool_meta_t);
     if (pool_size < MINIMUM_LINEAR_POOL_SIZE) {
         pool_size = MINIMUM_LINEAR_POOL_SIZE;
     }
@@ -110,16 +112,29 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
     size_t aligned_size = align_size(size, MEMORY_ALIGNMENT);
     util_mutex_lock(&pool->metadata.lock);
     if (pool->metadata.size_left < aligned_size) {
+        size_t pool_size = pool->metadata.pool_size;
+        size_t usable_size =
+            pool_size - offsetof(umf_ba_next_linear_pool_t, data);
+        if (usable_size < aligned_size) {
+            pool_size += aligned_size - usable_size;
+            pool_size = align_size(pool_size, ba_os_get_page_size());
+        }
+
+        assert(pool_size - offsetof(umf_ba_next_linear_pool_t, data) >=
+               aligned_size);
+
         umf_ba_next_linear_pool_t *new_pool =
-            (umf_ba_next_linear_pool_t *)ba_os_alloc(pool->metadata.pool_size);
+            (umf_ba_next_linear_pool_t *)ba_os_alloc(pool_size);
         if (!new_pool) {
             util_mutex_unlock(&pool->metadata.lock);
             return NULL;
         }
 
+        new_pool->pool_size = pool_size;
+
         void *data_ptr = &new_pool->data;
-        size_t size_left = pool->metadata.pool_size -
-                           offsetof(umf_ba_next_linear_pool_t, data);
+        size_t size_left =
+            new_pool->pool_size - offsetof(umf_ba_next_linear_pool_t, data);
         align_ptr_size(&data_ptr, &size_left, MEMORY_ALIGNMENT);
 
         pool->metadata.data_ptr = data_ptr;
@@ -148,15 +163,14 @@ void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
 #ifndef NDEBUG
     ba_debug_checks(pool);
 #endif /* NDEBUG */
-    size_t size = pool->metadata.pool_size;
     umf_ba_next_linear_pool_t *current_pool;
     umf_ba_next_linear_pool_t *next_pool = pool->next_pool;
     while (next_pool) {
         current_pool = next_pool;
         next_pool = next_pool->next_pool;
-        ba_os_free(current_pool, size);
+        ba_os_free(current_pool, current_pool->pool_size);
     }
 
     util_mutex_destroy_not_free(&pool->metadata.lock);
-    ba_os_free(pool, size);
+    ba_os_free(pool, pool->metadata.pool_size);
 }
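
To make the new growth path in umf_ba_linear_alloc() concrete, here is a small worked example mirroring the computation above. The header offset, page size, and request size are assumed values, and align_up() stands in for the allocator's internal align_size() helper; this is a sketch, not the allocator's code.

// Worked example of the next-pool sizing logic; all numbers are assumptions.
#include <stdio.h>
#include <stddef.h>

// Stand-in for the allocator's internal align_size() helper.
static size_t align_up(size_t size, size_t alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

int main(void) {
    size_t pool_size = 4096;                 // pool->metadata.pool_size
    size_t header = 16;                      // assumed offsetof(umf_ba_next_linear_pool_t, data)
    size_t aligned_size = 1024 * 1024;       // a 1 MiB request, already aligned
    size_t usable_size = pool_size - header; // 4080 usable bytes in a default-sized pool

    if (usable_size < aligned_size) {
        pool_size += aligned_size - usable_size; // 1048592
        pool_size = align_up(pool_size, 4096);   // 1052672, i.e. 257 pages
    }

    // The next pool is now guaranteed to fit the request:
    // 1052672 - 16 = 1052656 >= 1048576.
    printf("next pool size: %zu bytes\n", pool_size);
    return 0;
}

Because each pool now records the size it was actually allocated with (new_pool->pool_size), umf_ba_linear_destroy() can free pools of different sizes instead of assuming the initial pool_size for every one.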
