@@ -31,7 +31,7 @@ typedef struct umf_ba_next_linear_pool_t umf_ba_next_linear_pool_t;
 // metadata is set and used only in the main (the first) pool
 typedef struct umf_ba_main_linear_pool_meta_t {
     size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
-    os_mutex_t lock;
+    utils_mutex_t lock;
     char *data_ptr;
     size_t size_left;
     size_t pool_n_allocs; // number of allocations in this pool
@@ -98,7 +98,7 @@ umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
     void *data_ptr = &pool->data;
     size_t size_left = pool_size - offsetof(umf_ba_linear_pool_t, data);
 
-    util_align_ptr_size(&data_ptr, &size_left, MEMORY_ALIGNMENT);
+    utils_align_ptr_size(&data_ptr, &size_left, MEMORY_ALIGNMENT);
 
     pool->metadata.pool_size = pool_size;
     pool->metadata.data_ptr = data_ptr;
@@ -109,7 +109,7 @@ umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
     _DEBUG_EXECUTE(pool->metadata.global_n_allocs = 0);
 
     // init lock
-    os_mutex_t *lock = util_mutex_init(&pool->metadata.lock);
+    utils_mutex_t *lock = utils_mutex_init(&pool->metadata.lock);
     if (!lock) {
         ba_os_free(pool, pool_size);
         return NULL;
@@ -123,7 +123,7 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
         return NULL;
     }
     size_t aligned_size = ALIGN_UP(size, MEMORY_ALIGNMENT);
-    util_mutex_lock(&pool->metadata.lock);
+    utils_mutex_lock(&pool->metadata.lock);
     if (pool->metadata.size_left < aligned_size) {
         size_t pool_size = MINIMUM_LINEAR_POOL_SIZE;
         size_t usable_size =
@@ -139,7 +139,7 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
         umf_ba_next_linear_pool_t *new_pool =
             (umf_ba_next_linear_pool_t *)ba_os_alloc(pool_size);
         if (!new_pool) {
-            util_mutex_unlock(&pool->metadata.lock);
+            utils_mutex_unlock(&pool->metadata.lock);
             return NULL;
         }
 
@@ -149,7 +149,7 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
         void *data_ptr = &new_pool->data;
         size_t size_left =
             new_pool->pool_size - offsetof(umf_ba_next_linear_pool_t, data);
-        util_align_ptr_size(&data_ptr, &size_left, MEMORY_ALIGNMENT);
+        utils_align_ptr_size(&data_ptr, &size_left, MEMORY_ALIGNMENT);
 
         pool->metadata.data_ptr = data_ptr;
         pool->metadata.size_left = size_left;
@@ -171,7 +171,7 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
     }
     _DEBUG_EXECUTE(pool->metadata.global_n_allocs++);
     _DEBUG_EXECUTE(ba_debug_checks(pool));
-    util_mutex_unlock(&pool->metadata.lock);
+    utils_mutex_unlock(&pool->metadata.lock);
 
     return ptr;
 }
@@ -188,7 +188,7 @@ static inline int pool_contains_ptr(void *pool, size_t pool_size,
 // 0 - ptr belonged to the pool and was freed
 // -1 - ptr doesn't belong to the pool and wasn't freed
 int umf_ba_linear_free(umf_ba_linear_pool_t *pool, void *ptr) {
-    util_mutex_lock(&pool->metadata.lock);
+    utils_mutex_lock(&pool->metadata.lock);
     _DEBUG_EXECUTE(ba_debug_checks(pool));
     if (pool_contains_ptr(pool, pool->metadata.pool_size, pool->data, ptr)) {
         pool->metadata.pool_n_allocs--;
@@ -204,7 +204,7 @@ int umf_ba_linear_free(umf_ba_linear_pool_t *pool, void *ptr) {
            pool->metadata.pool_size = page_size;
        }
        _DEBUG_EXECUTE(ba_debug_checks(pool));
-       util_mutex_unlock(&pool->metadata.lock);
+       utils_mutex_unlock(&pool->metadata.lock);
        return 0;
    }
 
@@ -227,14 +227,14 @@ int umf_ba_linear_free(umf_ba_linear_pool_t *pool, void *ptr) {
                ba_os_free(next_pool_ptr, size);
            }
            _DEBUG_EXECUTE(ba_debug_checks(pool));
-           util_mutex_unlock(&pool->metadata.lock);
+           utils_mutex_unlock(&pool->metadata.lock);
            return 0;
        }
        prev_pool = next_pool;
        next_pool = next_pool->next_pool;
    }
 
-    util_mutex_unlock(&pool->metadata.lock);
+    utils_mutex_unlock(&pool->metadata.lock);
     // ptr doesn't belong to the pool and wasn't freed
     return -1;
 }
@@ -243,7 +243,7 @@ void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
     // Do not destroy if we are running in the proxy library,
     // because it may need those resources till
     // the very end of exiting the application.
-    if (util_is_running_in_proxy_lib()) {
+    if (utils_is_running_in_proxy_lib()) {
        return;
    }
 
@@ -262,7 +262,7 @@ void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
        ba_os_free(current_pool, current_pool->pool_size);
    }
 
-    util_mutex_destroy_not_free(&pool->metadata.lock);
+    utils_mutex_destroy_not_free(&pool->metadata.lock);
     ba_os_free(pool, pool->metadata.pool_size);
 }
 
@@ -272,12 +272,12 @@ void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
 // to the end of the pool if ptr belongs to the pool
 size_t umf_ba_linear_pool_contains_pointer(umf_ba_linear_pool_t *pool,
                                            void *ptr) {
-    util_mutex_lock(&pool->metadata.lock);
+    utils_mutex_lock(&pool->metadata.lock);
     char *cptr = (char *)ptr;
     if (cptr >= pool->data &&
         cptr < ((char *)(pool)) + pool->metadata.pool_size) {
         size_t size = ((char *)(pool)) + pool->metadata.pool_size - cptr;
-        util_mutex_unlock(&pool->metadata.lock);
+        utils_mutex_unlock(&pool->metadata.lock);
         return size;
     }
 
@@ -286,12 +286,12 @@ size_t umf_ba_linear_pool_contains_pointer(umf_ba_linear_pool_t *pool,
         if (cptr >= next_pool->data &&
             cptr < ((char *)(next_pool)) + next_pool->pool_size) {
             size_t size = ((char *)(next_pool)) + next_pool->pool_size - cptr;
-            util_mutex_unlock(&pool->metadata.lock);
+            utils_mutex_unlock(&pool->metadata.lock);
             return size;
         }
         next_pool = next_pool->next_pool;
     }
 
-    util_mutex_unlock(&pool->metadata.lock);
+    utils_mutex_unlock(&pool->metadata.lock);
     return 0;
 }
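
For orientation, here is a minimal usage sketch of the linear base allocator whose helper calls this patch renames (util_* to utils_*, os_mutex_t to utils_mutex_t). It is based only on the signatures visible in the hunks above; the header name and the 64 KB pool size are illustrative assumptions, not part of the change.

// Hypothetical example, not part of this patch: call pattern of the
// umf_ba_linear_* API touched by the rename. The header path and the
// pool size are assumptions for illustration.
#include "base_alloc_linear.h" // assumed header exposing the umf_ba_linear_* API

static void linear_pool_example(void) {
    // Create the main pool; pool_size is the argument of the first ba_os_alloc() call.
    umf_ba_linear_pool_t *pool = umf_ba_linear_create(64 * 1024);
    if (!pool) {
        return;
    }

    // Allocation is serialized by metadata.lock (utils_mutex_lock/unlock after
    // this rename) and bumps the pool's data pointer.
    void *ptr = umf_ba_linear_alloc(pool, 128);
    if (ptr) {
        // Per the comments above: returns 0 if ptr belonged to one of the
        // pools and was freed, -1 otherwise.
        (void)umf_ba_linear_free(pool, ptr);
    }

    // Frees all pools and destroys the lock, unless running in the proxy library.
    umf_ba_linear_destroy(pool);
}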