57
57
#include <stdbool.h>
58
58
#include <stddef.h>
59
59
60
+ #include "base_alloc.h"
61
+ #include "base_alloc_linear.h"
60
62
#include "critnib.h"
61
63
#include "utils_common.h"
62
64
#include "utils_concurrency.h"
@@ -127,6 +129,10 @@ struct critnib {
127
129
uint64_t remove_count ;
128
130
129
131
struct os_mutex_t * mutex ; /* writes/removes */
132
+
133
+ umf_ba_linear_pool_t * pool_linear ;
134
+ umf_ba_pool_t * pool_nodes ;
135
+ umf_ba_pool_t * pool_leaves ;
130
136
};
131
137
132
138
/*
@@ -176,37 +182,68 @@ static inline unsigned slice_index(word key, sh_t shift) {
176
182
* critnib_new -- allocates a new critnib structure
177
183
*/
178
184
struct critnib *critnib_new(void) {
    // The linear pool backs the critnib struct itself and the mutex storage;
    // both are released in one shot by umf_ba_linear_destroy().
    umf_ba_linear_pool_t *pool_linear =
        umf_ba_linear_create(0 /* minimal pool size */);
    if (!pool_linear) {
        return NULL;
    }

    // NOTE(review): the previous version used Zalloc(), which returned
    // zero-initialized memory; this assumes umf_ba_linear_alloc() also
    // zeroes the allocation (root, deleted_node, etc. start as NULL) —
    // TODO confirm against the base allocator's contract.
    struct critnib *c =
        umf_ba_linear_alloc(pool_linear, sizeof(struct critnib));
    if (!c) {
        goto err_destroy_pool_linear;
    }

    c->pool_linear = pool_linear;

    // Mutex storage comes from the linear pool, so it must never be freed
    // individually — only util_mutex_destroy_not_free() may be used on it.
    void *mutex_ptr = umf_ba_linear_alloc(pool_linear, util_mutex_get_size());
    if (!mutex_ptr) {
        goto err_destroy_pool_linear;
    }

    c->mutex = util_mutex_init(mutex_ptr);
    if (!c->mutex) {
        goto err_destroy_pool_linear;
    }

    // Fixed-size base-allocator pools for internal nodes and leaves.
    c->pool_nodes = umf_ba_create(sizeof(struct critnib_node));
    if (!c->pool_nodes) {
        goto err_util_mutex_destroy;
    }

    c->pool_leaves = umf_ba_create(sizeof(struct critnib_leaf));
    if (!c->pool_leaves) {
        goto err_destroy_pool_nodes;
    }

    // root and remove_count are read lock-free by readers; silence
    // Helgrind/DRD race reports on them.
    VALGRIND_HG_DRD_DISABLE_CHECKING(&c->root, sizeof(c->root));
    VALGRIND_HG_DRD_DISABLE_CHECKING(&c->remove_count, sizeof(c->remove_count));

    return c;

    // Unwind in reverse order of acquisition.
err_destroy_pool_nodes:
    umf_ba_destroy(c->pool_nodes);
err_util_mutex_destroy:
    util_mutex_destroy_not_free(c->mutex);
err_destroy_pool_linear:
    umf_ba_linear_destroy(pool_linear); // free all its allocations and destroy
    return NULL;
}
195
232
196
233
/*
197
234
* internal: delete_node -- recursively free (to malloc) a subtree
198
235
*/
199
- static void delete_node (struct critnib_node * __restrict n ) {
236
+ static void delete_node (struct critnib * c , struct critnib_node * __restrict n ) {
200
237
if (is_leaf (n )) {
201
- Free ( to_leaf (n ));
238
+ umf_ba_free ( c -> pool_leaves , to_leaf (n ));
202
239
} else {
203
240
for (int i = 0 ; i < SLNODES ; i ++ ) {
204
241
if (n -> child [i ]) {
205
- delete_node (n -> child [i ]);
242
+ delete_node (c , n -> child [i ]);
206
243
}
207
244
}
208
245
209
- Free ( n );
246
+ umf_ba_free ( c -> pool_nodes , n );
210
247
}
211
248
}
212
249
@@ -215,29 +252,35 @@ static void delete_node(struct critnib_node *__restrict n) {
215
252
*/
216
253
void critnib_delete(struct critnib *c) {
    // Free the live tree first.
    if (c->root) {
        delete_node(c, c->root);
    }

    // mutex storage lives in the linear pool; it is freed wholesale by
    // umf_ba_linear_destroy(c->pool_linear) at the end, so only tear down
    // the mutex object here without freeing it.
    util_mutex_destroy_not_free(c->mutex);

    // Drain the deferred-delete node list (chained through child[0]).
    for (struct critnib_node *m = c->deleted_node; m;) {
        struct critnib_node *mm = m->child[0];
        umf_ba_free(c->pool_nodes, m);
        m = mm;
    }

    // Drain the deferred-delete leaf list (chained through the value field).
    for (struct critnib_leaf *k = c->deleted_leaf; k;) {
        struct critnib_leaf *kk = k->value;
        umf_ba_free(c->pool_leaves, k);
        k = kk;
    }

    // NOTE(review): pending_del_* slots may be NULL; the previous code used
    // Free(), which tolerated NULL — confirm umf_ba_free() does as well.
    for (int i = 0; i < DELETED_LIFE; i++) {
        umf_ba_free(c->pool_nodes, c->pending_del_nodes[i]);
        umf_ba_free(c->pool_leaves, c->pending_del_leaves[i]);
    }

    umf_ba_destroy(c->pool_nodes);
    umf_ba_destroy(c->pool_leaves);
    umf_ba_linear_destroy(
        c->pool_linear); // free all its allocations and destroy

    // 'c' was freed in umf_ba_linear_destroy(c->pool_linear)
}
242
285
243
286
/*
@@ -264,7 +307,7 @@ static void free_node(struct critnib *__restrict c,
264
307
*/
265
308
static struct critnib_node * alloc_node (struct critnib * __restrict c ) {
266
309
if (!c -> deleted_node ) {
267
- return Malloc ( sizeof ( struct critnib_node ) );
310
+ return umf_ba_alloc ( c -> pool_nodes );
268
311
}
269
312
270
313
struct critnib_node * n = c -> deleted_node ;
@@ -295,7 +338,7 @@ static void free_leaf(struct critnib *__restrict c,
295
338
*/
296
339
static struct critnib_leaf * alloc_leaf (struct critnib * __restrict c ) {
297
340
if (!c -> deleted_leaf ) {
298
- return Malloc ( sizeof ( struct critnib_leaf ) );
341
+ return umf_ba_alloc ( c -> pool_leaves );
299
342
}
300
343
301
344
struct critnib_leaf * k = c -> deleted_leaf ;
0 commit comments