// alignment of the linear base allocator
#define MEMORY_ALIGNMENT (sizeof(uintptr_t))
// forward declaration: "next" pools form a singly-linked list (type defined below)
typedef struct umf_ba_next_linear_pool_t umf_ba_next_linear_pool_t;

// metadata is set and used only in the main (the first) pool
typedef struct umf_ba_main_linear_pool_meta_t {
    size_t pool_size; // size of each pool (argument of each ba_os_alloc() call)
    os_mutex_t lock;  // guards data_ptr / size_left / the pool list
    char *data_ptr;   // bump pointer: next free byte in the current pool
    size_t size_left; // bytes remaining after data_ptr in the current pool
#ifndef NDEBUG
    size_t n_pools;   // debug-only: number of pools (main + "next" pools)
#endif /* NDEBUG */
} umf_ba_main_linear_pool_meta_t;
// the main pool of the linear base allocator (there is only one such pool)
struct umf_ba_linear_pool {
    // address of the beginning of the next pool (a list of allocated pools
    // to be freed in umf_ba_linear_destroy())
    umf_ba_next_linear_pool_t *next_pool;

    // metadata is set and used only in the main (the first) pool
    umf_ba_main_linear_pool_meta_t metadata;

    // data area of the main pool (the first one) starts here
    // (C99 flexible array member)
    char data[];
};
// the "next" pools of the linear base allocator (pools allocated later,
// when we run out of the memory of the main pool)
struct umf_ba_next_linear_pool_t {
    // address of the beginning of the next pool (a list of allocated pools
    // to be freed in umf_ba_linear_destroy())
    umf_ba_next_linear_pool_t *next_pool;

    // data area of all pools except of the main (the first one) starts here
    // (C99 flexible array member)
    char data[];
};
59
+ #ifndef NDEBUG
60
+ static void ba_debug_checks (umf_ba_linear_pool_t * pool ) {
61
+ // count pools
62
+ size_t n_pools = 1 ;
63
+ umf_ba_next_linear_pool_t * next_pool = pool -> next_pool ;
64
+ while (next_pool ) {
65
+ n_pools ++ ;
66
+ next_pool = next_pool -> next_pool ;
67
+ }
68
+ assert (n_pools == pool -> metadata .n_pools );
69
+ }
70
+ #endif /* NDEBUG */
71
+
36
72
umf_ba_linear_pool_t * umf_ba_linear_create (size_t pool_size ) {
37
- size_t mutex_size = align_size (util_mutex_get_size (), MEMORY_ALIGNMENT );
38
73
size_t metadata_size = sizeof (umf_ba_main_linear_pool_meta_t );
39
- pool_size = pool_size + metadata_size + mutex_size ;
74
+ pool_size = pool_size + metadata_size ;
40
75
if (pool_size < MINIMUM_LINEAR_POOL_SIZE ) {
41
76
pool_size = MINIMUM_LINEAR_POOL_SIZE ;
42
77
}
@@ -56,6 +91,10 @@ umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
56
91
pool -> metadata .pool_size = pool_size ;
57
92
pool -> metadata .data_ptr = data_ptr ;
58
93
pool -> metadata .size_left = size_left ;
94
+ pool -> next_pool = NULL ; // this is the only pool now
95
+ #ifndef NDEBUG
96
+ pool -> metadata .n_pools = 1 ;
97
+ #endif /* NDEBUG */
59
98
60
99
// init lock
61
100
os_mutex_t * lock = util_mutex_init (& pool -> metadata .lock );
@@ -69,27 +108,55 @@ umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
69
108
70
109
void * umf_ba_linear_alloc (umf_ba_linear_pool_t * pool , size_t size ) {
71
110
size_t aligned_size = align_size (size , MEMORY_ALIGNMENT );
72
-
73
111
util_mutex_lock (& pool -> metadata .lock );
74
112
if (pool -> metadata .size_left < aligned_size ) {
75
- fprintf (stderr ,
76
- "error: umf_ba_linear_alloc() failed (requested size: %zu > "
77
- "space left: %zu)\n" ,
78
- aligned_size , pool -> metadata .size_left );
79
- util_mutex_unlock (& pool -> metadata .lock );
80
- assert (pool -> metadata .size_left >= aligned_size );
81
- return NULL ; // out of memory
113
+ umf_ba_next_linear_pool_t * new_pool =
114
+ (umf_ba_next_linear_pool_t * )ba_os_alloc (pool -> metadata .pool_size );
115
+ if (!new_pool ) {
116
+ util_mutex_unlock (& pool -> metadata .lock );
117
+ return NULL ;
118
+ }
119
+
120
+ void * data_ptr = & new_pool -> data ;
121
+ size_t size_left = pool -> metadata .pool_size -
122
+ offsetof(umf_ba_next_linear_pool_t , data );
123
+ align_ptr_size (& data_ptr , & size_left , MEMORY_ALIGNMENT );
124
+
125
+ pool -> metadata .data_ptr = data_ptr ;
126
+ pool -> metadata .size_left = size_left ;
127
+
128
+ // add the new pool to the list of pools
129
+ new_pool -> next_pool = pool -> next_pool ;
130
+ pool -> next_pool = new_pool ;
131
+ #ifndef NDEBUG
132
+ pool -> metadata .n_pools ++ ;
133
+ #endif /* NDEBUG */
82
134
}
83
135
84
136
void * ptr = pool -> metadata .data_ptr ;
85
137
pool -> metadata .data_ptr += aligned_size ;
86
138
pool -> metadata .size_left -= aligned_size ;
139
+ #ifndef NDEBUG
140
+ ba_debug_checks (pool );
141
+ #endif /* NDEBUG */
87
142
util_mutex_unlock (& pool -> metadata .lock );
88
143
89
144
return ptr ;
90
145
}
91
146
92
147
void umf_ba_linear_destroy (umf_ba_linear_pool_t * pool ) {
148
+ #ifndef NDEBUG
149
+ ba_debug_checks (pool );
150
+ #endif /* NDEBUG */
151
+ size_t size = pool -> metadata .pool_size ;
152
+ umf_ba_next_linear_pool_t * current_pool ;
153
+ umf_ba_next_linear_pool_t * next_pool = pool -> next_pool ;
154
+ while (next_pool ) {
155
+ current_pool = next_pool ;
156
+ next_pool = next_pool -> next_pool ;
157
+ ba_os_free (current_pool , size );
158
+ }
159
+
93
160
util_mutex_destroy_not_free (& pool -> metadata .lock );
94
- ba_os_free (pool , pool -> metadata . pool_size );
161
+ ba_os_free (pool , size );
95
162
}
0 commit comments