@@ -23,7 +23,7 @@ typedef struct umf_ba_next_linear_pool_t umf_ba_next_linear_pool_t;
 
 // metadata is set and used only in the main (the first) pool
 typedef struct umf_ba_main_linear_pool_meta_t {
-    size_t pool_size; // size of each pool (argument of each ba_os_alloc() call)
+    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
     os_mutex_t lock;
     char *data_ptr;
     size_t size_left;
@@ -52,6 +52,8 @@ struct umf_ba_next_linear_pool_t {
     // to be freed in umf_ba_linear_destroy())
     umf_ba_next_linear_pool_t *next_pool;
 
+    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
+
     // data area of all pools except of the main (the first one) starts here
     char data[];
 };
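
With the new `pool_size` member, every pool in the linked list records exactly how many bytes its own `ba_os_alloc()` call returned. Since `data` is a flexible array member, a pool's usable capacity is that size minus the header in front of `data`. A minimal standalone sketch of this layout arithmetic (the struct is reduced to the fields shown above, and the 4 KiB figure is an assumption):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    // Reduced copy of the struct above, for illustration only.
    typedef struct pool_t {
        struct pool_t *next_pool;
        size_t pool_size; // size passed to ba_os_alloc() for this pool
        char data[];      // data area starts here
    } pool_t;

    int main(void) {
        size_t pool_size = 4096; // assume ba_os_alloc() returned one 4 KiB page
        // Usable bytes = allocated size minus the header that precedes 'data'.
        size_t usable = pool_size - offsetof(pool_t, data);
        printf("header: %zu bytes, usable: %zu bytes\n",
               offsetof(pool_t, data), usable);
        assert(usable < pool_size);
        return 0;
    }
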
@@ -70,8 +72,8 @@ static void ba_debug_checks(umf_ba_linear_pool_t *pool) {
 #endif /* NDEBUG */
 
 umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
-    size_t metadata_size = sizeof(umf_ba_main_linear_pool_meta_t);
-    pool_size = pool_size + metadata_size;
+    pool_size += sizeof(umf_ba_next_linear_pool_t *) +
+                 sizeof(umf_ba_main_linear_pool_meta_t);
     if (pool_size < MINIMUM_LINEAR_POOL_SIZE) {
         pool_size = MINIMUM_LINEAR_POOL_SIZE;
     }
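
The rewritten padding in `umf_ba_linear_create()` reserves room for the main pool's header, i.e. the `next_pool` pointer plus the metadata struct, before applying the minimum-size floor. A hedged worked example, with an assumed 64-byte metadata struct and an assumed 64 KiB `MINIMUM_LINEAR_POOL_SIZE` (the real values come from the UMF sources):

    #include <stddef.h>
    #include <stdio.h>

    // Assumed value, for illustration; the real constant lives in the UMF sources.
    #define MINIMUM_LINEAR_POOL_SIZE (64 * 1024)

    int main(void) {
        size_t requested = 4000;
        size_t meta_size = 64; // assumed sizeof(umf_ba_main_linear_pool_meta_t)
        // Mirrors the new computation: pad by the next-pool pointer and metadata.
        size_t padded = requested + sizeof(void *) + meta_size;
        if (padded < MINIMUM_LINEAR_POOL_SIZE) {
            padded = MINIMUM_LINEAR_POOL_SIZE; // small requests are rounded up
        }
        printf("request %zu -> allocate %zu bytes\n", requested, padded);
        return 0;
    }
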
@@ -110,16 +112,29 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
     size_t aligned_size = align_size(size, MEMORY_ALIGNMENT);
     util_mutex_lock(&pool->metadata.lock);
     if (pool->metadata.size_left < aligned_size) {
+        size_t pool_size = pool->metadata.pool_size;
+        size_t usable_size =
+            pool_size - offsetof(umf_ba_next_linear_pool_t, data);
+        if (usable_size < aligned_size) {
+            pool_size += aligned_size - usable_size;
+            pool_size = align_size(pool_size, ba_os_get_page_size());
+        }
+
+        assert(pool_size - offsetof(umf_ba_next_linear_pool_t, data) >=
+               aligned_size);
+
         umf_ba_next_linear_pool_t *new_pool =
-            (umf_ba_next_linear_pool_t *)ba_os_alloc(pool->metadata.pool_size);
+            (umf_ba_next_linear_pool_t *)ba_os_alloc(pool_size);
         if (!new_pool) {
             util_mutex_unlock(&pool->metadata.lock);
             return NULL;
         }
 
+        new_pool->pool_size = pool_size;
+
         void *data_ptr = &new_pool->data;
-        size_t size_left = pool->metadata.pool_size -
-                           offsetof(umf_ba_next_linear_pool_t, data);
+        size_t size_left =
+            new_pool->pool_size - offsetof(umf_ba_next_linear_pool_t, data);
         align_ptr_size(&data_ptr, &size_left, MEMORY_ALIGNMENT);
 
         pool->metadata.data_ptr = data_ptr;
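
This is the heart of the change: a follow-up pool is no longer always `metadata.pool_size` bytes. When the request itself exceeds a default pool's usable area, the new pool grows to fit and is rounded up to a whole number of pages. A self-contained sketch of just that sizing step, assuming `align_size()` is a round-up-to-multiple helper and using a fixed 4 KiB page in place of `ba_os_get_page_size()`:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    // Assumed helper: round size up to a multiple of alignment (a power of two).
    static size_t align_size(size_t size, size_t alignment) {
        return (size + alignment - 1) & ~(alignment - 1);
    }

    typedef struct pool_t {
        struct pool_t *next_pool;
        size_t pool_size;
        char data[];
    } pool_t;

    int main(void) {
        const size_t page = 4096;       // stand-in for ba_os_get_page_size()
        size_t pool_size = 2 * page;    // stand-in for pool->metadata.pool_size
        size_t aligned_size = 3 * page; // an oversized, already-aligned request

        // Mirrors the diff: grow the pool if the request exceeds its usable area.
        size_t usable_size = pool_size - offsetof(pool_t, data);
        if (usable_size < aligned_size) {
            pool_size += aligned_size - usable_size;
            pool_size = align_size(pool_size, page);
        }
        assert(pool_size - offsetof(pool_t, data) >= aligned_size);
        printf("new pool: %zu bytes for a %zu-byte request\n",
               pool_size, aligned_size);
        return 0;
    }

Here a 12 KiB request against an 8 KiB default pool yields a 16 KiB pool: 8192 usable bytes minus the header falls short, so the deficit is added and the total rounded up to the next page boundary.
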
@@ -148,15 +163,14 @@ void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
 #ifndef NDEBUG
     ba_debug_checks(pool);
 #endif /* NDEBUG */
-    size_t size = pool->metadata.pool_size;
     umf_ba_next_linear_pool_t *current_pool;
     umf_ba_next_linear_pool_t *next_pool = pool->next_pool;
     while (next_pool) {
         current_pool = next_pool;
         next_pool = next_pool->next_pool;
-        ba_os_free(current_pool, size);
+        ba_os_free(current_pool, current_pool->pool_size);
     }
 
     util_mutex_destroy_not_free(&pool->metadata.lock);
-    ba_os_free(pool, size);
+    ba_os_free(pool, pool->metadata.pool_size);
 }
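
Since pools can now differ in size, `umf_ba_linear_destroy()` reads each pool's stored `pool_size` instead of reusing one cached value, and it advances `next_pool` before freeing the current node so the walk never touches freed memory. A minimal sketch of the same traversal, with a hypothetical stub in place of `ba_os_free()`:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct pool_t {
        struct pool_t *next_pool;
        size_t pool_size;
    } pool_t;

    // Hypothetical stand-in for ba_os_free(ptr, size), which returns pool
    // memory to the OS; here we just print and free.
    static void ba_os_free_stub(void *ptr, size_t size) {
        printf("freeing %zu bytes\n", size);
        free(ptr);
    }

    int main(void) {
        // Two pools of different sizes, as the new per-pool bookkeeping allows.
        pool_t *second = calloc(1, sizeof(pool_t));
        pool_t *first = calloc(1, sizeof(pool_t));
        if (!first || !second) {
            free(first);
            free(second);
            return 1;
        }
        second->pool_size = 8192;
        first->next_pool = second;
        first->pool_size = 4096;

        // Same walk as the destroy loop: advance 'next' before freeing 'current'.
        pool_t *next = first;
        while (next) {
            pool_t *current = next;
            next = next->next_pool;
            ba_os_free_stub(current, current->pool_size);
        }
        return 0;
    }

Storing the size in the pool itself also makes the final `ba_os_free(pool, pool->metadata.pool_size)` correct even when the main pool's size differs from the follow-up pools'.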