@@ -75,28 +75,36 @@ static slab_t *create_slab(bucket_t *bucket) {
     umf_result_t res = UMF_RESULT_SUCCESS;
     umf_memory_provider_handle_t provider = bucket->pool->provider;

-    slab_t *slab = umf_ba_global_alloc(sizeof(*slab));
+    size_t num_chunks_total =
+        utils_max(bucket_slab_min_size(bucket) / bucket->size, 1);
+
+    // Calculate the number of 64-bit words needed.
+    size_t num_words =
+        (num_chunks_total + CHUNK_BITMAP_SIZE - 1) / CHUNK_BITMAP_SIZE;
+
+    slab_t *slab = umf_ba_global_alloc(sizeof(*slab) +
+                                       num_words * sizeof(slab->chunks[0]));
     if (slab == NULL) {
         LOG_ERR("allocation of new slab failed!");
         return NULL;
     }

     slab->num_chunks_allocated = 0;
-    slab->first_free_chunk_idx = 0;
     slab->bucket = bucket;

     slab->iter.val = slab;
     slab->iter.prev = slab->iter.next = NULL;

-    slab->num_chunks_total =
-        utils_max(bucket_slab_min_size(bucket) / bucket->size, 1);
-    slab->chunks =
-        umf_ba_global_alloc(sizeof(*slab->chunks) * slab->num_chunks_total);
-    if (slab->chunks == NULL) {
-        LOG_ERR("allocation of slab chunks failed!");
-        goto free_slab;
+    slab->num_chunks_total = num_chunks_total;
+    slab->num_words = num_words;
+
+    // set all chunks as free
+    memset(slab->chunks, ~0, num_words * sizeof(slab->chunks[0]));
+    if (num_chunks_total % CHUNK_BITMAP_SIZE) {
+        // clear remaining bits
+        slab->chunks[num_words - 1] =
+            ((1ULL << (num_chunks_total % CHUNK_BITMAP_SIZE)) - 1);
     }
-    memset(slab->chunks, 0, sizeof(*slab->chunks) * slab->num_chunks_total);

     // if slab_min_size is not a multiple of bucket size, we would have some
     // padding at the end of the slab
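
// --- Illustrative sketch, not part of the patch ---
// A worked example of the sizing and tail-masking arithmetic above, assuming
// CHUNK_BITMAP_SIZE is 64 (one uint64_t word tracks 64 chunks) and that
// `chunks` is a flexible uint64_t array at the end of slab_t. All names below
// are local to this sketch.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    const size_t bits_per_word = 64;    // stands in for CHUNK_BITMAP_SIZE
    const size_t num_chunks_total = 70; // e.g. slab_min_size / bucket size
    const size_t num_words =
        (num_chunks_total + bits_per_word - 1) / bits_per_word; // 2 words

    uint64_t chunks[2];
    // every bit set: all chunks start out free
    memset(chunks, ~0, num_words * sizeof(chunks[0]));
    if (num_chunks_total % bits_per_word) {
        // only 70 - 64 = 6 chunks live in the last word, so keep 6 low bits
        chunks[num_words - 1] =
            (1ULL << (num_chunks_total % bits_per_word)) - 1; // 0x3F
    }
    printf("words: %zu, last word: 0x%llx\n", num_words,
           (unsigned long long)chunks[num_words - 1]);
    return 0;
}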
@@ -108,7 +116,7 @@ static slab_t *create_slab(bucket_t *bucket) {
     res = umfMemoryProviderAlloc(provider, slab->slab_size, 0, &slab->mem_ptr);
     if (res != UMF_RESULT_SUCCESS) {
         LOG_ERR("allocation of slab data failed!");
-        goto free_slab_chunks;
+        goto free_slab;
     }

     // raw allocation is not available for user so mark it as inaccessible
@@ -117,9 +125,6 @@ static slab_t *create_slab(bucket_t *bucket) {
     LOG_DEBUG("bucket: %p, slab_size: %zu", (void *)bucket, slab->slab_size);
     return slab;

-free_slab_chunks:
-    umf_ba_global_free(slab->chunks);
-
 free_slab:
     umf_ba_global_free(slab);
     return NULL;
@@ -136,25 +141,21 @@ static void destroy_slab(slab_t *slab) {
         LOG_ERR("deallocation of slab data failed!");
     }

-    umf_ba_global_free(slab->chunks);
     umf_ba_global_free(slab);
 }

-// return the index of the first available chunk, SIZE_MAX otherwise
 static size_t slab_find_first_available_chunk_idx(const slab_t *slab) {
-    // use the first free chunk index as a hint for the search
-    for (bool *chunk = slab->chunks + slab->first_free_chunk_idx;
-         chunk != slab->chunks + slab->num_chunks_total; chunk++) {
-
-        // false means not used
-        if (*chunk == false) {
-            size_t idx = chunk - slab->chunks;
-            LOG_DEBUG("idx: %zu", idx);
-            return idx;
+    for (size_t i = 0; i < slab->num_words; i++) {
+        // NOTE: free chunks are represented as set bits
+        uint64_t word = slab->chunks[i];
+        if (word != 0) {
+            size_t bit_index = utils_lsb64(word);
+            size_t free_chunk = i * CHUNK_BITMAP_SIZE + bit_index;
+            return free_chunk;
         }
     }

-    LOG_DEBUG("idx: SIZE_MAX");
+    // No free chunk was found.
     return SIZE_MAX;
 }

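
// --- Illustrative sketch, not part of the patch ---
// The scan above assumes utils_lsb64() returns the zero-based index of the
// lowest set bit of a non-zero 64-bit word. The real UMF helper may be
// implemented differently (e.g. via a compiler intrinsic); the sketch below
// is just a portable equivalent for reference.
#include <stddef.h>
#include <stdint.h>

static size_t lsb64_sketch(uint64_t word) {
    // callers guarantee word != 0, as slab_find_first_available_chunk_idx does
#if defined(__GNUC__) || defined(__clang__)
    return (size_t)__builtin_ctzll(word); // count trailing zero bits
#else
    size_t idx = 0;
    while ((word & 1u) == 0) {
        word >>= 1;
        idx++;
    }
    return idx;
#endif
}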
@@ -167,12 +168,9 @@ static void *slab_get_chunk(slab_t *slab) {
         (void *)((uintptr_t)slab->mem_ptr + chunk_idx * slab->bucket->size);

     // mark chunk as used
-    slab->chunks[chunk_idx] = true;
+    slab_set_chunk_bit(slab, chunk_idx, false);
     slab->num_chunks_allocated += 1;

-    // use the found index as the next hint
-    slab->first_free_chunk_idx = chunk_idx + 1;
-
     return free_chunk;
 }

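
// --- Illustrative sketch, not part of the patch ---
// slab_set_chunk_bit() and slab_read_chunk_bit() are helpers introduced
// elsewhere in this change; their definitions are not shown in these hunks
// and the real versions take a slab_t *. Given the convention above
// (set bit == free chunk), they would be expected to behave roughly like the
// sketch below, where `chunks` is the uint64_t bitmap and 64 stands in for
// CHUNK_BITMAP_SIZE.
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static void sketch_set_chunk_bit(uint64_t *chunks, size_t chunk_idx,
                                 bool set_free) {
    const size_t word = chunk_idx / 64;
    const uint64_t mask = 1ULL << (chunk_idx % 64);
    if (set_free) {
        chunks[word] |= mask;  // mark chunk as free
    } else {
        chunks[word] &= ~mask; // mark chunk as used
    }
}

static bool sketch_read_chunk_bit(const uint64_t *chunks, size_t chunk_idx) {
    // true if the chunk is free, false if it is allocated
    return (chunks[chunk_idx / 64] >> (chunk_idx % 64)) & 1u;
}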
@@ -195,18 +193,9 @@ static void slab_free_chunk(slab_t *slab, void *ptr) {
     size_t chunk_idx = ptr_diff / slab->bucket->size;

     // Make sure that the chunk was allocated
-    assert(slab->chunks[chunk_idx] && "double free detected");
-    slab->chunks[chunk_idx] = false;
+    assert(slab_read_chunk_bit(slab, chunk_idx) == 0 && "double free detected");
+    slab_set_chunk_bit(slab, chunk_idx, true);
     slab->num_chunks_allocated -= 1;
-
-    if (chunk_idx < slab->first_free_chunk_idx) {
-        slab->first_free_chunk_idx = chunk_idx;
-    }
-
-    LOG_DEBUG("chunk_idx: %zu, num_chunks_allocated: %zu, "
-              "first_free_chunk_idx: %zu",
-              chunk_idx, slab->num_chunks_allocated,
-              slab->first_free_chunk_idx);
 }

 static bool slab_has_avail(const slab_t *slab) {