@@ -93,14 +93,13 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 	return p;
 }
 
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
 	uatomic_dec(&nr_allocated);
 	uatomic_dec(&cachep->nr_allocated);
 	if (kmalloc_verbose)
 		printf("Freeing %p to slab\n", objp);
-	pthread_mutex_lock(&cachep->lock);
 	if (cachep->nr_objs > 10 || cachep->align) {
 		memset(objp, POISON_FREE, cachep->size);
 		free(objp);
@@ -110,9 +109,80 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 		node->parent = cachep->objs;
 		cachep->objs = node;
 	}
+}
+
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+{
+	pthread_mutex_lock(&cachep->lock);
+	kmem_cache_free_locked(cachep, objp);
 	pthread_mutex_unlock(&cachep->lock);
 }
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
+{
+	if (kmalloc_verbose)
+		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);
+
+	pthread_mutex_lock(&cachep->lock);
+	for (int i = 0; i < size; i++)
+		kmem_cache_free_locked(cachep, list[i]);
+	pthread_mutex_unlock(&cachep->lock);
+}
+
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+		void **p)
+{
+	size_t i;
+
+	if (kmalloc_verbose)
+		pr_debug("Bulk alloc %lu\n", size);
+
+	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+		if (cachep->non_kernel < size)
+			return 0;
+
+		cachep->non_kernel -= size;
+	}
+
+	pthread_mutex_lock(&cachep->lock);
+	if (cachep->nr_objs >= size) {
+		struct radix_tree_node *node;
+
+		for (i = 0; i < size; i++) {
+			node = cachep->objs;
+			cachep->nr_objs--;
+			cachep->objs = node->parent;
+			p[i] = node;
+			node->parent = NULL;
+		}
+		pthread_mutex_unlock(&cachep->lock);
+	} else {
+		pthread_mutex_unlock(&cachep->lock);
+		for (i = 0; i < size; i++) {
+			if (cachep->align) {
+				posix_memalign(&p[i], cachep->align,
+						cachep->size * size);
+			} else {
+				p[i] = malloc(cachep->size * size);
+			}
+			if (cachep->ctor)
+				cachep->ctor(p[i]);
+			else if (gfp & __GFP_ZERO)
+				memset(p[i], 0, cachep->size);
+		}
+	}
+
+	for (i = 0; i < size; i++) {
+		uatomic_inc(&nr_allocated);
+		uatomic_inc(&cachep->nr_allocated);
+		uatomic_inc(&cachep->nr_tallocated);
+		if (kmalloc_verbose)
+			printf("Allocating %p from slab\n", p[i]);
+	}
+
+	return size;
+}
+
 struct kmem_cache *
 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 		unsigned int flags, void (*ctor)(void *))
@@ -130,3 +200,47 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 	ret->non_kernel = 0;
 	return ret;
 }
+
+/*
+ * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
+ */
+void test_kmem_cache_bulk(void)
+{
+	int i;
+	void *list[12];
+	static struct kmem_cache *test_cache, *test_cache2;
+
+	/*
+	 * Testing the bulk allocators without an aligned kmem_cache to force
+	 * the bulk alloc/free to reuse freed objects.
+	 */
+	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);
+
+	for (i = 0; i < 5; i++)
+		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+	for (i = 0; i < 5; i++)
+		kmem_cache_free(test_cache, list[i]);
+	assert(test_cache->nr_objs == 5);
+
+	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
+	kmem_cache_free_bulk(test_cache, 5, list);
+
+	for (i = 0; i < 12; i++)
+		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+	for (i = 0; i < 12; i++)
+		kmem_cache_free(test_cache, list[i]);
+
+	/* The last free will not be kept around */
+	assert(test_cache->nr_objs == 11);
+
+	/* Aligned caches will immediately free */
+	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);
+
+	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
+	kmem_cache_free_bulk(test_cache2, 10, list);
+	assert(!test_cache2->nr_objs);
+
+
+}
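
The key move in the diff is that kmem_cache_free() becomes a thin wrapper that only takes the per-cache mutex and delegates to kmem_cache_free_locked(), so kmem_cache_free_bulk() can take the lock once around the whole loop instead of once per object. Below is a minimal, self-contained sketch of that locking pattern for illustration only; the names (obj_cache, obj_cache_free, nr_freed) and the simplified bookkeeping are assumptions of the sketch, not the kernel test code.

#include <pthread.h>
#include <stdlib.h>

struct obj_cache {
	pthread_mutex_t lock;		/* protects nr_freed */
	unsigned long nr_freed;		/* stand-in for the per-cache counters */
};

/* Caller must hold cachep->lock. */
static void obj_cache_free_locked(struct obj_cache *cachep, void *objp)
{
	cachep->nr_freed++;
	free(objp);
}

/* Single-object free: take the lock, delegate, drop the lock. */
void obj_cache_free(struct obj_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	obj_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

/* Bulk free: one lock/unlock pair around the whole batch. */
void obj_cache_free_bulk(struct obj_cache *cachep, size_t size, void **list)
{
	pthread_mutex_lock(&cachep->lock);
	for (size_t i = 0; i < size; i++)
		obj_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}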