
Commit cc86e0c

howlett authored and akpm00 committed
radix tree test suite: add support for slab bulk APIs
Add support for kmem_cache_free_bulk() and kmem_cache_alloc_bulk() to the
radix tree test suite.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Liam R. Howlett <[email protected]>
Tested-by: Yu Zhao <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: David Howells <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: "Matthew Wilcox (Oracle)" <[email protected]>
Cc: SeongJae Park <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 000a449 commit cc86e0c
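For orientation: the two entry points added here mirror the kernel's slab bulk API. kmem_cache_alloc_bulk() fills an array with objects and returns the number allocated (0 on failure), and kmem_cache_free_bulk() returns a whole array of objects in one call. A minimal usage sketch against this test harness (the wrapper function is illustrative, not part of the commit):

	#include <linux/slab.h>	/* the tools/include shim patched below */

	/* Illustrative caller: allocate and free eight objects in bulk. */
	static void bulk_roundtrip(struct kmem_cache *cache)
	{
		void *objs[8];

		/* The harness either satisfies the whole request or returns 0. */
		if (kmem_cache_alloc_bulk(cache, __GFP_DIRECT_RECLAIM, 8, objs) != 8)
			return;

		/* ... exercise the objects ... */

		kmem_cache_free_bulk(cache, 8, objs);	/* free all eight at once */
	}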

File tree

2 files changed: +120 -2 lines changed

tools/include/linux/slab.h

Lines changed: 4 additions & 0 deletions

@@ -41,4 +41,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
 			unsigned int align, unsigned int flags,
 			void (*ctor)(void *));
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+			void **list);
+
 #endif /* _TOOLS_SLAB_H */
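Because kmem_cache_alloc_bulk() reports how many objects it delivered, callers that must make progress typically check the return value and fall back to single allocations. A hedged sketch of such a caller (the helper name alloc_n() is hypothetical, not part of the commit):

	/* Hypothetical helper: fill @out with @n objects, falling back to
	 * one-at-a-time allocation if the bulk call cannot satisfy the
	 * request (e.g. under this harness's failure injection). */
	static int alloc_n(struct kmem_cache *cache, gfp_t gfp, size_t n, void **out)
	{
		size_t i;

		if (kmem_cache_alloc_bulk(cache, gfp, n, out) == (int)n)
			return 0;

		for (i = 0; i < n; i++) {
			out[i] = kmem_cache_alloc(cache, gfp);
			if (!out[i]) {
				kmem_cache_free_bulk(cache, i, out);	/* unwind */
				return -1;
			}
		}
		return 0;
	}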

tools/testing/radix-tree/linux.c

Lines changed: 116 additions & 2 deletions
@@ -93,14 +93,13 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 	return p;
 }
 
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
 	uatomic_dec(&nr_allocated);
 	uatomic_dec(&cachep->nr_allocated);
 	if (kmalloc_verbose)
 		printf("Freeing %p to slab\n", objp);
-	pthread_mutex_lock(&cachep->lock);
 	if (cachep->nr_objs > 10 || cachep->align) {
 		memset(objp, POISON_FREE, cachep->size);
 		free(objp);
@@ -110,9 +109,80 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 		node->parent = cachep->objs;
 		cachep->objs = node;
 	}
+}
+
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+{
+	pthread_mutex_lock(&cachep->lock);
+	kmem_cache_free_locked(cachep, objp);
 	pthread_mutex_unlock(&cachep->lock);
 }
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
+{
+	if (kmalloc_verbose)
+		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);
+
+	pthread_mutex_lock(&cachep->lock);
+	for (int i = 0; i < size; i++)
+		kmem_cache_free_locked(cachep, list[i]);
+	pthread_mutex_unlock(&cachep->lock);
+}
+
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+		void **p)
+{
+	size_t i;
+
+	if (kmalloc_verbose)
+		pr_debug("Bulk alloc %lu\n", size);
+
+	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+		if (cachep->non_kernel < size)
+			return 0;
+
+		cachep->non_kernel -= size;
+	}
+
+	pthread_mutex_lock(&cachep->lock);
+	if (cachep->nr_objs >= size) {
+		struct radix_tree_node *node;
+
+		for (i = 0; i < size; i++) {
+			node = cachep->objs;
+			cachep->nr_objs--;
+			cachep->objs = node->parent;
+			p[i] = node;
+			node->parent = NULL;
+		}
+		pthread_mutex_unlock(&cachep->lock);
+	} else {
+		pthread_mutex_unlock(&cachep->lock);
+		for (i = 0; i < size; i++) {
+			if (cachep->align) {
+				posix_memalign(&p[i], cachep->align,
+					       cachep->size);
+			} else {
+				p[i] = malloc(cachep->size);
+			}
+			if (cachep->ctor)
+				cachep->ctor(p[i]);
+			else if (gfp & __GFP_ZERO)
+				memset(p[i], 0, cachep->size);
+		}
+	}
+
+	for (i = 0; i < size; i++) {
+		uatomic_inc(&nr_allocated);
+		uatomic_inc(&cachep->nr_allocated);
+		uatomic_inc(&cachep->nr_tallocated);
+		if (kmalloc_verbose)
+			printf("Allocating %p from slab\n", p[i]);
+	}
+
+	return size;
+}
+
 struct kmem_cache *
 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 		unsigned int flags, void (*ctor)(void *))
@@ -130,3 +200,45 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 	ret->non_kernel = 0;
 	return ret;
 }
+
+/*
+ * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
+ */
+void test_kmem_cache_bulk(void)
+{
+	int i;
+	void *list[12];
+	static struct kmem_cache *test_cache, *test_cache2;
+
+	/*
+	 * Testing the bulk allocators without an aligned kmem_cache to force
+	 * the bulk alloc/free paths to reuse objects from the free list.
+	 */
+	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);
+
+	for (i = 0; i < 5; i++)
+		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+	for (i = 0; i < 5; i++)
+		kmem_cache_free(test_cache, list[i]);
+	assert(test_cache->nr_objs == 5);
+
+	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
+	kmem_cache_free_bulk(test_cache, 5, list);
+
+	for (i = 0; i < 12; i++)
+		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+	for (i = 0; i < 12; i++)
+		kmem_cache_free(test_cache, list[i]);
+
+	/* The last free will not be kept around */
+	assert(test_cache->nr_objs == 11);
+
+	/* Aligned caches will immediately free */
+	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);
+
+	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
+	kmem_cache_free_bulk(test_cache2, 10, list);
+	assert(!test_cache2->nr_objs);
+}
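The !__GFP_DIRECT_RECLAIM branch in kmem_cache_alloc_bulk() hooks into the harness's allocation-failure injection: each non-sleeping allocation draws on the cache's non_kernel budget, and a bulk request larger than the remaining budget fails outright. A sketch of how a test could exercise that path (this assumes the kmem_cache_set_non_kernel() helper provided by this test suite's earlier patches; the cache name and sizes are arbitrary):

	/* Illustrative: drive the bulk allocator down its failure path. */
	static void demo_bulk_failure_injection(void)
	{
		void *list[4];
		struct kmem_cache *cache;

		cache = kmem_cache_create("demo", 256, 0, SLAB_PANIC, NULL);
		kmem_cache_set_non_kernel(cache, 2);	/* budget: two objects */

		/* Four objects exceed the budget: nothing is allocated. */
		assert(kmem_cache_alloc_bulk(cache, 0, 4, list) == 0);

		/* Two objects fit, consuming the whole budget. */
		assert(kmem_cache_alloc_bulk(cache, 0, 2, list) == 2);
		kmem_cache_free_bulk(cache, 2, list);
	}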
