Skip to content

Commit 1501278

Browse files
committed
Merge tag 'slab-for-6.1-rc1-hotfix' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab hotfix from Vlastimil Babka:
"A single fix for the common-kmalloc series, for warnings on mips and sparc64 reported by Guenter Roeck"
* tag 'slab-for-6.1-rc1-hotfix' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: use kmalloc_node() for off slab freelist_idx_t array allocation
2 parents 36d8a3e + e36ce44 commit 1501278

File tree

2 files changed

+19
-19
lines changed

2 files changed

+19
-19
lines changed

include/linux/slab_def.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -33,7 +33,6 @@ struct kmem_cache {
3333

3434
size_t colour; /* cache colouring range */
3535
unsigned int colour_off; /* colour offset */
36-
struct kmem_cache *freelist_cache;
3736
unsigned int freelist_size;
3837

3938
/* constructor func */

mm/slab.c

Lines changed: 19 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -1619,7 +1619,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
16191619
* although actual page can be freed in rcu context
16201620
*/
16211621
if (OFF_SLAB(cachep))
1622-
kmem_cache_free(cachep->freelist_cache, freelist);
1622+
kfree(freelist);
16231623
}
16241624

16251625
/*
@@ -1671,21 +1671,27 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
16711671
if (flags & CFLGS_OFF_SLAB) {
16721672
struct kmem_cache *freelist_cache;
16731673
size_t freelist_size;
1674+
size_t freelist_cache_size;
16741675

16751676
freelist_size = num * sizeof(freelist_idx_t);
1676-
freelist_cache = kmalloc_slab(freelist_size, 0u);
1677-
if (!freelist_cache)
1678-
continue;
1679-
1680-
/*
1681-
* Needed to avoid possible looping condition
1682-
* in cache_grow_begin()
1683-
*/
1684-
if (OFF_SLAB(freelist_cache))
1685-
continue;
1677+
if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
1678+
freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
1679+
} else {
1680+
freelist_cache = kmalloc_slab(freelist_size, 0u);
1681+
if (!freelist_cache)
1682+
continue;
1683+
freelist_cache_size = freelist_cache->size;
1684+
1685+
/*
1686+
* Needed to avoid possible looping condition
1687+
* in cache_grow_begin()
1688+
*/
1689+
if (OFF_SLAB(freelist_cache))
1690+
continue;
1691+
}
16861692

16871693
/* check if off slab has enough benefit */
1688-
if (freelist_cache->size > cachep->size / 2)
1694+
if (freelist_cache_size > cachep->size / 2)
16891695
continue;
16901696
}
16911697

@@ -2061,11 +2067,6 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
20612067
cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
20622068
#endif
20632069

2064-
if (OFF_SLAB(cachep)) {
2065-
cachep->freelist_cache =
2066-
kmalloc_slab(cachep->freelist_size, 0u);
2067-
}
2068-
20692070
err = setup_cpu_cache(cachep, gfp);
20702071
if (err) {
20712072
__kmem_cache_release(cachep);
@@ -2292,7 +2293,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
22922293
freelist = NULL;
22932294
else if (OFF_SLAB(cachep)) {
22942295
/* Slab management obj is off-slab. */
2295-
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2296+
freelist = kmalloc_node(cachep->freelist_size,
22962297
local_flags, nodeid);
22972298
} else {
22982299
/* We will use last bytes at the slab for freelist */

0 commit comments

Comments (0)