Skip to content

Commit a0c3b94

Browse files
hygoni authored and tehcaster committed
mm/slub: move kmalloc_large_node() to slab_common.c
In a later patch, SLAB will also pass requests larger than an order-1 page to the page allocator. Move kmalloc_large_node() to slab_common.c. Fold kmalloc_large_node_hook() into kmalloc_large_node() as there is no other caller.

Signed-off-by: Hyeonggon Yoo <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent e4c98d6 commit a0c3b94

File tree

3 files changed

+26
-25
lines changed

3 files changed

+26
-25
lines changed

include/linux/slab.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -491,6 +491,10 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
491491

492492
void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
493493
__alloc_size(1);
494+
495+
void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
496+
__alloc_size(1);
497+
494498
/**
495499
* kmalloc - allocate memory
496500
* @size: how many bytes of memory are required.

mm/slab_common.c

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -928,6 +928,28 @@ void *kmalloc_large(size_t size, gfp_t flags)
928928
}
929929
EXPORT_SYMBOL(kmalloc_large);
930930

931+
/*
 * kmalloc_large_node - allocate a large kmalloc request straight from the
 * page allocator, on a specific NUMA node.
 * @size:  number of bytes to allocate (rounded up to a whole power-of-two
 *         number of pages via get_order()).
 * @flags: GFP allocation flags; __GFP_COMP is added unconditionally so the
 *         result is a compound page.
 * @node:  preferred NUMA node, or NUMA_NO_NODE.
 *
 * Returns the page-aligned address of the allocation, or NULL (possibly
 * KASAN-processed) on failure.  The allocation is accounted against
 * NR_SLAB_UNRECLAIMABLE_B so it shows up in slab memory statistics.
 */
void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	struct page *page;
	void *ptr = NULL;
	unsigned int order = get_order(size);

	flags |= __GFP_COMP;
	page = alloc_pages_node(node, flags, order);
	if (page) {
		ptr = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}

	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);

	return ptr;
}
EXPORT_SYMBOL(kmalloc_large_node);
952+
931953
#ifdef CONFIG_SLAB_FREELIST_RANDOM
932954
/* Randomize a generic freelist */
933955
static void freelist_randomize(struct rnd_state *state, unsigned int *list,

mm/slub.c

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1704,14 +1704,6 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
17041704
* Hooks for other subsystems that check memory allocations. In a typical
17051705
* production configuration these hooks all should produce no code at all.
17061706
*/
1707-
/*
 * Post-allocation hook for large kmalloc node allocations: lets KASAN
 * process (and possibly tag) the pointer, then registers the object with
 * kmemleak.  Returns the (possibly tagged) pointer to hand to the caller.
 */
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);
	return ptr;
}
1714-
17151707
static __always_inline void kfree_hook(void *x)
17161708
{
17171709
kmemleak_free(x);
@@ -4402,23 +4394,6 @@ static int __init setup_slub_min_objects(char *str)
44024394

44034395
__setup("slub_min_objects=", setup_slub_min_objects);
44044396

4405-
/*
 * Allocate a large kmalloc request (too big for a kmalloc cache) directly
 * from the page allocator on the given NUMA node.  The allocation is made
 * as a compound page (__GFP_COMP) and accounted as unreclaimable slab
 * memory.  Returns the page address after the KASAN/kmemleak hook, or the
 * hook-processed NULL on allocation failure.
 */
static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	struct page *page;
	void *ptr = NULL;
	unsigned int order = get_order(size);

	flags |= __GFP_COMP;
	page = alloc_pages_node(node, flags, order);
	if (page) {
		ptr = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}

	return kmalloc_large_node_hook(ptr, size, flags);
}
4421-
44224397
static __always_inline
44234398
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
44244399
{

0 commit comments

Comments (0)