Commit e4c98d6

hygoni authored and tehcaster committed
mm/slab_common: fold kmalloc_order_trace() into kmalloc_large()
There is no caller of kmalloc_order_trace() except kmalloc_large(). Fold it into kmalloc_large() and remove kmalloc_order{,_trace}().

Also add the tracepoint in kmalloc_large() that was previously in kmalloc_order_trace().

Signed-off-by: Hyeonggon Yoo <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
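For orientation, a minimal sketch of the call path this commit touches, assuming the kmalloc() fast path of this kernel series (paraphrased, not part of this diff): compile-time-constant sizes above KMALLOC_MAX_CACHE_SIZE bypass the slab caches and go straight to kmalloc_large(), which after this commit computes the page order and emits the tracepoint itself.

	/* Paraphrased sketch, not from this commit's diff. */
	static __always_inline void *kmalloc_sketch(size_t size, gfp_t flags)
	{
		if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags); /* page allocator + trace */
		return __kmalloc(size, flags);		   /* regular slab caches */
	}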
1 parent 0f853b2 commit e4c98d6

File tree

2 files changed: +6 -33 lines changed

include/linux/slab.h

Lines changed: 2 additions & 20 deletions
@@ -489,26 +489,8 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
-									 __alloc_size(1);
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-				__assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
-								 unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+					      __alloc_size(1);
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
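The new declaration keeps both attribute hints. As a hedged illustration of what __alloc_size buys (generic GCC attribute semantics, not code from this commit; my_alloc is a made-up name), the compiler can fold the allocation size into __builtin_object_size() and fortified helpers:

	#include <stdlib.h>

	/* Illustrative only: the kernel's __alloc_size(1) macro wraps an
	 * attribute like this one. */
	__attribute__((malloc, alloc_size(1))) void *my_alloc(size_t size)
	{
		return malloc(size);
	}

	int main(void)
	{
		char *p = my_alloc(16);
		/* With the hint, __builtin_object_size(p, 0) can fold to 16
		 * under optimization, letting FORTIFY-style checks flag
		 * out-of-bounds accesses at compile time. */
		free(p);
		return 0;
	}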

mm/slab_common.c

Lines changed: 4 additions & 13 deletions
@@ -905,16 +905,16 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+void *kmalloc_large(size_t size, gfp_t flags)
 {
 	void *ret = NULL;
 	struct page *page;
+	unsigned int order = get_order(size);
 
 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 		flags = kmalloc_fix_flags(flags);
 
-	flags |= __GFP_COMP;
-	page = alloc_pages(flags, order);
+	page = alloc_pages(flags | __GFP_COMP, order);
 	if (likely(page)) {
 		ret = page_address(page);
 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
@@ -923,19 +923,10 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	ret = kasan_kmalloc_large(ret, size, flags);
 	/* As ret might get tagged, call kmemleak hook after KASAN. */
 	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_order);
-
-#ifdef CONFIG_TRACING
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret = kmalloc_order(size, flags, order);
 	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_order_trace);
-#endif
+EXPORT_SYMBOL(kmalloc_large);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Randomize a generic freelist */
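The flags | __GFP_COMP detail connects to the comment above kmalloc_large(): allocating a compound page records the order, so the free side can recover it. A hedged sketch of that path (paraphrasing the kfree() large-object handling of this era, not part of this diff):

	/* Paraphrased sketch of how kfree() frees a __GFP_COMP allocation. */
	static void free_large_kmalloc_sketch(const void *ptr)
	{
		struct page *page = virt_to_head_page(ptr);
		unsigned int order = compound_order(page);

		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      -(PAGE_SIZE << order));
		__free_pages(page, order);
	}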
