@@ -4402,29 +4402,6 @@ static int __init setup_slub_min_objects(char *str)
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
-void *__kmalloc(size_t size, gfp_t flags)
-{
-	struct kmem_cache *s;
-	void *ret;
-
-	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
-		return kmalloc_large(size, flags);
-
-	s = kmalloc_slab(size, flags);
-
-	if (unlikely(ZERO_OR_NULL_PTR(s)))
-		return s;
-
-	ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
-
-	trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);
-
-	ret = kasan_kmalloc(s, ret, size, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(__kmalloc);
-
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
@@ -4442,15 +4419,16 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return kmalloc_large_node_hook(ptr, size, flags);
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline
+void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *s;
 	void *ret;
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
-		trace_kmalloc_node(_RET_IP_, ret, NULL,
+		trace_kmalloc_node(caller, ret, NULL,
 				   size, PAGE_SIZE << get_order(size),
 				   flags, node);
 
@@ -4462,16 +4440,28 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
+	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
 
-	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);
+	trace_kmalloc_node(caller, ret, s, size, s->size, flags, node);
 
 	ret = kasan_kmalloc(s, ret, size, flags);
 
 	return ret;
 }
+
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __do_kmalloc_node(size, flags, node, _RET_IP_);
+}
 EXPORT_SYMBOL(__kmalloc_node);
 
+void *__kmalloc(size_t size, gfp_t flags)
+{
+	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc);
+
+
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
  * Rejects incorrectly sized objects and objects that are to be copied
@@ -4905,32 +4895,9 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, unsigned long caller)
+				  int node, unsigned long caller)
 {
-	struct kmem_cache *s;
-	void *ret;
-
-	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-		ret = kmalloc_large_node(size, gfpflags, node);
-
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, PAGE_SIZE << get_order(size),
-				   gfpflags, node);
-
-		return ret;
-	}
-
-	s = kmalloc_slab(size, gfpflags);
-
-	if (unlikely(ZERO_OR_NULL_PTR(s)))
-		return s;
-
-	ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
-
-	/* Honor the call site pointer we received. */
-	trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
-
-	return ret;
+	return __do_kmalloc_node(size, gfpflags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 
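Net effect of the patch: __kmalloc(), __kmalloc_node() and __kmalloc_node_track_caller() become thin wrappers around a single __always_inline worker, __do_kmalloc_node(), which receives the tracing call-site address as an explicit parameter instead of hard-coding _RET_IP_. A minimal userspace sketch of the same pattern follows; it is an illustration only, with hypothetical names (do_alloc, my_alloc, my_alloc_track_caller) standing in for the slab internals and __builtin_return_address(0) playing the role of _RET_IP_.

#include <stdio.h>
#include <stdlib.h>

/* Worker: always inlined so 'caller' reflects the real call site of each wrapper. */
static inline __attribute__((always_inline))
void *do_alloc(size_t size, unsigned long caller)
{
	void *p = malloc(size);

	/* Stand-in for trace_kmalloc_node(caller, ...): report the call site. */
	printf("allocated %zu bytes at %p, caller %#lx\n", size, p, caller);
	return p;
}

/* Like __kmalloc()/__kmalloc_node(): the call site is this function's caller. */
void *my_alloc(size_t size)
{
	return do_alloc(size, (unsigned long)__builtin_return_address(0));
}

/* Like __kmalloc_node_track_caller(): honor a caller address passed in. */
void *my_alloc_track_caller(size_t size, unsigned long caller)
{
	return do_alloc(size, caller);
}

int main(void)
{
	void *a = my_alloc(32);
	void *b = my_alloc_track_caller(64,
			(unsigned long)__builtin_return_address(0));

	free(a);
	free(b);
	return 0;
}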