@@ -3180,37 +3180,6 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	return obj ? obj : fallback_alloc(cachep, flags);
 }
 
-static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
-
-static __always_inline void *
-slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
-		unsigned long caller)
-{
-	unsigned long save_flags;
-	void *ptr;
-	struct obj_cgroup *objcg = NULL;
-	bool init = false;
-
-	flags &= gfp_allowed_mask;
-	cachep = slab_pre_alloc_hook(cachep, NULL, &objcg, 1, flags);
-	if (unlikely(!cachep))
-		return NULL;
-
-	ptr = kfence_alloc(cachep, orig_size, flags);
-	if (unlikely(ptr))
-		goto out_hooks;
-
-	local_irq_save(save_flags);
-	ptr = __do_cache_alloc(cachep, flags, nodeid);
-	local_irq_restore(save_flags);
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-	init = slab_want_init_on_alloc(flags, cachep);
-
-out_hooks:
-	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
-	return ptr;
-}
-
 static __always_inline void *
 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
@@ -3259,8 +3228,8 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unus
 #endif	/* CONFIG_NUMA */
 
 static __always_inline void *
-slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
-	   size_t orig_size, unsigned long caller)
+slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
+		int nodeid, size_t orig_size, unsigned long caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -3277,7 +3246,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 		goto out;
 
 	local_irq_save(save_flags);
-	objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);
+	objp = __do_cache_alloc(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
@@ -3288,6 +3257,14 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 	return objp;
 }
 
+static __always_inline void *
+slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
+	   size_t orig_size, unsigned long caller)
+{
+	return slab_alloc_node(cachep, lru, flags, NUMA_NO_NODE, orig_size,
+			       caller);
+}
+
 /*
  * Caller needs to acquire correct kmem_cache_node's list_lock
  * @list: List of detached free slabs should be freed by caller
@@ -3574,7 +3551,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
  */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
+	void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
 				    cachep->object_size, cachep->size,
@@ -3592,7 +3569,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 {
 	void *ret;
 
-	ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
+	ret = slab_alloc_node(cachep, NULL, flags, nodeid, size, _RET_IP_);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc_node(_RET_IP_, ret, cachep,
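
Note (not part of the diff): after this change, slab_alloc_node() is the common allocation path that takes an explicit lru and nodeid, kmem_cache_alloc_node() calls it with lru = NULL, and slab_alloc() simply forwards NUMA_NO_NODE. The sketch below is a minimal, hypothetical module showing how a caller would exercise the kmem_cache_alloc_node() path touched here; the cache name, object size, and module boilerplate are illustrative assumptions, not taken from this commit.

/*
 * Illustrative usage sketch only: creates a small cache, allocates one
 * object on the local NUMA node via kmem_cache_alloc_node(), then frees
 * it and tears the cache down. Names and sizes are made up.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/topology.h>

static struct kmem_cache *example_cache;	/* hypothetical cache */

static int __init example_init(void)
{
	void *obj;

	/* 64-byte objects, default alignment, no flags, no constructor */
	example_cache = kmem_cache_create("example_cache", 64, 0, 0, NULL);
	if (!example_cache)
		return -ENOMEM;

	/* allocate on the calling CPU's node; any valid node id works */
	obj = kmem_cache_alloc_node(example_cache, GFP_KERNEL, numa_node_id());
	if (!obj) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}

	kmem_cache_free(example_cache, obj);
	return 0;
}

static void __exit example_exit(void)
{
	kmem_cache_destroy(example_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");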