@@ -3180,13 +3180,14 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3180
3180
return obj ? obj : fallback_alloc (cachep , flags );
3181
3181
}
3182
3182
3183
/*
 * Forward declaration: slab_alloc_node() below calls __do_cache_alloc(),
 * whose two definitions (NUMA and non-NUMA variants) appear later in
 * this file.
 */
static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
3183
3185
static __always_inline void *
3184
3186
slab_alloc_node (struct kmem_cache * cachep , gfp_t flags , int nodeid , size_t orig_size ,
3185
3187
unsigned long caller )
3186
3188
{
3187
3189
unsigned long save_flags ;
3188
3190
void * ptr ;
3189
- int slab_node = numa_mem_id ();
3190
3191
struct obj_cgroup * objcg = NULL ;
3191
3192
bool init = false;
3192
3193
@@ -3200,30 +3201,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
3200
3201
goto out_hooks ;
3201
3202
3202
3203
local_irq_save (save_flags );
3203
-
3204
- if (nodeid == NUMA_NO_NODE )
3205
- nodeid = slab_node ;
3206
-
3207
- if (unlikely (!get_node (cachep , nodeid ))) {
3208
- /* Node not bootstrapped yet */
3209
- ptr = fallback_alloc (cachep , flags );
3210
- goto out ;
3211
- }
3212
-
3213
- if (nodeid == slab_node ) {
3214
- /*
3215
- * Use the locally cached objects if possible.
3216
- * However ____cache_alloc does not allow fallback
3217
- * to other nodes. It may fail while we still have
3218
- * objects on other nodes available.
3219
- */
3220
- ptr = ____cache_alloc (cachep , flags );
3221
- if (ptr )
3222
- goto out ;
3223
- }
3224
- /* ___cache_alloc_node can fall back to other nodes */
3225
- ptr = ____cache_alloc_node (cachep , flags , nodeid );
3226
- out :
3204
+ ptr = __do_cache_alloc (cachep , flags , nodeid );
3227
3205
local_irq_restore (save_flags );
3228
3206
ptr = cache_alloc_debugcheck_after (cachep , flags , ptr , caller );
3229
3207
init = slab_want_init_on_alloc (flags , cachep );
@@ -3234,31 +3212,46 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
3234
3212
}
3235
3213
3236
3214
/*
 * Allocate one object from @cachep, preferring NUMA node @nodeid.
 *
 * @cachep: cache to allocate from
 * @flags:  GFP allocation flags
 * @nodeid: preferred node, or NUMA_NO_NODE for "no explicit preference"
 *
 * With NUMA_NO_NODE, memory policy / cpuset spreading is honored first
 * (alternate_node_alloc()), then the local per-cpu caches are tried.
 * An explicit local @nodeid goes straight to the per-cpu caches; an
 * explicit remote node that is not bootstrapped yet falls back to
 * fallback_alloc().
 *
 * Callers in this file (slab_alloc() / slab_alloc_node()) invoke this
 * between local_irq_save()/local_irq_restore(), so it runs with
 * interrupts disabled.  May return NULL if nothing is available.
 */
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *objp = NULL;
	int slab_node = numa_mem_id();

	if (nodeid == NUMA_NO_NODE) {
		/* Honor mempolicy / cpuset memory spreading before the local node. */
		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
			objp = alternate_node_alloc(cachep, flags);
			if (objp)
				goto out;
		}
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		objp = ____cache_alloc(cachep, flags);
		/* Record where the fallback path below should start searching. */
		nodeid = slab_node;
	} else if (nodeid == slab_node) {
		/* Explicit request for the local node: use the per-cpu caches. */
		objp = ____cache_alloc(cachep, flags);
	} else if (!get_node(cachep, nodeid)) {
		/* Node not bootstrapped yet */
		objp = fallback_alloc(cachep, flags);
		goto out;
	}

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
	if (!objp)
		objp = ____cache_alloc_node(cachep, flags, nodeid);
out:
	return objp;
}
3258
3251
#else
3259
3252
3260
3253
/*
 * Single-node variant (the #else branch above — presumably !CONFIG_NUMA):
 * @nodeid is meaningless with only one node, so it is ignored and the
 * allocation always comes from the per-cpu caches.
 */
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
{
	return ____cache_alloc(cachep, flags);
}
@@ -3284,7 +3277,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
3284
3277
goto out ;
3285
3278
3286
3279
local_irq_save (save_flags );
3287
- objp = __do_cache_alloc (cachep , flags );
3280
+ objp = __do_cache_alloc (cachep , flags , NUMA_NO_NODE );
3288
3281
local_irq_restore (save_flags );
3289
3282
objp = cache_alloc_debugcheck_after (cachep , flags , objp , caller );
3290
3283
prefetchw (objp );
@@ -3521,7 +3514,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3521
3514
3522
3515
local_irq_disable ();
3523
3516
for (i = 0 ; i < size ; i ++ ) {
3524
- void * objp = kfence_alloc (s , s -> object_size , flags ) ?: __do_cache_alloc (s , flags );
3517
+ void * objp = kfence_alloc (s , s -> object_size , flags ) ?:
3518
+ __do_cache_alloc (s , flags , NUMA_NO_NODE );
3525
3519
3526
3520
if (unlikely (!objp ))
3527
3521
goto error ;
0 commit comments