@@ -2958,12 +2958,6 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	return ac->entry[--ac->avail];
 }
 
-static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
-						 gfp_t flags)
-{
-	might_sleep_if(gfpflags_allow_blocking(flags));
-}
-
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 					  gfp_t flags, void *objp, unsigned long caller)
@@ -3205,7 +3199,6 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	if (unlikely(ptr))
 		goto out_hooks;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
 	if (nodeid == NUMA_NO_NODE)
@@ -3290,7 +3283,6 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 	if (unlikely(objp))
 		goto out;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
@@ -3527,8 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	if (!s)
 		return 0;
 
-	cache_alloc_debugcheck_before(s, flags);
-
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
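Note: the helper deleted in the first hunk wrapped a single debug assertion, might_sleep_if(gfpflags_allow_blocking(flags)). Dropping the per-callsite calls is presumably safe because each entry point touched here (slab_alloc_node(), slab_alloc(), kmem_cache_alloc_bulk()) first runs slab_pre_alloc_hook() from mm/slab.h, which performs an equivalent check via might_alloc(). A sketch of that check, based on include/linux/sched/mm.h in kernels of this vintage (abridged, so treat it as an approximation rather than a verbatim quote):

	/* include/linux/sched/mm.h (abridged sketch) */
	static inline void might_alloc(gfp_t gfp_mask)
	{
		fs_reclaim_acquire(gfp_mask);	/* lockdep modeling of reclaim dependencies */
		fs_reclaim_release(gfp_mask);
		/* the same "may sleep if the gfp flags allow blocking" assertion
		 * that cache_alloc_debugcheck_before() made */
		might_sleep_if(gfpflags_allow_blocking(gfp_mask));
	}

Since might_alloc() runs before any of the code shown in these hunks, the removed calls were redundant, and the common hook additionally gives lockdep coverage of reclaim dependencies that the deleted helper lacked.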