File tree — 1 file changed: +4 −4 lines changed
@@ -1063,17 +1063,17 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 		return ERR_PTR(-EBUSY);
 
 	might_sleep();
+	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
 
-	va = kmem_cache_alloc_node(vmap_area_cachep,
-			gfp_mask & GFP_RECLAIM_MASK, node);
+	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 	/*
 	 * Only scan the relevant parts containing pointers to other objects
 	 * to avoid false negatives.
 	 */
-	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
 
 retry:
 	/*
@@ -1099,7 +1099,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	 * Just proceed as it is. If needed "overflow" path
 	 * will refill the cache we allocate from.
 	 */
-	pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
+	pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
 
 	spin_lock(&vmap_area_lock);
You can’t perform that action at this time.
0 commit comments