Skip to content

Commit 5c6f4d6

Browse files
committed
Merge tag 'mm-stable-2024-05-22-17-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more mm updates from Andrew Morton: "A series from Dave Chinner which cleans up and fixes the handling of nested allocations within stackdepot and page-owner" * tag 'mm-stable-2024-05-22-17-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: mm/page-owner: use gfp_nested_mask() instead of open coded masking stackdepot: use gfp_nested_mask() instead of open coded masking mm: lift gfp_kmemleak_mask() to gfp.h
2 parents de7e71e + 99b80ac commit 5c6f4d6

File tree

4 files changed

+32
-23
lines changed

4 files changed

+32
-23
lines changed

include/linux/gfp.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -156,6 +156,31 @@ static inline int gfp_zonelist(gfp_t flags)
156156
return ZONELIST_FALLBACK;
157157
}
158158

159+
/*
160+
* gfp flag masking for nested internal allocations.
161+
*
162+
* For code that needs to do allocations inside the public allocation API (e.g.
163+
* memory allocation tracking code) the allocations need to obey the caller
164+
* allocation context constraints to prevent allocation context mismatches (e.g.
165+
* GFP_KERNEL allocations in GFP_NOFS contexts) from potential deadlock
166+
* situations.
167+
*
168+
* It is also assumed that these nested allocations are for internal kernel
169+
* object storage purposes only and are not going to be used for DMA, etc. Hence
170+
* we strip out all the zone information and leave just the context information
171+
* intact.
172+
*
173+
* Further, internal allocations must fail before the higher level allocation
174+
* can fail, so we must make them fail faster and fail silently. We also don't
175+
* want them to deplete emergency reserves. Hence callers making nested
176+
* allocations must be prepared for these allocations to fail.
177+
*/
178+
static inline gfp_t gfp_nested_mask(gfp_t flags)
179+
{
180+
return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
181+
(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
182+
}
183+
159184
/*
160185
* We get the zone list from the current node and the gfp_mask.
161186
* This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.

lib/stackdepot.c

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -624,15 +624,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
624624
* we won't be able to do that under the lock.
625625
*/
626626
if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
627-
/*
628-
* Zero out zone modifiers, as we don't have specific zone
629-
* requirements. Keep the flags related to allocation in atomic
630-
* contexts, I/O, nolockdep.
631-
*/
632-
alloc_flags &= ~GFP_ZONEMASK;
633-
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
634-
alloc_flags |= __GFP_NOWARN;
635-
page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
627+
page = alloc_pages(gfp_nested_mask(alloc_flags),
628+
DEPOT_POOL_ORDER);
636629
if (page)
637630
prealloc = page_address(page);
638631
}

mm/kmemleak.c

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -114,12 +114,6 @@
114114

115115
#define BYTES_PER_POINTER sizeof(void *)
116116

117-
/* GFP bitmask for kmemleak internal allocations */
118-
#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
119-
__GFP_NOLOCKDEP)) | \
120-
__GFP_NORETRY | __GFP_NOMEMALLOC | \
121-
__GFP_NOWARN)
122-
123117
/* scanning area inside a memory block */
124118
struct kmemleak_scan_area {
125119
struct hlist_node node;
@@ -463,7 +457,8 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
463457

464458
/* try the slab allocator first */
465459
if (object_cache) {
466-
object = kmem_cache_alloc_noprof(object_cache, gfp_kmemleak_mask(gfp));
460+
object = kmem_cache_alloc_noprof(object_cache,
461+
gfp_nested_mask(gfp));
467462
if (object)
468463
return object;
469464
}
@@ -947,7 +942,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
947942
untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
948943

949944
if (scan_area_cache)
950-
area = kmem_cache_alloc_noprof(scan_area_cache, gfp_kmemleak_mask(gfp));
945+
area = kmem_cache_alloc_noprof(scan_area_cache,
946+
gfp_nested_mask(gfp));
951947

952948
raw_spin_lock_irqsave(&object->lock, flags);
953949
if (!area) {

mm/page_owner.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -168,13 +168,8 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
168168
unsigned long flags;
169169
struct stack *stack;
170170

171-
/* Filter gfp_mask the same way stackdepot does, for consistency */
172-
gfp_mask &= ~GFP_ZONEMASK;
173-
gfp_mask &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
174-
gfp_mask |= __GFP_NOWARN;
175-
176171
set_current_in_page_owner();
177-
stack = kmalloc(sizeof(*stack), gfp_mask);
172+
stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
178173
if (!stack) {
179174
unset_current_in_page_owner();
180175
return;

0 commit comments

Comments
 (0)