
Commit 01b4445

gormanm authored and akpm00 committed
mm/page_alloc: replace local_lock with normal spinlock
struct per_cpu_pages is no longer strictly local as PCP lists can be
drained remotely using a lock for protection. While the use of local_lock
works, it goes against the intent of local_lock which is for "pure CPU
local concurrency control mechanisms and not suited for inter-CPU
concurrency control" (Documentation/locking/locktypes.rst)

local_lock protects against migration between when the percpu pointer is
accessed and the pcp->lock acquired. The lock acquisition is a preemption
point so in the worst case, a task could migrate to another NUMA node and
accidentally allocate remote memory. The main requirement is to pin the
task to a CPU that is suitable for PREEMPT_RT and !PREEMPT_RT.

Replace local_lock with helpers that pin a task to a CPU, lookup the
per-cpu structure and acquire the embedded lock. It's similar to
local_lock without breaking the intent behind the API. It is not a
complete API as only the parts needed for PCP-alloc are implemented but in
theory, the generic helpers could be promoted to a general API if there
was demand for an embedded lock within a per-cpu struct with a guarantee
that the per-cpu structure locked matches the running CPU and cannot use
get_cpu_var due to RT concerns. PCP requires these semantics to avoid
accidentally allocating remote memory.

[[email protected]: use pcp_spin_trylock_irqsave instead of pcpu_spin_trylock_irqsave]
Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mel Gorman <[email protected]>
Tested-by: Yu Zhao <[email protected]>
Reviewed-by: Nicolas Saenz Julienne <[email protected]>
Tested-by: Nicolas Saenz Julienne <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Tested-by: Yu Zhao <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Marcelo Tosatti <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 443c2ac commit 01b4445
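
Editor's illustration (not part of the commit): the pattern the changelog
describes is pin the task, look up the per-cpu structure, then take its
embedded lock. Below is a minimal caller-side sketch using the generic
helpers added by this patch. The struct demo_pcp, its per-cpu instance and
demo_inc() are hypothetical names invented for the example.

/* Hypothetical per-cpu structure with an embedded lock (not in the patch). */
struct demo_pcp {
	spinlock_t lock;	/* assumed to be spin_lock_init()ed during setup */
	int count;
};

static DEFINE_PER_CPU(struct demo_pcp, demo_pcp);

static void demo_inc(void)
{
	struct demo_pcp *p;
	unsigned long flags;

	/*
	 * Pins the task (preempt_disable on !RT, migrate_disable on RT),
	 * looks up this CPU's instance and takes its embedded lock, so the
	 * structure that gets locked matches the CPU the task runs on.
	 */
	p = pcpu_spin_lock_irqsave(struct demo_pcp, lock, &demo_pcp, flags);
	p->count++;
	/* Releases the lock, then unpins the task. */
	pcpu_spin_unlock_irqrestore(lock, p, flags);
}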

File tree: 1 file changed, +95 −45 lines

mm/page_alloc.c

Lines changed: 95 additions & 45 deletions
@@ -126,13 +126,6 @@ typedef int __bitwise fpi_t;
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
 
-struct pagesets {
-	local_lock_t lock;
-};
-static DEFINE_PER_CPU(struct pagesets, pagesets) = {
-	.lock = INIT_LOCAL_LOCK(lock),
-};
-
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 /*
  * On SMP, spin_trylock is sufficient protection.
@@ -147,6 +140,83 @@ static DEFINE_PER_CPU(struct pagesets, pagesets) = {
 #define pcp_trylock_finish(flags)	local_irq_restore(flags)
 #endif
 
+/*
+ * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
+ * a migration causing the wrong PCP to be locked and remote memory being
+ * potentially allocated, pin the task to the CPU for the lookup+lock.
+ * preempt_disable is used on !RT because it is faster than migrate_disable.
+ * migrate_disable is used on RT because otherwise RT spinlock usage is
+ * interfered with and a high priority task cannot preempt the allocator.
+ */
+#ifndef CONFIG_PREEMPT_RT
+#define pcpu_task_pin()		preempt_disable()
+#define pcpu_task_unpin()	preempt_enable()
+#else
+#define pcpu_task_pin()		migrate_disable()
+#define pcpu_task_unpin()	migrate_enable()
+#endif
+
+/*
+ * Generic helper to lookup and a per-cpu variable with an embedded spinlock.
+ * Return value should be used with equivalent unlock helper.
+ */
+#define pcpu_spin_lock(type, member, ptr)				\
+({									\
+	type *_ret;							\
+	pcpu_task_pin();						\
+	_ret = this_cpu_ptr(ptr);					\
+	spin_lock(&_ret->member);					\
+	_ret;								\
+})
+
+#define pcpu_spin_lock_irqsave(type, member, ptr, flags)		\
+({									\
+	type *_ret;							\
+	pcpu_task_pin();						\
+	_ret = this_cpu_ptr(ptr);					\
+	spin_lock_irqsave(&_ret->member, flags);			\
+	_ret;								\
+})
+
+#define pcpu_spin_trylock_irqsave(type, member, ptr, flags)		\
+({									\
+	type *_ret;							\
+	pcpu_task_pin();						\
+	_ret = this_cpu_ptr(ptr);					\
+	if (!spin_trylock_irqsave(&_ret->member, flags)) {		\
+		pcpu_task_unpin();					\
+		_ret = NULL;						\
+	}								\
+	_ret;								\
+})
+
+#define pcpu_spin_unlock(member, ptr)					\
+({									\
+	spin_unlock(&ptr->member);					\
+	pcpu_task_unpin();						\
+})
+
+#define pcpu_spin_unlock_irqrestore(member, ptr, flags)			\
+({									\
+	spin_unlock_irqrestore(&ptr->member, flags);			\
+	pcpu_task_unpin();						\
+})
+
+/* struct per_cpu_pages specific helpers. */
+#define pcp_spin_lock(ptr)						\
+	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
+
+#define pcp_spin_lock_irqsave(ptr, flags)				\
+	pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags)
+
+#define pcp_spin_trylock_irqsave(ptr, flags)				\
+	pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags)
+
+#define pcp_spin_unlock(ptr)						\
+	pcpu_spin_unlock(lock, ptr)
+
+#define pcp_spin_unlock_irqrestore(ptr, flags)				\
+	pcpu_spin_unlock_irqrestore(lock, ptr, flags)
 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 DEFINE_PER_CPU(int, numa_node);
 EXPORT_PER_CPU_SYMBOL(numa_node);
@@ -1485,10 +1555,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	/* Ensure requested pindex is drained first. */
 	pindex = pindex - 1;
 
-	/*
-	 * local_lock_irq held so equivalent to spin_lock_irqsave for
-	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
-	 */
+	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
 	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
 
@@ -3056,10 +3123,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 {
 	int i, allocated = 0;
 
-	/*
-	 * local_lock_irq held so equivalent to spin_lock_irqsave for
-	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
-	 */
+	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype,
@@ -3431,18 +3495,16 @@ void free_unref_page(struct page *page, unsigned int order)
 		migratetype = MIGRATE_MOVABLE;
 	}
 
-	local_lock_irqsave(&pagesets.lock, flags);
 	zone = page_zone(page);
 	pcp_trylock_prepare(UP_flags);
-	pcp = this_cpu_ptr(zone->per_cpu_pageset);
-	if (spin_trylock(&pcp->lock)) {
+	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+	if (pcp) {
 		free_unref_page_commit(zone, pcp, page, migratetype, order);
-		spin_unlock(&pcp->lock);
+		pcp_spin_unlock_irqrestore(pcp, flags);
 	} else {
 		free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
 	}
 	pcp_trylock_finish(UP_flags);
-	local_unlock_irqrestore(&pagesets.lock, flags);
 }
 
 /*
@@ -3477,17 +3539,16 @@ void free_unref_page_list(struct list_head *list)
 		}
 	}
 
-	local_lock_irqsave(&pagesets.lock, flags);
 	list_for_each_entry_safe(page, next, list, lru) {
 		struct zone *zone = page_zone(page);
 
 		/* Different zone, different pcp lock. */
 		if (zone != locked_zone) {
 			if (pcp)
-				spin_unlock(&pcp->lock);
+				pcp_spin_unlock_irqrestore(pcp, flags);
+
 			locked_zone = zone;
-			pcp = this_cpu_ptr(zone->per_cpu_pageset);
-			spin_lock(&pcp->lock);
+			pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
 		}
 
 		/*
@@ -3506,18 +3567,14 @@ void free_unref_page_list(struct list_head *list)
 		 * a large list of pages to free.
 		 */
 		if (++batch_count == SWAP_CLUSTER_MAX) {
-			spin_unlock(&pcp->lock);
-			local_unlock_irqrestore(&pagesets.lock, flags);
+			pcp_spin_unlock_irqrestore(pcp, flags);
 			batch_count = 0;
-			local_lock_irqsave(&pagesets.lock, flags);
-			pcp = this_cpu_ptr(locked_zone->per_cpu_pageset);
-			spin_lock(&pcp->lock);
+			pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
 		}
 	}
 
 	if (pcp)
-		spin_unlock(&pcp->lock);
-	local_unlock_irqrestore(&pagesets.lock, flags);
+		pcp_spin_unlock_irqrestore(pcp, flags);
 }
 
 /*
@@ -3732,17 +3789,14 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	unsigned long flags;
 	unsigned long __maybe_unused UP_flags;
 
-	local_lock_irqsave(&pagesets.lock, flags);
-
 	/*
 	 * spin_trylock may fail due to a parallel drain. In the future, the
 	 * trylock will also protect against IRQ reentrancy.
 	 */
-	pcp = this_cpu_ptr(zone->per_cpu_pageset);
 	pcp_trylock_prepare(UP_flags);
-	if (!spin_trylock(&pcp->lock)) {
+	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+	if (!pcp) {
 		pcp_trylock_finish(UP_flags);
-		local_unlock_irqrestore(&pagesets.lock, flags);
 		return NULL;
 	}
 
@@ -3754,9 +3808,8 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	pcp->free_factor >>= 1;
 	list = &pcp->lists[order_to_pindex(migratetype, order)];
 	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
-	spin_unlock(&pcp->lock);
+	pcp_spin_unlock_irqrestore(pcp, flags);
 	pcp_trylock_finish(UP_flags);
-	local_unlock_irqrestore(&pagesets.lock, flags);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
 		zone_statistics(preferred_zone, zone, 1);
@@ -5358,10 +5411,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		goto failed;
 
 	/* Is a parallel drain in progress? */
-	local_lock_irqsave(&pagesets.lock, flags);
 	pcp_trylock_prepare(UP_flags);
-	pcp = this_cpu_ptr(zone->per_cpu_pageset);
-	if (!spin_trylock(&pcp->lock))
+	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+	if (!pcp)
 		goto failed_irq;
 
 	/* Attempt the batch allocation */
@@ -5379,7 +5431,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		if (unlikely(!page)) {
 			/* Try and allocate at least one page */
 			if (!nr_account) {
-				spin_unlock(&pcp->lock);
+				pcp_spin_unlock_irqrestore(pcp, flags);
 				goto failed_irq;
 			}
 			break;
@@ -5394,9 +5446,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		nr_populated++;
 	}
 
-	spin_unlock(&pcp->lock);
+	pcp_spin_unlock_irqrestore(pcp, flags);
 	pcp_trylock_finish(UP_flags);
-	local_unlock_irqrestore(&pagesets.lock, flags);
 
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
 	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
@@ -5406,7 +5457,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 
 failed_irq:
 	pcp_trylock_finish(UP_flags);
-	local_unlock_irqrestore(&pagesets.lock, flags);
 
 failed:
 	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
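
Usage note (editor's sketch, not part of the commit): the converted call
sites above (free_unref_page(), rmqueue_pcplist(), __alloc_pages_bulk())
all follow the same trylock-with-fallback shape. Condensed into one
hypothetical function, demo_pcp_op(), it looks roughly like this; the
slow-path comment stands in for whatever the real caller does when the
PCP lock is contended, such as freeing directly to the buddy lists.

static void demo_pcp_op(struct zone *zone)
{
	struct per_cpu_pages *pcp;
	unsigned long flags;
	unsigned long __maybe_unused UP_flags;

	/* On !SMP, spin_trylock() always succeeds, so IRQs are disabled instead. */
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
	if (pcp) {
		/* Fast path: pinned to this CPU, pcp->lock held, IRQs off. */
		/* ... operate on pcp->lists here ... */
		pcp_spin_unlock_irqrestore(pcp, flags);
	} else {
		/* A remote drain holds pcp->lock: fall back to the slow path. */
	}
	pcp_trylock_finish(UP_flags);
}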
