@@ -122,6 +122,24 @@ typedef int __bitwise fpi_t;
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
 
+struct pagesets {
+	local_lock_t lock;
+#if defined(CONFIG_DEBUG_INFO_BTF) && \
+	!defined(CONFIG_DEBUG_LOCK_ALLOC) && \
+	!defined(CONFIG_PAHOLE_HAS_ZEROSIZE_PERCPU_SUPPORT)
+	/*
+	 * pahole 1.21 and earlier gets confused by zero-sized per-CPU
+	 * variables and produces invalid BTF. Ensure that
+	 * sizeof(struct pagesets) != 0 for older versions of pahole.
+	 */
+	char __pahole_hack;
+	#warning "pahole too old to support zero-sized struct pagesets"
+#endif
+};
+static DEFINE_PER_CPU(struct pagesets, pagesets) = {
+	.lock = INIT_LOCAL_LOCK(lock),
+};
+
 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 DEFINE_PER_CPU(int, numa_node);
 EXPORT_PER_CPU_SYMBOL(numa_node);
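
The hunk above introduces the per-CPU struct pagesets whose local_lock_t will protect the per-CPU page lists. As a standalone illustration (not part of this patch; the counters struct, events field, and bump_counter() are hypothetical names), the same DEFINE_PER_CPU + INIT_LOCAL_LOCK pattern looks like this when used end to end:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct counters {
	local_lock_t lock;	/* protects this CPU's fields below */
	unsigned long events;
};

static DEFINE_PER_CPU(struct counters, counters) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void bump_counter(void)
{
	unsigned long flags;

	/*
	 * On !PREEMPT_RT this disables interrupts, like local_irq_save();
	 * on PREEMPT_RT it acquires a per-CPU spinlock and the section
	 * stays preemptible.
	 */
	local_lock_irqsave(&counters.lock, flags);
	__this_cpu_inc(counters.events);
	local_unlock_irqrestore(&counters.lock, flags);
}

On !PREEMPT_RT the cost is the same as local_irq_save(), but the lock documents exactly which data is protected and gives lockdep something to check.
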
@@ -1453,6 +1471,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 		} while (--count && --batch_free && !list_empty(list));
 	}
 
+	/*
+	 * local_lock_irq held so equivalent to spin_lock_irqsave for
+	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
+	 */
 	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
 
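
The comment added above relies on lock nesting: callers of free_pcppages_bulk() now enter with pagesets.lock held via local_lock_irqsave(), so interrupts are already off on !PREEMPT_RT and the IRQ-safe zone->lock can be taken with a plain spin_lock(). A sketch of that caller/callee shape (illustrative names, not the kernel's exact code):

static void drain_one_zone(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;

	/* IRQs off on !PREEMPT_RT; per-CPU spinlock held on PREEMPT_RT */
	local_lock_irqsave(&pagesets.lock, flags);

	/*
	 * Plain spin_lock() suffices for the IRQ-safe zone->lock here:
	 * with interrupts already disabled this is equivalent to
	 * spin_lock_irqsave(), and on PREEMPT_RT both locks are sleeping
	 * locks, so the nesting is likewise safe.
	 */
	spin_lock(&zone->lock);
	/* ... return pcp pages to the buddy free lists ... */
	spin_unlock(&zone->lock);

	local_unlock_irqrestore(&pagesets.lock, flags);
}
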
@@ -1573,6 +1595,11 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+
+	/*
+	 * TODO FIX: Disable IRQs before acquiring IRQ-safe zone->lock
+	 * and protect vmstat updates.
+	 */
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype,
@@ -2955,6 +2982,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 {
 	int i, allocated = 0;
 
+	/*
+	 * local_lock_irq held so equivalent to spin_lock_irqsave for
+	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
+	 */
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype,
@@ -3007,12 +3038,12 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	unsigned long flags;
 	int to_drain, batch;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pagesets.lock, flags);
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0)
 		free_pcppages_bulk(zone, to_drain, pcp);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pagesets.lock, flags);
 }
 #endif
 
@@ -3028,13 +3059,13 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 	unsigned long flags;
 	struct per_cpu_pages *pcp;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pagesets.lock, flags);
 
 	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
 	if (pcp->count)
 		free_pcppages_bulk(zone, pcp->count, pcp);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pagesets.lock, flags);
 }
 
 /*
@@ -3297,9 +3328,9 @@ void free_unref_page(struct page *page)
 	if (!free_unref_page_prepare(page, pfn))
 		return;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pagesets.lock, flags);
 	free_unref_page_commit(page, pfn);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pagesets.lock, flags);
 }
 
 /*
@@ -3319,7 +3350,7 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, pfn);
 	}
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pagesets.lock, flags);
 	list_for_each_entry_safe(page, next, list, lru) {
 		unsigned long pfn = page_private(page);
 
@@ -3332,12 +3363,12 @@ void free_unref_page_list(struct list_head *list)
 		 * a large list of pages to free.
 		 */
 		if (++batch_count == SWAP_CLUSTER_MAX) {
-			local_irq_restore(flags);
+			local_unlock_irqrestore(&pagesets.lock, flags);
 			batch_count = 0;
-			local_irq_save(flags);
+			local_lock_irqsave(&pagesets.lock, flags);
 		}
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pagesets.lock, flags);
 }
 
 /*
@@ -3494,15 +3525,15 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct page *page;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pagesets.lock, flags);
 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
 	list = &pcp->lists[migratetype];
 	page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
 		zone_statistics(preferred_zone, zone);
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pagesets.lock, flags);
 	return page;
 }
 
@@ -5103,7 +5134,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		goto failed;
 
 	/* Attempt the batch allocation */
-	local_irq_save(flags);
+	local_lock_irqsave(&pagesets.lock, flags);
 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
 	pcp_list = &pcp->lists[ac.migratetype];
 
@@ -5141,12 +5172,12 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		nr_populated++;
 	}
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pagesets.lock, flags);
 
 	return nr_populated;
 
 failed_irq:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pagesets.lock, flags);
 
 failed:
 	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);