@@ -235,6 +235,64 @@ EXPORT_SYMBOL(nr_online_nodes);
235
235
236
236
int page_group_by_mobility_disabled __read_mostly ;
237
237
238
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
239
+ static inline void reset_deferred_meminit (pg_data_t * pgdat )
240
+ {
241
+ pgdat -> first_deferred_pfn = ULONG_MAX ;
242
+ }
243
+
244
+ /* Returns true if the struct page for the pfn is uninitialised */
245
+ static inline bool __defermem_init early_page_uninitialised (unsigned long pfn )
246
+ {
247
+ int nid = early_pfn_to_nid (pfn );
248
+
249
+ if (pfn >= NODE_DATA (nid )-> first_deferred_pfn )
250
+ return true;
251
+
252
+ return false;
253
+ }
254
+
255
+ /*
256
+ * Returns false when the remaining initialisation should be deferred until
257
+ * later in the boot cycle when it can be parallelised.
258
+ */
259
+ static inline bool update_defer_init (pg_data_t * pgdat ,
260
+ unsigned long pfn , unsigned long zone_end ,
261
+ unsigned long * nr_initialised )
262
+ {
263
+ /* Always populate low zones for address-contrained allocations */
264
+ if (zone_end < pgdat_end_pfn (pgdat ))
265
+ return true;
266
+
267
+ /* Initialise at least 2G of the highest zone */
268
+ (* nr_initialised )++ ;
269
+ if (* nr_initialised > (2UL << (30 - PAGE_SHIFT )) &&
270
+ (pfn & (PAGES_PER_SECTION - 1 )) == 0 ) {
271
+ pgdat -> first_deferred_pfn = pfn ;
272
+ return false;
273
+ }
274
+
275
+ return true;
276
+ }
277
+ #else
278
+ static inline void reset_deferred_meminit (pg_data_t * pgdat )
279
+ {
280
+ }
281
+
282
/* Deferred init disabled: every early struct page is initialised. */
static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}
286
+
287
+ static inline bool update_defer_init (pg_data_t * pgdat ,
288
+ unsigned long pfn , unsigned long zone_end ,
289
+ unsigned long * nr_initialised )
290
+ {
291
+ return true;
292
+ }
293
+ #endif
294
+
295
+
238
296
void set_pageblock_migratetype (struct page * page , int migratetype )
239
297
{
240
298
if (unlikely (page_group_by_mobility_disabled &&
@@ -878,8 +936,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
878
936
local_irq_restore (flags );
879
937
}
880
938
881
- void __init __free_pages_bootmem (struct page * page , unsigned long pfn ,
882
- unsigned int order )
939
+ static void __defer_init __free_pages_boot_core (struct page * page ,
940
+ unsigned long pfn , unsigned int order )
883
941
{
884
942
unsigned int nr_pages = 1 << order ;
885
943
struct page * p = page ;
@@ -951,6 +1009,14 @@ static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
951
1009
#endif
952
1010
953
1011
1012
+ void __defer_init __free_pages_bootmem (struct page * page , unsigned long pfn ,
1013
+ unsigned int order )
1014
+ {
1015
+ if (early_page_uninitialised (pfn ))
1016
+ return ;
1017
+ return __free_pages_boot_core (page , pfn , order );
1018
+ }
1019
+
954
1020
#ifdef CONFIG_CMA
955
1021
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
956
1022
void __init init_cma_reserved_pageblock (struct page * page )
@@ -4325,14 +4391,16 @@ static void setup_zone_migrate_reserve(struct zone *zone)
4325
4391
void __meminit memmap_init_zone (unsigned long size , int nid , unsigned long zone ,
4326
4392
unsigned long start_pfn , enum memmap_context context )
4327
4393
{
4394
+ pg_data_t * pgdat = NODE_DATA (nid );
4328
4395
unsigned long end_pfn = start_pfn + size ;
4329
4396
unsigned long pfn ;
4330
4397
struct zone * z ;
4398
+ unsigned long nr_initialised = 0 ;
4331
4399
4332
4400
if (highest_memmap_pfn < end_pfn - 1 )
4333
4401
highest_memmap_pfn = end_pfn - 1 ;
4334
4402
4335
- z = & NODE_DATA ( nid ) -> node_zones [zone ];
4403
+ z = & pgdat -> node_zones [zone ];
4336
4404
for (pfn = start_pfn ; pfn < end_pfn ; pfn ++ ) {
4337
4405
/*
4338
4406
* There can be holes in boot-time mem_map[]s
@@ -4344,6 +4412,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4344
4412
continue ;
4345
4413
if (!early_pfn_in_nid (pfn , nid ))
4346
4414
continue ;
4415
+ if (!update_defer_init (pgdat , pfn , end_pfn ,
4416
+ & nr_initialised ))
4417
+ break ;
4347
4418
}
4348
4419
__init_single_pfn (pfn , zone , nid );
4349
4420
}
@@ -5144,6 +5215,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5144
5215
/* pg_data_t should be reset to zero when it's allocated */
5145
5216
WARN_ON (pgdat -> nr_zones || pgdat -> classzone_idx );
5146
5217
5218
+ reset_deferred_meminit (pgdat );
5147
5219
pgdat -> node_id = nid ;
5148
5220
pgdat -> node_start_pfn = node_start_pfn ;
5149
5221
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
0 commit comments