@@ -292,33 +292,99 @@ void __init __weak kasan_populate_early_vm_area_shadow(void *start,
 {
 }
 
+struct vmalloc_populate_data {
+	unsigned long start;
+	struct page **pages;
+};
+
 static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
-				      void *unused)
+				      void *_data)
 {
-	unsigned long page;
+	struct vmalloc_populate_data *data = _data;
+	struct page *page;
 	pte_t pte;
+	int index;
 
 	if (likely(!pte_none(ptep_get(ptep))))
 		return 0;
 
-	page = __get_free_page(GFP_KERNEL);
-	if (!page)
-		return -ENOMEM;
-
-	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
-	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
+	index = PFN_DOWN(addr - data->start);
+	page = data->pages[index];
+	__memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
+	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 
 	spin_lock(&init_mm.page_table_lock);
 	if (likely(pte_none(ptep_get(ptep)))) {
 		set_pte_at(&init_mm, addr, ptep, pte);
-		page = 0;
+		data->pages[index] = NULL;
 	}
 	spin_unlock(&init_mm.page_table_lock);
-	if (page)
-		free_page(page);
+
+	return 0;
+}
+
+static void ___free_pages_bulk(struct page **pages, int nr_pages)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pages[i]) {
+			__free_pages(pages[i], 0);
+			pages[i] = NULL;
+		}
+	}
+}
+
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+{
+	unsigned long nr_populated, nr_total = nr_pages;
+	struct page **page_array = pages;
+
+	while (nr_pages) {
+		nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+		if (!nr_populated) {
+			___free_pages_bulk(page_array, nr_total - nr_pages);
+			return -ENOMEM;
+		}
+		pages += nr_populated;
+		nr_pages -= nr_populated;
+	}
+
 	return 0;
 }
 
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+{
+	unsigned long nr_pages, nr_total = PFN_UP(end - start);
+	struct vmalloc_populate_data data;
+	int ret = 0;
+
+	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	if (!data.pages)
+		return -ENOMEM;
+
+	while (nr_total) {
+		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
+		ret = ___alloc_pages_bulk(data.pages, nr_pages);
+		if (ret)
+			break;
+
+		data.start = start;
+		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
+					  kasan_populate_vmalloc_pte, &data);
+		___free_pages_bulk(data.pages, nr_pages);
+		if (ret)
+			break;
+
+		start += nr_pages * PAGE_SIZE;
+		nr_total -= nr_pages;
+	}
+
+	free_page((unsigned long)data.pages);
+
+	return ret;
+}
+
 int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 {
 	unsigned long shadow_start, shadow_end;
@@ -348,9 +414,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
 
-	ret = apply_to_page_range(&init_mm, shadow_start,
-				  shadow_end - shadow_start,
-				  kasan_populate_vmalloc_pte, NULL);
+	ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
 	if (ret)
 		return ret;
 
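Not part of the commit itself: below is a minimal, self-contained userspace sketch of the batching pattern the new __kasan_populate_vmalloc() and ___alloc_pages_bulk() rely on, for readers unfamiliar with bulk allocators. A bulk allocator may populate fewer slots than requested, so the caller loops until the batch is full and rolls back any partial work on a hard failure. fake_alloc_bulk(), BATCH, and the malloc()-backed "pages" are illustrative stand-ins, not kernel APIs.

/*
 * Userspace illustration only: mirrors the retry-and-rollback structure of
 * ___alloc_pages_bulk() in the diff above, with malloc() standing in for
 * the page allocator.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH 8	/* stand-in for PAGE_SIZE / sizeof(struct page *) */

/* Stand-in for alloc_pages_bulk(): may fill fewer than nr slots. */
static int fake_alloc_bulk(void **slots, int nr)
{
	int want = nr > 3 ? 3 : nr;	/* simulate partial success */
	int i;

	for (i = 0; i < want; i++) {
		slots[i] = malloc(64);
		if (!slots[i])
			break;
	}
	return i;	/* number of slots actually populated */
}

static void free_bulk(void **slots, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		free(slots[i]);
		slots[i] = NULL;
	}
}

/* Same shape as ___alloc_pages_bulk(): loop until the batch is full. */
static int alloc_batch(void **slots, int nr)
{
	void **base = slots;
	int total = nr;

	while (nr) {
		int got = fake_alloc_bulk(slots, nr);

		if (!got) {	/* hard failure: undo the partial batch */
			free_bulk(base, total - nr);
			return -1;
		}
		slots += got;
		nr -= got;
	}
	return 0;
}

int main(void)
{
	void *slots[BATCH] = { NULL };

	if (alloc_batch(slots, BATCH)) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	/* ...a real caller would map/consume the batch here... */
	free_bulk(slots, BATCH);
	return 0;
}

The kernel-side payoff is the same as in the sketch: one bulk call can populate an entire batch of shadow pages per iteration, instead of one __get_free_page() call per PTE as in the removed code.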