@@ -2325,16 +2325,16 @@ static void shrink_readahead_size_eio(struct file_ra_state *ra)
 }
 
 /*
- * filemap_get_read_batch - Get a batch of pages for read
+ * filemap_get_read_batch - Get a batch of folios for read
  *
- * Get a batch of pages which represent a contiguous range of bytes
- * in the file.  No tail pages will be returned.  If @index is in the
- * middle of a THP, the entire THP will be returned.  The last page in
- * the batch may have Readahead set or be not Uptodate so that the
- * caller can take the appropriate action.
+ * Get a batch of folios which represent a contiguous range of bytes in
+ * the file.  No exceptional entries will be returned.  If @index is in
+ * the middle of a folio, the entire folio will be returned.  The last
+ * folio in the batch may have the readahead flag set or the uptodate flag
+ * clear so that the caller can take the appropriate action.
  */
 static void filemap_get_read_batch(struct address_space *mapping,
-		pgoff_t index, pgoff_t max, struct pagevec *pvec)
+		pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
 {
 	XA_STATE(xas, &mapping->i_pages, index);
 	struct folio *folio;
@@ -2349,9 +2349,9 @@ static void filemap_get_read_batch(struct address_space *mapping,
 			goto retry;
 
 		if (unlikely(folio != xas_reload(&xas)))
-			goto put_page;
+			goto put_folio;
 
-		if (!pagevec_add(pvec, &folio->page))
+		if (!folio_batch_add(fbatch, folio))
 			break;
 		if (!folio_test_uptodate(folio))
 			break;
@@ -2360,7 +2360,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
 		xas.xa_index = folio->index + folio_nr_pages(folio) - 1;
 		xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
 		continue;
-put_page:
+put_folio:
 		folio_put(folio);
 retry:
 		xas_reset(&xas);
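Worth noting for readers of the fill loop above: it depends on the return convention of folio_batch_add(), which (like pagevec_add() before it) returns the number of slots still free after the add, so a zero return means the batch is full and the loop stops. Below is a minimal userspace sketch of that convention; toy_batch and BATCH_SIZE are hypothetical stand-ins for the kernel's struct folio_batch and PAGEVEC_SIZE, not the real definitions.

```c
#include <stdio.h>

#define BATCH_SIZE 15	/* stand-in for PAGEVEC_SIZE; the real value lives in the kernel headers */

/* Simplified stand-in for struct folio_batch: a count plus a fixed array. */
struct toy_batch {
	unsigned char nr;
	void *entries[BATCH_SIZE];
};

static void toy_batch_init(struct toy_batch *b)
{
	b->nr = 0;
}

static unsigned int toy_batch_count(struct toy_batch *b)
{
	return b->nr;
}

/* Returns the number of slots left after the add -- zero means "full". */
static unsigned int toy_batch_add(struct toy_batch *b, void *entry)
{
	b->entries[b->nr++] = entry;
	return BATCH_SIZE - b->nr;
}

int main(void)
{
	struct toy_batch batch;
	int items[100];
	int i;

	toy_batch_init(&batch);
	for (i = 0; i < 100; i++) {
		/* Same shape as the kernel loop: stop once the batch fills up. */
		if (!toy_batch_add(&batch, &items[i]))
			break;
	}
	printf("collected %u entries\n", toy_batch_count(&batch));
	return 0;
}
```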
@@ -2475,7 +2475,7 @@ static int filemap_update_page(struct kiocb *iocb,
 
 static int filemap_create_folio(struct file *file,
 		struct address_space *mapping, pgoff_t index,
-		struct pagevec *pvec)
+		struct folio_batch *fbatch)
 {
 	struct folio *folio;
 	int error;
@@ -2510,7 +2510,7 @@ static int filemap_create_folio(struct file *file,
 		goto error;
 
 	filemap_invalidate_unlock_shared(mapping);
-	pagevec_add(pvec, &folio->page);
+	folio_batch_add(fbatch, folio);
 	return 0;
 error:
 	filemap_invalidate_unlock_shared(mapping);
@@ -2531,7 +2531,7 @@ static int filemap_readahead(struct kiocb *iocb, struct file *file,
 }
 
 static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
-		struct pagevec *pvec)
+		struct folio_batch *fbatch)
 {
 	struct file *filp = iocb->ki_filp;
 	struct address_space *mapping = filp->f_mapping;
@@ -2546,32 +2546,33 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
 	if (fatal_signal_pending(current))
 		return -EINTR;
 
-	filemap_get_read_batch(mapping, index, last_index, pvec);
-	if (!pagevec_count(pvec)) {
+	filemap_get_read_batch(mapping, index, last_index, fbatch);
+	if (!folio_batch_count(fbatch)) {
 		if (iocb->ki_flags & IOCB_NOIO)
 			return -EAGAIN;
 		page_cache_sync_readahead(mapping, ra, filp, index,
 				last_index - index);
-		filemap_get_read_batch(mapping, index, last_index, pvec);
+		filemap_get_read_batch(mapping, index, last_index, fbatch);
 	}
-	if (!pagevec_count(pvec)) {
+	if (!folio_batch_count(fbatch)) {
 		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
 			return -EAGAIN;
 		err = filemap_create_folio(filp, mapping,
-				iocb->ki_pos >> PAGE_SHIFT, pvec);
+				iocb->ki_pos >> PAGE_SHIFT, fbatch);
 		if (err == AOP_TRUNCATED_PAGE)
 			goto retry;
 		return err;
 	}
 
-	folio = page_folio(pvec->pages[pagevec_count(pvec) - 1]);
+	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
 	if (folio_test_readahead(folio)) {
 		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
 		if (err)
 			goto err;
 	}
 	if (!folio_test_uptodate(folio)) {
-		if ((iocb->ki_flags & IOCB_WAITQ) && pagevec_count(pvec) > 1)
+		if ((iocb->ki_flags & IOCB_WAITQ) &&
+		    folio_batch_count(fbatch) > 1)
 			iocb->ki_flags |= IOCB_NOWAIT;
 		err = filemap_update_page(iocb, mapping, iter, folio);
 		if (err)
@@ -2582,7 +2583,7 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
 err:
 	if (err < 0)
 		folio_put(folio);
-	if (likely(--pvec->nr))
+	if (likely(--fbatch->nr))
 		return 0;
 	if (err == AOP_TRUNCATED_PAGE)
 		goto retry;
@@ -2609,7 +2610,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 	struct file_ra_state *ra = &filp->f_ra;
 	struct address_space *mapping = filp->f_mapping;
 	struct inode *inode = mapping->host;
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	int i, error = 0;
 	bool writably_mapped;
 	loff_t isize, end_offset;
@@ -2620,7 +2621,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 		return 0;
 
 	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 
 	do {
 		cond_resched();
@@ -2636,7 +2637,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
 			break;
 
-		error = filemap_get_pages(iocb, iter, &pvec);
+		error = filemap_get_pages(iocb, iter, &fbatch);
 		if (error < 0)
 			break;
 
@@ -2650,7 +2651,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 		 */
 		isize = i_size_read(inode);
 		if (unlikely(iocb->ki_pos >= isize))
-			goto put_pages;
+			goto put_folios;
 		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
 
 		/*
@@ -2665,10 +2666,10 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 		 */
 		if (iocb->ki_pos >> PAGE_SHIFT !=
 		    ra->prev_pos >> PAGE_SHIFT)
-			mark_page_accessed(pvec.pages[0]);
+			folio_mark_accessed(fbatch.folios[0]);
 
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct folio *folio = page_folio(pvec.pages[i]);
+		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+			struct folio *folio = fbatch.folios[i];
 			size_t fsize = folio_size(folio);
 			size_t offset = iocb->ki_pos & (fsize - 1);
 			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
@@ -2698,10 +2699,10 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 				break;
 			}
 		}
-put_pages:
-		for (i = 0; i < pagevec_count(&pvec); i++)
-			put_page(pvec.pages[i]);
-		pagevec_reinit(&pvec);
+put_folios:
+		for (i = 0; i < folio_batch_count(&fbatch); i++)
+			folio_put(fbatch.folios[i]);
+		folio_batch_init(&fbatch);
 	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
 
 	file_accessed(filp);
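Taken together, the filemap_read() hunks show the whole batch lifecycle: folio_batch_init() before the loop, a fill via filemap_get_pages(), per-entry processing through fbatch.folios[i], then dropping every reference at put_folios and re-initialising the batch for the next pass. Here is a hedged userspace sketch of that shape; toy_batch is again a hypothetical stand-in, with malloc/free standing in for taking and putting folio references.

```c
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 15	/* stand-in for PAGEVEC_SIZE */

struct toy_batch {
	unsigned char nr;
	int *entries[BATCH_SIZE];
};

static void toy_batch_init(struct toy_batch *b)
{
	b->nr = 0;
}

/* Simulates the fill step: hands out up to a batch of "folios". */
static void fill_batch(struct toy_batch *b, int *next, int total)
{
	while (*next < total && b->nr < BATCH_SIZE) {
		b->entries[b->nr] = malloc(sizeof(int));
		*b->entries[b->nr] = (*next)++;
		b->nr++;
	}
}

int main(void)
{
	struct toy_batch batch;
	int next = 0, i;

	toy_batch_init(&batch);			/* folio_batch_init() */
	do {
		fill_batch(&batch, &next, 40);	/* filemap_get_pages() */
		for (i = 0; i < batch.nr; i++)	/* copy data out of each entry */
			printf("processing entry %d\n", *batch.entries[i]);
		for (i = 0; i < batch.nr; i++)	/* put_folios: drop every reference */
			free(batch.entries[i]);
		toy_batch_init(&batch);		/* reset before the next fill */
	} while (next < 40);
	return 0;
}
```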