@@ -2507,66 +2507,65 @@ static int map_block_for_writepage(struct inode *inode,
  * start/recovery path as __block_write_full_folio, along with special
  * code to handle reiserfs tails.
  */
-static int reiserfs_write_full_page(struct page *page,
+static int reiserfs_write_full_folio(struct folio *folio,
 				    struct writeback_control *wbc)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned long end_index = inode->i_size >> PAGE_SHIFT;
 	int error = 0;
 	unsigned long block;
 	sector_t last_block;
 	struct buffer_head *head, *bh;
 	int partial = 0;
 	int nr = 0;
-	int checked = PageChecked(page);
+	int checked = folio_test_checked(folio);
 	struct reiserfs_transaction_handle th;
 	struct super_block *s = inode->i_sb;
 	int bh_per_page = PAGE_SIZE / s->s_blocksize;
 	th.t_trans_id = 0;
 
 	/* no logging allowed when nonblocking or from PF_MEMALLOC */
 	if (checked && (current->flags & PF_MEMALLOC)) {
-		redirty_page_for_writepage(wbc, page);
-		unlock_page(page);
+		folio_redirty_for_writepage(wbc, folio);
+		folio_unlock(folio);
 		return 0;
 	}
 
 	/*
-	 * The page dirty bit is cleared before writepage is called, which
+	 * The folio dirty bit is cleared before writepage is called, which
 	 * means we have to tell create_empty_buffers to make dirty buffers
-	 * The page really should be up to date at this point, so tossing
+	 * The folio really should be up to date at this point, so tossing
 	 * in the BH_Uptodate is just a sanity check.
 	 */
-	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, s->s_blocksize,
+	head = folio_buffers(folio);
+	if (!head)
+		head = folio_create_empty_buffers(folio, s->s_blocksize,
 			     (1 << BH_Dirty) | (1 << BH_Uptodate));
-	}
-	head = page_buffers(page);
 
 	/*
-	 * last page in the file, zero out any contents past the
+	 * last folio in the file, zero out any contents past the
 	 * last byte in the file
 	 */
-	if (page->index >= end_index) {
+	if (folio->index >= end_index) {
 		unsigned last_offset;
 
 		last_offset = inode->i_size & (PAGE_SIZE - 1);
-		/* no file contents in this page */
-		if (page->index >= end_index + 1 || !last_offset) {
-			unlock_page(page);
+		/* no file contents in this folio */
+		if (folio->index >= end_index + 1 || !last_offset) {
+			folio_unlock(folio);
 			return 0;
 		}
-		zero_user_segment(page, last_offset, PAGE_SIZE);
+		folio_zero_segment(folio, last_offset, folio_size(folio));
 	}
 	bh = head;
-	block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
+	block = folio->index << (PAGE_SHIFT - s->s_blocksize_bits);
 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 	/* first map all the buffers, logging any direct items we find */
 	do {
 		if (block > last_block) {
 			/*
 			 * This can happen when the block size is less than
-			 * the page size. The corresponding bytes in the page
+			 * the folio size. The corresponding bytes in the folio
 			 * were zero filled above
 			 */
 			clear_buffer_dirty(bh);
@@ -2593,7 +2592,7 @@ static int reiserfs_write_full_page(struct page *page,
 	 * blocks we're going to log
 	 */
 	if (checked) {
-		ClearPageChecked(page);
+		folio_clear_checked(folio);
 		reiserfs_write_lock(s);
 		error = journal_begin(&th, s, bh_per_page + 1);
 		if (error) {
@@ -2602,7 +2601,7 @@ static int reiserfs_write_full_page(struct page *page,
 		}
 		reiserfs_update_inode_transaction(inode);
 	}
-	/* now go through and lock any dirty buffers on the page */
+	/* now go through and lock any dirty buffers on the folio */
 	do {
 		get_bh(bh);
 		if (!buffer_mapped(bh))
@@ -2623,7 +2622,7 @@ static int reiserfs_write_full_page(struct page *page,
 			lock_buffer(bh);
 		} else {
 			if (!trylock_buffer(bh)) {
-				redirty_page_for_writepage(wbc, page);
+				folio_redirty_for_writepage(wbc, folio);
 				continue;
 			}
 		}
@@ -2640,13 +2639,13 @@ static int reiserfs_write_full_page(struct page *page,
 		if (error)
 			goto fail;
 	}
-	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
-	unlock_page(page);
+	BUG_ON(folio_test_writeback(folio));
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 
 	/*
-	 * since any buffer might be the only dirty buffer on the page,
-	 * the first submit_bh can bring the page out of writeback.
+	 * since any buffer might be the only dirty buffer on the folio,
+	 * the first submit_bh can bring the folio out of writeback.
 	 * be careful with the buffers.
 	 */
 	do {
@@ -2663,10 +2662,10 @@ static int reiserfs_write_full_page(struct page *page,
 done:
 	if (nr == 0) {
 		/*
-		 * if this page only had a direct item, it is very possible for
+		 * if this folio only had a direct item, it is very possible for
 		 * no io to be required without there being an error. Or,
 		 * someone else could have locked them and sent them down the
-		 * pipe without locking the page
+		 * pipe without locking the folio
 		 */
 		bh = head;
 		do {
@@ -2677,18 +2676,18 @@ static int reiserfs_write_full_page(struct page *page,
 			bh = bh->b_this_page;
 		} while (bh != head);
 		if (!partial)
-			SetPageUptodate(page);
-		end_page_writeback(page);
+			folio_mark_uptodate(folio);
+		folio_end_writeback(folio);
 	}
 	return error;
 
 fail:
 	/*
 	 * catches various errors, we need to make sure any valid dirty blocks
-	 * get to the media. The page is currently locked and not marked for
+	 * get to the media. The folio is currently locked and not marked for
 	 * writeback
 	 */
-	ClearPageUptodate(page);
+	folio_clear_uptodate(folio);
 	bh = head;
 	do {
 		get_bh(bh);
@@ -2698,16 +2697,16 @@ static int reiserfs_write_full_page(struct page *page,
 		} else {
 			/*
 			 * clear any dirty bits that might have come from
-			 * getting attached to a dirty page
+			 * getting attached to a dirty folio
 			 */
 			clear_buffer_dirty(bh);
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
-	SetPageError(page);
-	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
-	unlock_page(page);
+	folio_set_error(folio);
+	BUG_ON(folio_test_writeback(folio));
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
@@ -2728,9 +2727,10 @@ static int reiserfs_read_folio(struct file *f, struct folio *folio)
 
 static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
 {
-	struct inode *inode = page->mapping->host;
+	struct folio *folio = page_folio(page);
+	struct inode *inode = folio->mapping->host;
 	reiserfs_wait_on_write_block(inode->i_sb);
-	return reiserfs_write_full_page(page, wbc);
+	return reiserfs_write_full_folio(folio, wbc);
 }
 
 static void reiserfs_truncate_failed_write(struct inode *inode)
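
For readers following the conversion, the buffer-attachment hunk is the one structural (not purely mechanical) change: folio_buffers() returns the buffers already attached to the folio, or NULL, and folio_create_empty_buffers() both attaches fresh buffers and returns the head, which is why the separate page_buffers() lookup and the braces around create_empty_buffers() disappear. A minimal sketch of that idiom in isolation; example_get_buffers() is a hypothetical name for illustration, not reiserfs code:

#include <linux/buffer_head.h>

/*
 * Hypothetical helper showing the folio buffer idiom used in the patch:
 * reuse the buffers already attached to the folio, otherwise attach
 * per-block buffers pre-marked dirty and uptodate and use the head
 * that folio_create_empty_buffers() returns directly.
 */
static struct buffer_head *example_get_buffers(struct folio *folio,
					       struct super_block *sb)
{
	struct buffer_head *head = folio_buffers(folio);

	if (!head)
		head = folio_create_empty_buffers(folio, sb->s_blocksize,
				(1 << BH_Dirty) | (1 << BH_Uptodate));
	return head;
}

Note also the tail-zeroing hunk: unlike the old zero_user_segment(page, last_offset, PAGE_SIZE), folio_zero_segment(folio, last_offset, folio_size(folio)) zeroes to the end of the folio, so the past-EOF zeroing stays correct if this path is ever handed a folio larger than a single page.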