Commit 44f6857

Matthew Wilcox (Oracle) authored and akpm00 committed
reiserfs: convert writepage to use a folio
Convert the incoming page to a folio and then use it throughout the writeback path. This definitely isn't enough to support large folios, but I don't expect reiserfs to gain support for those before it is removed.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Cc: Andreas Gruenbacher <[email protected]>
Cc: Pankaj Raghav <[email protected]>
Cc: Ryusuke Konishi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 414ae0a commit 44f6857
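
The conversion follows the usual pattern for address_space ops that still receive a struct page: convert to the folio once with page_folio() at the entry point, then use only folio_* helpers downstream. Below is a minimal sketch of that pattern; example_writepage() is an illustrative name rather than reiserfs code, and its tail merely stands in for the real write-out logic (it mirrors the "no I/O was needed" path of the function in the diff).

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/writeback.h>

/*
 * Sketch only: shows the page-to-folio conversion pattern this commit
 * applies.  The folio helpers are real kernel API; the function itself
 * is hypothetical.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	/* Convert once at the boundary; everything below is folio-based. */
	struct folio *folio = page_folio(page);

	/* Folio flag/writeback helpers replace the old Page*() calls. */
	if (folio_test_checked(folio) && (current->flags & PF_MEMALLOC)) {
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		return 0;
	}

	/*
	 * A real conversion hands the folio to a *_write_full_folio()
	 * routine here; this sketch just walks the no-I/O path.
	 */
	folio_start_writeback(folio);
	folio_unlock(folio);
	folio_end_writeback(folio);
	return 0;
}

The commit makes exactly this split: reiserfs_writepage() converts the page and delegates to the new reiserfs_write_full_folio(), as the last hunk of the diff below shows.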

File tree

1 file changed: +40 -40 lines changed

fs/reiserfs/inode.c

Lines changed: 40 additions & 40 deletions
@@ -2507,66 +2507,65 @@ static int map_block_for_writepage(struct inode *inode,
  * start/recovery path as __block_write_full_folio, along with special
  * code to handle reiserfs tails.
  */
-static int reiserfs_write_full_page(struct page *page,
+static int reiserfs_write_full_folio(struct folio *folio,
 				    struct writeback_control *wbc)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned long end_index = inode->i_size >> PAGE_SHIFT;
 	int error = 0;
 	unsigned long block;
 	sector_t last_block;
 	struct buffer_head *head, *bh;
 	int partial = 0;
 	int nr = 0;
-	int checked = PageChecked(page);
+	int checked = folio_test_checked(folio);
 	struct reiserfs_transaction_handle th;
 	struct super_block *s = inode->i_sb;
 	int bh_per_page = PAGE_SIZE / s->s_blocksize;
 	th.t_trans_id = 0;

 	/* no logging allowed when nonblocking or from PF_MEMALLOC */
 	if (checked && (current->flags & PF_MEMALLOC)) {
-		redirty_page_for_writepage(wbc, page);
-		unlock_page(page);
+		folio_redirty_for_writepage(wbc, folio);
+		folio_unlock(folio);
 		return 0;
 	}

 	/*
-	 * The page dirty bit is cleared before writepage is called, which
+	 * The folio dirty bit is cleared before writepage is called, which
 	 * means we have to tell create_empty_buffers to make dirty buffers
-	 * The page really should be up to date at this point, so tossing
+	 * The folio really should be up to date at this point, so tossing
 	 * in the BH_Uptodate is just a sanity check.
 	 */
-	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, s->s_blocksize,
+	head = folio_buffers(folio);
+	if (!head)
+		head = folio_create_empty_buffers(folio, s->s_blocksize,
 				     (1 << BH_Dirty) | (1 << BH_Uptodate));
-	}
-	head = page_buffers(page);

 	/*
-	 * last page in the file, zero out any contents past the
+	 * last folio in the file, zero out any contents past the
 	 * last byte in the file
 	 */
-	if (page->index >= end_index) {
+	if (folio->index >= end_index) {
 		unsigned last_offset;

 		last_offset = inode->i_size & (PAGE_SIZE - 1);
-		/* no file contents in this page */
-		if (page->index >= end_index + 1 || !last_offset) {
-			unlock_page(page);
+		/* no file contents in this folio */
+		if (folio->index >= end_index + 1 || !last_offset) {
+			folio_unlock(folio);
 			return 0;
 		}
-		zero_user_segment(page, last_offset, PAGE_SIZE);
+		folio_zero_segment(folio, last_offset, folio_size(folio));
 	}
 	bh = head;
-	block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
+	block = folio->index << (PAGE_SHIFT - s->s_blocksize_bits);
 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 	/* first map all the buffers, logging any direct items we find */
 	do {
 		if (block > last_block) {
 			/*
 			 * This can happen when the block size is less than
-			 * the page size. The corresponding bytes in the page
+			 * the folio size. The corresponding bytes in the folio
 			 * were zero filled above
 			 */
 			clear_buffer_dirty(bh);
@@ -2593,7 +2592,7 @@ static int reiserfs_write_full_page(struct page *page,
 	 * blocks we're going to log
 	 */
 	if (checked) {
-		ClearPageChecked(page);
+		folio_clear_checked(folio);
 		reiserfs_write_lock(s);
 		error = journal_begin(&th, s, bh_per_page + 1);
 		if (error) {
@@ -2602,7 +2601,7 @@ static int reiserfs_write_full_page(struct page *page,
 		}
 		reiserfs_update_inode_transaction(inode);
 	}
-	/* now go through and lock any dirty buffers on the page */
+	/* now go through and lock any dirty buffers on the folio */
 	do {
 		get_bh(bh);
 		if (!buffer_mapped(bh))
@@ -2623,7 +2622,7 @@ static int reiserfs_write_full_page(struct page *page,
 			lock_buffer(bh);
 		} else {
 			if (!trylock_buffer(bh)) {
-				redirty_page_for_writepage(wbc, page);
+				folio_redirty_for_writepage(wbc, folio);
 				continue;
 			}
 		}
@@ -2640,13 +2639,13 @@ static int reiserfs_write_full_page(struct page *page,
 		if (error)
 			goto fail;
 	}
-	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
-	unlock_page(page);
+	BUG_ON(folio_test_writeback(folio));
+	folio_start_writeback(folio);
+	folio_unlock(folio);

 	/*
-	 * since any buffer might be the only dirty buffer on the page,
-	 * the first submit_bh can bring the page out of writeback.
+	 * since any buffer might be the only dirty buffer on the folio,
+	 * the first submit_bh can bring the folio out of writeback.
 	 * be careful with the buffers.
 	 */
 	do {
@@ -2663,10 +2662,10 @@ static int reiserfs_write_full_page(struct page *page,
 done:
 	if (nr == 0) {
 		/*
-		 * if this page only had a direct item, it is very possible for
+		 * if this folio only had a direct item, it is very possible for
 		 * no io to be required without there being an error. Or,
 		 * someone else could have locked them and sent them down the
-		 * pipe without locking the page
+		 * pipe without locking the folio
 		 */
 		bh = head;
 		do {
@@ -2677,18 +2676,18 @@ static int reiserfs_write_full_page(struct page *page,
 			bh = bh->b_this_page;
 		} while (bh != head);
 		if (!partial)
-			SetPageUptodate(page);
-		end_page_writeback(page);
+			folio_mark_uptodate(folio);
+		folio_end_writeback(folio);
 	}
 	return error;

 fail:
 	/*
 	 * catches various errors, we need to make sure any valid dirty blocks
-	 * get to the media. The page is currently locked and not marked for
+	 * get to the media. The folio is currently locked and not marked for
 	 * writeback
 	 */
-	ClearPageUptodate(page);
+	folio_clear_uptodate(folio);
 	bh = head;
 	do {
 		get_bh(bh);
@@ -2698,16 +2697,16 @@ static int reiserfs_write_full_page(struct page *page,
 		} else {
 			/*
 			 * clear any dirty bits that might have come from
-			 * getting attached to a dirty page
+			 * getting attached to a dirty folio
 			 */
 			clear_buffer_dirty(bh);
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
-	SetPageError(page);
-	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
-	unlock_page(page);
+	folio_set_error(folio);
+	BUG_ON(folio_test_writeback(folio));
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
@@ -2728,9 +2727,10 @@ static int reiserfs_read_folio(struct file *f, struct folio *folio)

 static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
 {
-	struct inode *inode = page->mapping->host;
+	struct folio *folio = page_folio(page);
+	struct inode *inode = folio->mapping->host;
 	reiserfs_wait_on_write_block(inode->i_sb);
-	return reiserfs_write_full_page(page, wbc);
+	return reiserfs_write_full_folio(folio, wbc);
 }

 static void reiserfs_truncate_failed_write(struct inode *inode)
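
One detail from the first hunk worth noting: the old create_empty_buffers()/page_buffers() pair becomes folio_buffers() plus folio_create_empty_buffers(), and the latter returns the head of the buffer list itself, so the separate page_buffers() lookup disappears. A small sketch of that idiom follows; the block size and buffer-state bits are taken from the diff, while the wrapper function name is purely illustrative.

#include <linux/buffer_head.h>

/* Sketch of the buffer-attach idiom used in the first hunk above. */
static struct buffer_head *example_folio_buffers(struct folio *folio,
						 unsigned long blocksize)
{
	/* Existing circular buffer list, or NULL if none is attached yet. */
	struct buffer_head *head = folio_buffers(folio);

	if (!head)
		/* Attaches empty buffers and hands back the head in one call. */
		head = folio_create_empty_buffers(folio, blocksize,
				(1 << BH_Dirty) | (1 << BH_Uptodate));
	return head;
}

Similarly, zero_user_segment(page, last_offset, PAGE_SIZE) becomes folio_zero_segment(folio, last_offset, folio_size(folio)), so the end of the zeroed range comes from the folio itself rather than a hard-coded PAGE_SIZE.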
