Skip to content

Commit e180b8c

Browse files
committed
erofs: convert z_erofs_bind_cache() to folios
The managed cache uses a pseudo inode to keep (necessary) compressed data. Currently, it still uses zero-order folios, so this is just a trivial conversion, except that the use of the pagepool is temporarily dropped.

Drop some obsolete comments too.

Reviewed-by: Chao Yu <[email protected]>
Signed-off-by: Gao Xiang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 6f435e9 commit e180b8c

File tree

2 files changed

+21
-53
lines changed

2 files changed

+21
-53
lines changed

fs/erofs/compress.h

Lines changed: 1 addition & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -29,29 +29,8 @@ struct z_erofs_decompressor {
2929
char *name;
3030
};
3131

32-
/* some special page->private (unsigned long, see below) */
3332
#define Z_EROFS_SHORTLIVED_PAGE (-1UL << 2)
34-
#define Z_EROFS_PREALLOCATED_PAGE (-2UL << 2)
35-
36-
/*
37-
* For all pages in a pcluster, page->private should be one of
38-
* Type Last 2bits page->private
39-
* short-lived page 00 Z_EROFS_SHORTLIVED_PAGE
40-
* preallocated page (tryalloc) 00 Z_EROFS_PREALLOCATED_PAGE
41-
* cached/managed page 00 pointer to z_erofs_pcluster
42-
* online page (file-backed, 01/10/11 sub-index << 2 | count
43-
* some pages can be used for inplace I/O)
44-
*
45-
* page->mapping should be one of
46-
* Type page->mapping
47-
* short-lived page NULL
48-
* preallocated page NULL
49-
* cached/managed page non-NULL or NULL (invalidated/truncated page)
50-
* online page non-NULL
51-
*
52-
* For all managed pages, PG_private should be set with 1 extra refcount,
53-
* which is used for page reclaim / migration.
54-
*/
33+
#define Z_EROFS_PREALLOCATED_FOLIO ((void *)(-2UL << 2))
5534

5635
/*
5736
* Currently, short-lived pages are pages directly from buddy system

fs/erofs/zdata.c

Lines changed: 20 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -516,61 +516,54 @@ static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
516516
struct z_erofs_pcluster *pcl = fe->pcl;
517517
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
518518
bool shouldalloc = z_erofs_should_alloc_cache(fe);
519-
bool standalone = true;
520-
/*
521-
* optimistic allocation without direct reclaim since inplace I/O
522-
* can be used if low memory otherwise.
523-
*/
519+
bool may_bypass = true;
520+
/* Optimistic allocation, as in-place I/O can be used as a fallback */
524521
gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
525522
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
523+
struct folio *folio, *newfolio;
526524
unsigned int i;
527525

528526
if (i_blocksize(fe->inode) != PAGE_SIZE ||
529527
fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
530528
return;
531529

532530
for (i = 0; i < pclusterpages; ++i) {
533-
struct page *page, *newpage;
534-
535531
/* Inaccurate check w/o locking to avoid unneeded lookups */
536532
if (READ_ONCE(pcl->compressed_bvecs[i].page))
537533
continue;
538534

539-
page = find_get_page(mc, pcl->index + i);
540-
if (!page) {
541-
/* I/O is needed, no possible to decompress directly */
542-
standalone = false;
535+
folio = filemap_get_folio(mc, pcl->index + i);
536+
if (IS_ERR(folio)) {
537+
may_bypass = false;
543538
if (!shouldalloc)
544539
continue;
545540

546541
/*
547-
* Try cached I/O if allocation succeeds or fallback to
548-
* in-place I/O instead to avoid any direct reclaim.
542+
* Allocate a managed folio for cached I/O, or it may be
543+
* then filled with a file-backed folio for in-place I/O
549544
*/
550-
newpage = erofs_allocpage(&fe->pagepool, gfp);
551-
if (!newpage)
545+
newfolio = filemap_alloc_folio(gfp, 0);
546+
if (!newfolio)
552547
continue;
553-
set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
548+
newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
549+
folio = NULL;
554550
}
555551
spin_lock(&pcl->lockref.lock);
556552
if (!pcl->compressed_bvecs[i].page) {
557-
pcl->compressed_bvecs[i].page = page ? page : newpage;
553+
pcl->compressed_bvecs[i].page =
554+
folio_page(folio ?: newfolio, 0);
558555
spin_unlock(&pcl->lockref.lock);
559556
continue;
560557
}
561558
spin_unlock(&pcl->lockref.lock);
562-
563-
if (page)
564-
put_page(page);
565-
else if (newpage)
566-
erofs_pagepool_add(&fe->pagepool, newpage);
559+
folio_put(folio ?: newfolio);
567560
}
568561

569562
/*
570-
* don't do inplace I/O if all compressed pages are available in
571-
* managed cache since it can be moved to the bypass queue instead.
563+
* Don't perform in-place I/O if all compressed pages are available in
564+
* the managed cache, as the pcluster can be moved to the bypass queue.
572565
*/
573-
if (standalone)
566+
if (may_bypass)
574567
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
575568
}
576569

@@ -1480,12 +1473,8 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
14801473
DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
14811474

14821475
folio = page_folio(zbv.page);
1483-
/*
1484-
* Handle preallocated cached folios. We tried to allocate such folios
1485-
* without triggering direct reclaim. If allocation failed, inplace
1486-
* file-backed folios will be used instead.
1487-
*/
1488-
if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
1476+
/* For preallocated managed folios, add them to page cache here */
1477+
if (folio->private == Z_EROFS_PREALLOCATED_FOLIO) {
14891478
tocache = true;
14901479
goto out_tocache;
14911480
}

0 commit comments

Comments (0)