Commit 6f435e9

erofs: tidy up zdata.c
All small code style adjustments, no logic changes:
 - z_erofs_decompress_frontend => z_erofs_frontend;
 - z_erofs_decompress_backend => z_erofs_backend;
 - Use Z_EROFS_DEFINE_FRONTEND() to replace DECOMPRESS_FRONTEND_INIT();
 - `nr_folios` should be `nrpages` in z_erofs_readahead();
 - Refine in-line comments.

Reviewed-by: Chao Yu <[email protected]>
Signed-off-by: Gao Xiang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 5514d84 commit 6f435e9

File tree

1 file changed: +43 -68 lines changed

fs/erofs/zdata.c

Lines changed: 43 additions & 68 deletions
@@ -455,31 +455,25 @@ int __init z_erofs_init_subsystem(void)
 }
 
 enum z_erofs_pclustermode {
+        /* It has previously been linked into another processing chain */
         Z_EROFS_PCLUSTER_INFLIGHT,
         /*
-         * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
-         * could be dispatched into bypass queue later due to uptodated managed
-         * pages. All related online pages cannot be reused for inplace I/O (or
-         * bvpage) since it can be directly decoded without I/O submission.
+         * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
+         * may be dispatched to the bypass queue later due to uptodated managed
+         * folios. All file-backed folios related to this pcluster cannot be
+         * reused for in-place I/O (or bvpage) since the pcluster may be decoded
+         * in a separate queue (and thus out of order).
          */
         Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
         /*
-         * The pcluster was just linked to a decompression chain by us. It can
-         * also be linked with the remaining pclusters, which means if the
-         * processing page is the tail page of a pcluster, this pcluster can
-         * safely use the whole page (since the previous pcluster is within the
-         * same chain) for in-place I/O, as illustrated below:
-         *  ___________________________________________________
-         * |  tail (partial) page  |    head (partial) page    |
-         * | (of the current pcl)  |   (of the previous pcl)   |
-         * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
-         *
-         * [  (*) the page above can be used as inplace I/O.   ]
+         * The pcluster has just been linked to our processing chain.
+         * File-backed folios (except for the head page) related to it can be
+         * used for in-place I/O (or bvpage).
          */
         Z_EROFS_PCLUSTER_FOLLOWED,
 };
 
-struct z_erofs_decompress_frontend {
+struct z_erofs_frontend {
         struct inode *const inode;
         struct erofs_map_blocks map;
         struct z_erofs_bvec_iter biter;
@@ -495,11 +489,11 @@ struct z_erofs_decompress_frontend {
         unsigned int icur;
 };
 
-#define DECOMPRESS_FRONTEND_INIT(__i) { \
-        .inode = __i, .head = Z_EROFS_PCLUSTER_TAIL, \
-        .mode = Z_EROFS_PCLUSTER_FOLLOWED }
+#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \
+        .inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \
+        .mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho }
 
-static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
+static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe)
 {
         unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
 
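Note: the new Z_EROFS_DEFINE_FRONTEND() macro above folds the variable definition, the designated initializers and the head offset into one statement, which is why callers later in this diff drop their separate `.headoffset` assignments. A minimal userspace sketch of the same declare-and-initialize pattern, built around a hypothetical cut-down `demo_frontend` struct rather than the real `struct z_erofs_frontend`, might look like this:

#include <stdio.h>

/* Hypothetical cut-down stand-in for struct z_erofs_frontend (illustration only). */
struct demo_frontend {
        const void *inode;      /* stands in for "struct inode *const inode" */
        void *head;             /* stands in for the pcluster chain head */
        int mode;               /* stands in for enum z_erofs_pclustermode */
        long long headoffset;   /* byte offset of the first folio */
};

#define DEMO_PCLUSTER_TAIL     ((void *)0x5F0ECAFDUL)
#define DEMO_PCLUSTER_FOLLOWED 2

/* Declare-and-initialize macro in the style of Z_EROFS_DEFINE_FRONTEND(). */
#define DEMO_DEFINE_FRONTEND(fe, i, ho) struct demo_frontend fe = { \
        .inode = i, .head = DEMO_PCLUSTER_TAIL, \
        .mode = DEMO_PCLUSTER_FOLLOWED, .headoffset = ho }

int main(void)
{
        int fake_inode;

        /* One line replaces "declare, initialize, then set f.headoffset by hand". */
        DEMO_DEFINE_FRONTEND(f, &fake_inode, 4096);
        printf("mode=%d headoffset=%lld\n", f.mode, f.headoffset);
        return 0;
}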
@@ -516,7 +510,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
         return false;
 }
 
-static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
 {
         struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
         struct z_erofs_pcluster *pcl = fe->pcl;
@@ -673,7 +667,7 @@ int erofs_init_managed_cache(struct super_block *sb)
 }
 
 /* callers must be with pcluster lock held */
-static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
+static int z_erofs_attach_page(struct z_erofs_frontend *fe,
                                struct z_erofs_bvec *bvec, bool exclusive)
 {
         struct z_erofs_pcluster *pcl = fe->pcl;
@@ -719,7 +713,7 @@ static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
         return true;
 }
 
-static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
 {
         struct erofs_map_blocks *map = &fe->map;
         struct super_block *sb = fe->inode->i_sb;
@@ -789,7 +783,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
         return err;
 }
 
-static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
 {
         struct erofs_map_blocks *map = &fe->map;
         struct super_block *sb = fe->inode->i_sb;
@@ -862,14 +856,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
         return 0;
 }
 
-/*
- * keep in mind that no referenced pclusters will be freed
- * only after a RCU grace period.
- */
 static void z_erofs_rcu_callback(struct rcu_head *head)
 {
-        z_erofs_free_pcluster(container_of(head,
-                        struct z_erofs_pcluster, rcu));
+        z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu));
 }
 
 static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
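Note: the condensed z_erofs_rcu_callback() above still relies on container_of() to step from the embedded rcu_head back to the enclosing pcluster before freeing it. A standalone sketch of that pointer arithmetic, with hypothetical stand-in types and the RCU machinery left out, could be:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace rendition of the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_rcu_head {
        void (*func)(struct fake_rcu_head *head);
};

/* Hypothetical stand-in for struct z_erofs_pcluster with an embedded callback head. */
struct fake_pcluster {
        int pclustersize;
        struct fake_rcu_head rcu;
};

/* Mirrors the shape of z_erofs_rcu_callback(): recover the outer object, then free it. */
static void fake_rcu_callback(struct fake_rcu_head *head)
{
        struct fake_pcluster *pcl = container_of(head, struct fake_pcluster, rcu);

        printf("freeing pcluster of size %d\n", pcl->pclustersize);
        free(pcl);
}

int main(void)
{
        struct fake_pcluster *pcl = malloc(sizeof(*pcl));

        if (!pcl)
                return 1;
        pcl->pclustersize = 4096;
        /* The kernel would defer this via call_rcu(&pcl->rcu, ...); here it runs directly. */
        fake_rcu_callback(&pcl->rcu);
        return 0;
}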
@@ -911,8 +900,7 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
         return free;
 }
 
-unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
-                                  unsigned long nr_shrink)
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr)
 {
         struct z_erofs_pcluster *pcl;
         unsigned long index, freed = 0;
@@ -925,7 +913,7 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
                 xa_unlock(&sbi->managed_pslots);
 
                 ++freed;
-                if (!--nr_shrink)
+                if (!--nr)
                         return freed;
                 xa_lock(&sbi->managed_pslots);
         }
@@ -954,7 +942,7 @@ static void z_erofs_put_pcluster(struct erofs_sb_info *sbi,
                 call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 }
 
-static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_pcluster_end(struct z_erofs_frontend *fe)
 {
         struct z_erofs_pcluster *pcl = fe->pcl;
 
@@ -967,13 +955,9 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
         if (fe->candidate_bvpage)
                 fe->candidate_bvpage = NULL;
 
-        /*
-         * if all pending pages are added, don't hold its reference
-         * any longer if the pcluster isn't hosted by ourselves.
-         */
+        /* Drop refcount if it doesn't belong to our processing chain */
         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
                 z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false);
-
         fe->pcl = NULL;
 }
 
@@ -1002,7 +986,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
         return 0;
 }
 
-static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
+static int z_erofs_scan_folio(struct z_erofs_frontend *f,
                               struct folio *folio, bool ra)
 {
         struct inode *const inode = f->inode;
@@ -1117,7 +1101,7 @@ static bool z_erofs_page_is_invalidated(struct page *page)
         return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
 }
 
-struct z_erofs_decompress_backend {
+struct z_erofs_backend {
         struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
         struct super_block *sb;
         struct z_erofs_pcluster *pcl;
@@ -1137,7 +1121,7 @@ struct z_erofs_bvec_item {
         struct list_head list;
 };
 
-static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be,
                                          struct z_erofs_bvec *bvec)
 {
         struct z_erofs_bvec_item *item;
@@ -1160,8 +1144,7 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
         list_add(&item->list, &be->decompressed_secondary_bvecs);
 }
 
-static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
-                                      int err)
+static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err)
 {
         unsigned int off0 = be->pcl->pageofs_out;
         struct list_head *p, *n;
@@ -1202,7 +1185,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
         }
 }
 
-static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
+static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be)
 {
         struct z_erofs_pcluster *pcl = be->pcl;
         struct z_erofs_bvec_iter biter;
@@ -1227,8 +1210,7 @@ static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
         z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
 }
 
-static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
-                                  bool *overlapped)
+static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped)
 {
         struct z_erofs_pcluster *pcl = be->pcl;
         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
@@ -1263,8 +1245,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
         return err;
 }
 
-static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
-                                       int err)
+static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
 {
         struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
         struct z_erofs_pcluster *pcl = be->pcl;
@@ -1394,7 +1375,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                                     struct page **pagepool)
 {
-        struct z_erofs_decompress_backend be = {
+        struct z_erofs_backend be = {
                 .sb = io->sb,
                 .pagepool = pagepool,
                 .decompressed_secondary_bvecs =
@@ -1472,7 +1453,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 }
 
 static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
-                                 struct z_erofs_decompress_frontend *f,
+                                 struct z_erofs_frontend *f,
                                  struct z_erofs_pcluster *pcl,
                                  unsigned int nr,
                                  struct address_space *mc)
@@ -1651,7 +1632,7 @@ static void z_erofs_endio(struct bio *bio)
         bio_put(bio);
 }
 
-static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+static void z_erofs_submit_queue(struct z_erofs_frontend *f,
                                  struct z_erofs_decompressqueue *fgq,
                                  bool *force_fg, bool readahead)
 {
@@ -1784,17 +1765,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
         z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
 }
 
-static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
-                            unsigned int ra_folios)
+static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages)
 {
         struct z_erofs_decompressqueue io[NR_JOBQUEUES];
         struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
-        bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios);
+        bool force_fg = z_erofs_is_sync_decompress(sbi, rapages);
         int err;
 
         if (f->head == Z_EROFS_PCLUSTER_TAIL)
                 return 0;
-        z_erofs_submit_queue(f, io, &force_fg, !!ra_folios);
+        z_erofs_submit_queue(f, io, &force_fg, !!rapages);
 
         /* handle bypass queue (no i/o pclusters) immediately */
         err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
@@ -1812,7 +1792,7 @@ static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
  * Since partial uptodate is still unimplemented for now, we have to use
  * approximate readmore strategies as a start.
  */
-static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
+static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
                                       struct readahead_control *rac, bool backmost)
 {
         struct inode *inode = f->inode;
@@ -1867,12 +1847,10 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
 static int z_erofs_read_folio(struct file *file, struct folio *folio)
 {
         struct inode *const inode = folio->mapping->host;
-        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+        Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
         int err;
 
         trace_erofs_read_folio(folio, false);
-        f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
-
         z_erofs_pcluster_readmore(&f, NULL, true);
         err = z_erofs_scan_folio(&f, folio, false);
         z_erofs_pcluster_readmore(&f, NULL, false);
@@ -1892,17 +1870,14 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
 static void z_erofs_readahead(struct readahead_control *rac)
 {
         struct inode *const inode = rac->mapping->host;
-        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+        Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac));
         struct folio *head = NULL, *folio;
-        unsigned int nr_folios;
+        unsigned int nrpages = readahead_count(rac);
         int err;
 
-        f.headoffset = readahead_pos(rac);
-
         z_erofs_pcluster_readmore(&f, rac, true);
-        nr_folios = readahead_count(rac);
-        trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
-
+        nrpages = readahead_count(rac);
+        trace_erofs_readpages(inode, readahead_index(rac), nrpages, false);
         while ((folio = readahead_folio(rac))) {
                 folio->private = head;
                 head = folio;
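Note: one unchanged detail in this hunk is the loop that threads every folio onto a LIFO chain through folio->private for later processing. A tiny userspace sketch of that intrusive-chaining idea, using a hypothetical fake_folio struct instead of the page-cache API, might be:

#include <stdio.h>

/* Hypothetical stand-in for a folio: only the field used for chaining is modelled. */
struct fake_folio {
        unsigned long index;
        void *private;          /* points at the previously chained folio */
};

int main(void)
{
        struct fake_folio folios[3] = { { .index = 0 }, { .index = 1 }, { .index = 2 } };
        struct fake_folio *head = NULL;

        /* Same shape as the while loop above: push each folio onto a LIFO chain. */
        for (int i = 0; i < 3; i++) {
                folios[i].private = head;
                head = &folios[i];
        }

        /* Walk the chain; folios come back in reverse order. */
        for (struct fake_folio *f = head; f; f = f->private)
                printf("folio index %lu\n", f->index);
        return 0;
}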
@@ -1921,7 +1896,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
         z_erofs_pcluster_readmore(&f, rac, false);
         z_erofs_pcluster_end(&f);
 
-        (void)z_erofs_runqueue(&f, nr_folios);
+        (void)z_erofs_runqueue(&f, nrpages);
         erofs_put_metabuf(&f.map.buf);
         erofs_release_pages(&f.pagepool);
 }
