@@ -455,31 +455,25 @@ int __init z_erofs_init_subsystem(void)
 }
 
 enum z_erofs_pclustermode {
+	/* It has previously been linked into another processing chain */
 	Z_EROFS_PCLUSTER_INFLIGHT,
 	/*
-	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
-	 * could be dispatched into bypass queue later due to uptodated managed
-	 * pages. All related online pages cannot be reused for inplace I/O (or
-	 * bvpage) since it can be directly decoded without I/O submission.
+	 * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
+	 * may be dispatched to the bypass queue later due to uptodated managed
+	 * folios. All file-backed folios related to this pcluster cannot be
+	 * reused for in-place I/O (or bvpage) since the pcluster may be decoded
+	 * in a separate queue (and thus out of order).
 	 */
 	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
 	/*
-	 * The pcluster was just linked to a decompression chain by us. It can
-	 * also be linked with the remaining pclusters, which means if the
-	 * processing page is the tail page of a pcluster, this pcluster can
-	 * safely use the whole page (since the previous pcluster is within the
-	 * same chain) for in-place I/O, as illustrated below:
-	 *  ___________________________________________________
-	 * |  tail (partial) page |    head (partial) page    |
-	 * | (of the current pcl) |   (of the previous pcl)   |
-	 * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
-	 *
-	 * [  (*) the page above can be used as inplace I/O.  ]
+	 * The pcluster has just been linked to our processing chain.
+	 * File-backed folios (except for the head page) related to it can be
+	 * used for in-place I/O (or bvpage).
 	 */
 	Z_EROFS_PCLUSTER_FOLLOWED,
 };
 
-struct z_erofs_decompress_frontend {
+struct z_erofs_frontend {
 	struct inode *const inode;
 	struct erofs_map_blocks map;
 	struct z_erofs_bvec_iter biter;
@@ -495,11 +489,11 @@ struct z_erofs_decompress_frontend {
 	unsigned int icur;
 };
 
-#define DECOMPRESS_FRONTEND_INIT(__i) { \
-	.inode = __i, .head = Z_EROFS_PCLUSTER_TAIL, \
-	.mode = Z_EROFS_PCLUSTER_FOLLOWED }
+#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \
+	.inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \
+	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho }
 
-static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
+static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe)
 {
 	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
 
@@ -516,7 +510,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
 	return false;
 }
 
-static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
 {
 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
 	struct z_erofs_pcluster *pcl = fe->pcl;
@@ -673,7 +667,7 @@ int erofs_init_managed_cache(struct super_block *sb)
 }
 
 /* callers must be with pcluster lock held */
-static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
+static int z_erofs_attach_page(struct z_erofs_frontend *fe,
 			       struct z_erofs_bvec *bvec, bool exclusive)
 {
 	struct z_erofs_pcluster *pcl = fe->pcl;
@@ -719,7 +713,7 @@ static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
 	return true;
 }
 
-static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
 {
 	struct erofs_map_blocks *map = &fe->map;
 	struct super_block *sb = fe->inode->i_sb;
@@ -789,7 +783,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	return err;
 }
 
-static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
 {
 	struct erofs_map_blocks *map = &fe->map;
 	struct super_block *sb = fe->inode->i_sb;
@@ -862,14 +856,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 	return 0;
 }
 
-/*
- * keep in mind that no referenced pclusters will be freed
- * only after a RCU grace period.
- */
 static void z_erofs_rcu_callback(struct rcu_head *head)
 {
-	z_erofs_free_pcluster(container_of(head,
-			struct z_erofs_pcluster, rcu));
+	z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu));
 }
 
 static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
@@ -911,8 +900,7 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
 	return free;
 }
 
-unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
-				  unsigned long nr_shrink)
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr)
 {
 	struct z_erofs_pcluster *pcl;
 	unsigned long index, freed = 0;
@@ -925,7 +913,7 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
 		xa_unlock(&sbi->managed_pslots);
 
 		++freed;
-		if (!--nr_shrink)
+		if (!--nr)
 			return freed;
 		xa_lock(&sbi->managed_pslots);
 	}
@@ -954,7 +942,7 @@ static void z_erofs_put_pcluster(struct erofs_sb_info *sbi,
 	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 }
 
-static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_pcluster_end(struct z_erofs_frontend *fe)
 {
 	struct z_erofs_pcluster *pcl = fe->pcl;
 
@@ -967,13 +955,9 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
 	if (fe->candidate_bvpage)
 		fe->candidate_bvpage = NULL;
 
-	/*
-	 * if all pending pages are added, don't hold its reference
-	 * any longer if the pcluster isn't hosted by ourselves.
-	 */
+	/* Drop refcount if it doesn't belong to our processing chain */
 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
 		z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false);
-
 	fe->pcl = NULL;
 }
 
@@ -1002,7 +986,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
 	return 0;
 }
 
-static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
+static int z_erofs_scan_folio(struct z_erofs_frontend *f,
 			      struct folio *folio, bool ra)
 {
 	struct inode *const inode = f->inode;
@@ -1117,7 +1101,7 @@ static bool z_erofs_page_is_invalidated(struct page *page)
 	return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
 }
 
-struct z_erofs_decompress_backend {
+struct z_erofs_backend {
 	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
 	struct super_block *sb;
 	struct z_erofs_pcluster *pcl;
@@ -1137,7 +1121,7 @@ struct z_erofs_bvec_item {
 	struct list_head list;
 };
 
-static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be,
 					 struct z_erofs_bvec *bvec)
 {
 	struct z_erofs_bvec_item *item;
@@ -1160,8 +1144,7 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
 	list_add(&item->list, &be->decompressed_secondary_bvecs);
 }
 
-static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
-				      int err)
+static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err)
 {
 	unsigned int off0 = be->pcl->pageofs_out;
 	struct list_head *p, *n;
@@ -1202,7 +1185,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
 	}
 }
 
-static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
+static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be)
 {
 	struct z_erofs_pcluster *pcl = be->pcl;
 	struct z_erofs_bvec_iter biter;
@@ -1227,8 +1210,7 @@ static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
 		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
 }
 
-static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
-				  bool *overlapped)
+static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped)
 {
 	struct z_erofs_pcluster *pcl = be->pcl;
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
@@ -1263,8 +1245,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
 	return err;
 }
 
-static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
-				       int err)
+static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
 {
 	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
 	struct z_erofs_pcluster *pcl = be->pcl;
@@ -1394,7 +1375,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
 				    struct page **pagepool)
 {
-	struct z_erofs_decompress_backend be = {
+	struct z_erofs_backend be = {
 		.sb = io->sb,
 		.pagepool = pagepool,
 		.decompressed_secondary_bvecs =
@@ -1472,7 +1453,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 }
 
 static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
-				 struct z_erofs_decompress_frontend *f,
+				 struct z_erofs_frontend *f,
 				 struct z_erofs_pcluster *pcl,
 				 unsigned int nr,
 				 struct address_space *mc)
@@ -1651,7 +1632,7 @@ static void z_erofs_endio(struct bio *bio)
 	bio_put(bio);
 }
 
-static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+static void z_erofs_submit_queue(struct z_erofs_frontend *f,
 				 struct z_erofs_decompressqueue *fgq,
 				 bool *force_fg, bool readahead)
 {
@@ -1784,17 +1765,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
 }
 
-static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
-			    unsigned int ra_folios)
+static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages)
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 	struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
-	bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios);
+	bool force_fg = z_erofs_is_sync_decompress(sbi, rapages);
 	int err;
 
 	if (f->head == Z_EROFS_PCLUSTER_TAIL)
 		return 0;
-	z_erofs_submit_queue(f, io, &force_fg, !!ra_folios);
+	z_erofs_submit_queue(f, io, &force_fg, !!rapages);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
@@ -1812,7 +1792,7 @@ static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
  * Since partial uptodate is still unimplemented for now, we have to use
  * approximate readmore strategies as a start.
  */
-static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
+static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
 				      struct readahead_control *rac, bool backmost)
 {
 	struct inode *inode = f->inode;
@@ -1867,12 +1847,10 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
 static int z_erofs_read_folio(struct file *file, struct folio *folio)
 {
 	struct inode *const inode = folio->mapping->host;
-	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+	Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
 	int err;
 
 	trace_erofs_read_folio(folio, false);
-	f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
-
 	z_erofs_pcluster_readmore(&f, NULL, true);
 	err = z_erofs_scan_folio(&f, folio, false);
 	z_erofs_pcluster_readmore(&f, NULL, false);
@@ -1892,17 +1870,14 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
 static void z_erofs_readahead(struct readahead_control *rac)
 {
 	struct inode *const inode = rac->mapping->host;
-	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+	Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac));
 	struct folio *head = NULL, *folio;
-	unsigned int nr_folios;
+	unsigned int nrpages = readahead_count(rac);
 	int err;
 
-	f.headoffset = readahead_pos(rac);
-
 	z_erofs_pcluster_readmore(&f, rac, true);
-	nr_folios = readahead_count(rac);
-	trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
-
+	nrpages = readahead_count(rac);
+	trace_erofs_readpages(inode, readahead_index(rac), nrpages, false);
 	while ((folio = readahead_folio(rac))) {
 		folio->private = head;
 		head = folio;
@@ -1921,7 +1896,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	z_erofs_pcluster_readmore(&f, rac, false);
 	z_erofs_pcluster_end(&f);
 
-	(void)z_erofs_runqueue(&f, nr_folios);
+	(void)z_erofs_runqueue(&f, nrpages);
 	erofs_put_metabuf(&f.map.buf);
 	erofs_release_pages(&f.pagepool);
 }
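
Note for readers skimming the diff: the new Z_EROFS_DEFINE_FRONTEND macro both declares the on-stack frontend and seeds .headoffset, which is why the separate "f.headoffset = ..." assignments disappear from z_erofs_read_folio() and z_erofs_readahead(). Below is a minimal user-space sketch of the same declare-and-initialize idiom; all names in it are hypothetical stand-ins, not the kernel API.

/* Standalone illustration (plain C, compiles with any C99 compiler). */
#include <stdio.h>

struct frontend {
	const char *name;	/* stands in for f->inode */
	long headoffset;	/* byte offset of the first folio */
};

/* The macro declares the variable and fills in per-call-site fields,
 * mirroring Z_EROFS_DEFINE_FRONTEND(fe, i, ho) above. */
#define DEFINE_FRONTEND(fe, n, ho) \
	struct frontend fe = { .name = (n), .headoffset = (ho) }

int main(void)
{
	/* Mirrors Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio)):
	 * no follow-up "f.headoffset = ..." assignment is needed. */
	DEFINE_FRONTEND(f, "read_folio", 4096);

	printf("%s: headoffset=%ld\n", f.name, f.headoffset);
	return 0;
}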