@@ -391,7 +391,7 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
 	return 0;
 }
 
-static int z_erofs_do_map_blocks(struct inode *inode,
+static int z_erofs_map_blocks_fo(struct inode *inode,
 				 struct erofs_map_blocks *map, int flags)
 {
 	struct erofs_inode *vi = EROFS_I(inode);
@@ -409,6 +409,14 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 	unsigned long long ofs, end;
 
 	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
+	if (fragment && !(flags & EROFS_GET_BLOCKS_FINDTAIL) &&
+	    !vi->z_tailextent_headlcn) {
+		map->m_la = 0;
+		map->m_llen = inode->i_size;
+		map->m_flags = EROFS_MAP_MAPPED |
+			EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
+		return 0;
+	}
 	initial_lcn = ofs >> lclusterbits;
 	endoff = ofs & ((1 << lclusterbits) - 1);
 
@@ -526,6 +534,115 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 	return err;
 }
 
+static int z_erofs_map_blocks_ext(struct inode *inode,
+				  struct erofs_map_blocks *map, int flags)
+{
+	struct erofs_inode *vi = EROFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	bool interlaced = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER;
+	unsigned int recsz = z_erofs_extent_recsize(vi->z_advise);
+	erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
+				   vi->inode_isize + vi->xattr_isize), recsz);
+	erofs_off_t lend = inode->i_size;
+	erofs_off_t l, r, mid, pa, la, lstart;
+	struct z_erofs_extent *ext;
+	unsigned int fmt;
+	bool last;
+
+	map->m_flags = 0;
+	if (recsz <= offsetof(struct z_erofs_extent, pstart_hi)) {
+		if (recsz <= offsetof(struct z_erofs_extent, pstart_lo)) {
+			ext = erofs_read_metabuf(&map->buf, sb, pos, true);
+			if (IS_ERR(ext))
+				return PTR_ERR(ext);
+			pa = le64_to_cpu(*(__le64 *)ext);
+			pos += sizeof(__le64);
+			lstart = 0;
+		} else {
+			lstart = map->m_la >> vi->z_lclusterbits;
+			pa = EROFS_NULL_ADDR;
+		}
+
+		for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) {
+			ext = erofs_read_metabuf(&map->buf, sb, pos, true);
+			if (IS_ERR(ext))
+				return PTR_ERR(ext);
+			map->m_plen = le32_to_cpu(ext->plen);
+			if (pa != EROFS_NULL_ADDR) {
+				map->m_pa = pa;
+				pa += map->m_plen & Z_EROFS_EXTENT_PLEN_MASK;
+			} else {
+				map->m_pa = le32_to_cpu(ext->pstart_lo);
+			}
+			pos += recsz;
+		}
+		last = (lstart >= round_up(lend, 1 << vi->z_lclusterbits));
+		lend = min(lstart, lend);
+		lstart -= 1 << vi->z_lclusterbits;
+	} else {
+		lstart = lend;
+		for (l = 0, r = vi->z_extents; l < r; ) {
+			mid = l + (r - l) / 2;
+			ext = erofs_read_metabuf(&map->buf, sb,
+						 pos + mid * recsz, true);
+			if (IS_ERR(ext))
+				return PTR_ERR(ext);
+
+			la = le32_to_cpu(ext->lstart_lo);
+			pa = le32_to_cpu(ext->pstart_lo) |
+				(u64)le32_to_cpu(ext->pstart_hi) << 32;
+			if (recsz > offsetof(struct z_erofs_extent, lstart_hi))
+				la |= (u64)le32_to_cpu(ext->lstart_hi) << 32;
+
+			if (la > map->m_la) {
+				r = mid;
+				lend = la;
+			} else {
+				l = mid + 1;
+				if (map->m_la == la)
+					r = min(l + 1, r);
+				lstart = la;
+				map->m_plen = le32_to_cpu(ext->plen);
+				map->m_pa = pa;
+			}
+		}
+		last = (l >= vi->z_extents);
+	}
+
+	if (lstart < lend) {
+		map->m_la = lstart;
+		if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
+			map->m_flags |= EROFS_MAP_MAPPED | EROFS_MAP_FRAGMENT;
+			vi->z_fragmentoff = map->m_plen;
+			if (recsz >= offsetof(struct z_erofs_extent, pstart_lo))
+				vi->z_fragmentoff |= map->m_pa << 32;
+		} else if (map->m_plen) {
+			map->m_flags |= EROFS_MAP_MAPPED |
+				EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
+			fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
+			if (fmt)
+				map->m_algorithmformat = fmt - 1;
+			else if (interlaced && !erofs_blkoff(sb, map->m_pa))
+				map->m_algorithmformat =
+					Z_EROFS_COMPRESSION_INTERLACED;
+			else
+				map->m_algorithmformat =
+					Z_EROFS_COMPRESSION_SHIFTED;
+			if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
+				map->m_flags |= EROFS_MAP_PARTIAL_REF;
+			map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
+		}
+	}
+	map->m_llen = lend - map->m_la;
+	if (!last && map->m_llen < sb->s_blocksize) {
+		erofs_err(sb, "extent too small %llu @ offset %llu of nid %llu",
+			  map->m_llen, map->m_la, vi->nid);
+		DBG_BUGON(1);
+		return -EFSCORRUPTED;
+	}
+	return 0;
+}
+
 static int z_erofs_fill_inode_lazy(struct inode *inode)
 {
 	struct erofs_inode *const vi = EROFS_I(inode);
@@ -570,6 +687,13 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 	}
 	vi->z_advise = le16_to_cpu(h->h_advise);
 	vi->z_lclusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 15);
+	if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
+	    (vi->z_advise & Z_EROFS_ADVISE_EXTENTS)) {
+		vi->z_extents = le32_to_cpu(h->h_extents_lo) |
+			((u64)le16_to_cpu(h->h_extents_hi) << 32);
+		goto done;
+	}
+
 	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
 	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
 	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
@@ -609,7 +733,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 		.buf = __EROFS_BUF_INITIALIZER
 	};
 
-	err = z_erofs_do_map_blocks(inode, &map,
+	err = z_erofs_map_blocks_fo(inode, &map,
 				    EROFS_GET_BLOCKS_FINDTAIL);
 	erofs_put_metabuf(&map.buf);
 	if (err < 0)
@@ -640,15 +764,11 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
 	} else {
 		err = z_erofs_fill_inode_lazy(inode);
 		if (!err) {
-			if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
-			    !vi->z_tailextent_headlcn) {
-				map->m_la = 0;
-				map->m_llen = inode->i_size;
-				map->m_flags = EROFS_MAP_MAPPED |
-					EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
-			} else {
-				err = z_erofs_do_map_blocks(inode, map, flags);
-			}
+			if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
+			    (vi->z_advise & Z_EROFS_ADVISE_EXTENTS))
+				err = z_erofs_map_blocks_ext(inode, map, flags);
+			else
+				err = z_erofs_map_blocks_fo(inode, map, flags);
 		}
 		if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
 		    unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||