@@ -221,7 +221,6 @@ static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *b
         ASSERT(bi_size && bi_size <= cb->compressed_len);
         last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
                                         &cb->pending_sectors);
-        atomic_dec(&cb->pending_bios);
         /*
          * Here we must wake up the possible error handler after all other
          * operations on @cb finished, or we can race with
@@ -426,7 +425,6 @@ static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
         blk_status_t ret;

         ASSERT(bio->bi_iter.bi_size);
-        atomic_inc(&cb->pending_bios);
         ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
         if (ret)
                 return ret;
@@ -508,10 +506,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
         struct btrfs_fs_info *fs_info = inode->root->fs_info;
         struct bio *bio = NULL;
         struct compressed_bio *cb;
-        unsigned long bytes_left;
-        int pg_index = 0;
-        struct page *page;
-        u64 first_byte = disk_start;
+        u64 cur_disk_bytenr = disk_start;
         u64 next_stripe_start;
         blk_status_t ret;
         int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
@@ -522,7 +517,6 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
         cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
         if (!cb)
                 return BLK_STS_RESOURCE;
-        atomic_set(&cb->pending_bios, 0);
         refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
         cb->errors = 0;
         cb->inode = &inode->vfs_inode;
@@ -534,44 +528,62 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
         cb->orig_bio = NULL;
         cb->nr_pages = nr_pages;

-        bio = alloc_compressed_bio(cb, first_byte, bio_op | write_flags,
-                                   end_compressed_bio_write, &next_stripe_start);
-        if (IS_ERR(bio)) {
-                kfree(cb);
-                return errno_to_blk_status(PTR_ERR(bio));
-        }
-
-        if (blkcg_css) {
-                bio->bi_opf |= REQ_CGROUP_PUNT;
-                kthread_associate_blkcg(blkcg_css);
-        }
-
-        /* create and submit bios for the compressed pages */
-        bytes_left = compressed_len;
-        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
-                int submit = 0;
-                int len = 0;
+        while (cur_disk_bytenr < disk_start + compressed_len) {
+                u64 offset = cur_disk_bytenr - disk_start;
+                unsigned int index = offset >> PAGE_SHIFT;
+                unsigned int real_size;
+                unsigned int added;
+                struct page *page = compressed_pages[index];
+                bool submit = false;

-                page = compressed_pages[pg_index];
-                page->mapping = inode->vfs_inode.i_mapping;
-                if (bio->bi_iter.bi_size)
-                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
-                                                          0);
+                /* Allocate new bio if submitted or not yet allocated */
+                if (!bio) {
+                        bio = alloc_compressed_bio(cb, cur_disk_bytenr,
+                                bio_op | write_flags, end_compressed_bio_write,
+                                &next_stripe_start);
+                        if (IS_ERR(bio)) {
+                                ret = errno_to_blk_status(PTR_ERR(bio));
+                                bio = NULL;
+                                goto finish_cb;
+                        }
+                }
+                /*
+                 * We should never reach next_stripe_start, as we submit
+                 * comp_bio immediately when we hit the boundary.
+                 */
+                ASSERT(cur_disk_bytenr != next_stripe_start);

                 /*
-                 * Page can only be added to bio if the current bio fits in
-                 * stripe.
+                 * We have various limits on the real write size:
+                 * - stripe boundary
+                 * - page boundary
+                 * - compressed length boundary
                  */
-                if (!submit) {
-                        if (pg_index == 0 && use_append)
-                                len = bio_add_zone_append_page(bio, page,
-                                                               PAGE_SIZE, 0);
-                        else
-                                len = bio_add_page(bio, page, PAGE_SIZE, 0);
-                }
+                real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
+                real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
+                real_size = min_t(u64, real_size, compressed_len - offset);
+                ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));

-                page->mapping = NULL;
-                if (submit || len < PAGE_SIZE) {
+                if (use_append)
+                        added = bio_add_zone_append_page(bio, page, real_size,
+                                                         offset_in_page(offset));
+                else
+                        added = bio_add_page(bio, page, real_size,
+                                             offset_in_page(offset));
+                /* Reached zoned boundary */
+                if (added == 0)
+                        submit = true;
+
+                cur_disk_bytenr += added;
+                /* Reached stripe boundary */
+                if (cur_disk_bytenr == next_stripe_start)
+                        submit = true;
+
+                /* Finished the range */
+                if (cur_disk_bytenr == disk_start + compressed_len)
+                        submit = true;
+
+                if (submit) {
                         if (!skip_sum) {
                                 ret = btrfs_csum_one_bio(inode, bio, start, 1);
                                 if (ret)
@@ -581,61 +593,27 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                         ret = submit_compressed_bio(fs_info, cb, bio, 0);
                         if (ret)
                                 goto finish_cb;
-
-                        bio = alloc_compressed_bio(cb, first_byte,
-                                                   bio_op | write_flags,
-                                                   end_compressed_bio_write,
-                                                   &next_stripe_start);
-                        if (IS_ERR(bio)) {
-                                ret = errno_to_blk_status(PTR_ERR(bio));
-                                bio = NULL;
-                                goto finish_cb;
-                        }
-                        if (blkcg_css)
-                                bio->bi_opf |= REQ_CGROUP_PUNT;
-                        /*
-                         * Use bio_add_page() to ensure the bio has at least one
-                         * page.
-                         */
-                        bio_add_page(bio, page, PAGE_SIZE, 0);
+                        bio = NULL;
                 }
-                if (bytes_left < PAGE_SIZE) {
-                        btrfs_info(fs_info,
-                                   "bytes left %lu compress len %u nr %u",
-                                   bytes_left, cb->compressed_len, cb->nr_pages);
-                }
-                bytes_left -= PAGE_SIZE;
-                first_byte += PAGE_SIZE;
                 cond_resched();
         }
-
-        if (!skip_sum) {
-                ret = btrfs_csum_one_bio(inode, bio, start, 1);
-                if (ret)
-                        goto last_bio;
-        }
-
-        ret = submit_compressed_bio(fs_info, cb, bio, 0);
-        if (ret)
-                goto last_bio;
-
         if (blkcg_css)
                 kthread_associate_blkcg(NULL);

         return 0;
-last_bio:
-        bio->bi_status = ret;
-        /* One of the bios' endio function will free @cb. */
-        bio_endio(bio);
-        return ret;

 finish_cb:
         if (bio) {
                 bio->bi_status = ret;
                 bio_endio(bio);
         }
+        /* Last byte of @cb is submitted, endio will free @cb */
+        if (cur_disk_bytenr == disk_start + compressed_len)
+                return ret;

-        wait_var_event(cb, atomic_read(&cb->pending_bios) == 0);
+        wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
+                           (disk_start + compressed_len - cur_disk_bytenr) >>
+                           fs_info->sectorsize_bits);
         /*
          * Even with previous bio ended, we should still have io not yet
          * submitted, thus need to finish manually.
@@ -846,7 +824,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         if (!cb)
                 goto out;

-        atomic_set(&cb->pending_bios, 0);
         refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
         cb->errors = 0;
         cb->inode = inode;
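
For readers following the accounting change above, here is a minimal userspace sketch (not part of the patch) of the idea it implements: completion is tracked per sector rather than per bio, so whichever completion subtracts the last sectors finishes the compressed_bio. The names demo_cb and demo_endio() are made up for illustration; the kernel code itself uses refcount_set() and refcount_sub_and_test() on cb->pending_sectors as shown in the hunks above.

/* Illustrative sketch only: sector-based completion accounting. */
#include <stdatomic.h>
#include <stdio.h>

#define SECTORSIZE_BITS 12                      /* assume 4K sectors, like fs_info->sectorsize_bits */

struct demo_cb {
        atomic_uint pending_sectors;            /* stands in for cb->pending_sectors */
        unsigned int compressed_len;
};

/* Mirrors the role of dec_and_test_compressed_bio(): returns 1 for the final completion. */
static int demo_endio(struct demo_cb *cb, unsigned int bi_size)
{
        unsigned int sectors = bi_size >> SECTORSIZE_BITS;

        /* fetch_sub returns the old value; old == sectors means the count just hit zero */
        return atomic_fetch_sub(&cb->pending_sectors, sectors) == sectors;
}

int main(void)
{
        struct demo_cb cb = { .compressed_len = 3 << SECTORSIZE_BITS };

        /* Set once at setup time, before any bio is submitted (like refcount_set() in the patch). */
        atomic_init(&cb.pending_sectors, cb.compressed_len >> SECTORSIZE_BITS);

        /* Two bios complete: one covering 2 sectors, one covering 1 sector. */
        printf("first bio last? %d\n", demo_endio(&cb, 2 << SECTORSIZE_BITS));  /* prints 0 */
        printf("second bio last? %d\n", demo_endio(&cb, 1 << SECTORSIZE_BITS)); /* prints 1 */
        return 0;
}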