@@ -110,6 +110,8 @@ struct dm_bufio_client {
 	struct rb_root buffer_tree;
 	wait_queue_head_t free_buffer_wait;
 
+	sector_t start;
+
 	int async_write_error;
 
 	struct list_head client_list;
@@ -557,8 +559,8 @@ static void dmio_complete(unsigned long error, void *context)
 	b->bio.bi_end_io(&b->bio);
 }
 
-static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
-		     bio_end_io_t *end_io)
+static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+		     unsigned n_sectors, bio_end_io_t *end_io)
 {
 	int r;
 	struct dm_io_request io_req = {
@@ -570,8 +572,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
 	};
 	struct dm_io_region region = {
 		.bdev = b->c->bdev,
-		.sector = block << b->c->sectors_per_block_bits,
-		.count = b->c->block_size >> SECTOR_SHIFT,
+		.sector = sector,
+		.count = n_sectors,
 	};
 
 	if (b->data_mode != DATA_MODE_VMALLOC) {
@@ -606,14 +608,14 @@ static void inline_endio(struct bio *bio)
 	end_fn(bio);
 }
 
-static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
-			   bio_end_io_t *end_io)
+static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
+			   unsigned n_sectors, bio_end_io_t *end_io)
 {
 	char *ptr;
 	int len;
 
 	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
-	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
+	b->bio.bi_iter.bi_sector = sector;
 	b->bio.bi_bdev = b->c->bdev;
 	b->bio.bi_end_io = inline_endio;
 	/*
@@ -628,7 +630,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
 	 */
 	ptr = b->data;
-	len = b->c->block_size;
+	len = n_sectors << SECTOR_SHIFT;
 
 	if (len >= PAGE_SIZE)
 		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
@@ -640,7 +642,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 				  len < PAGE_SIZE ? len : PAGE_SIZE,
 				  offset_in_page(ptr))) {
 			BUG_ON(b->c->block_size <= PAGE_SIZE);
-			use_dmio(b, rw, block, end_io);
+			use_dmio(b, rw, sector, n_sectors, end_io);
 			return;
 		}
 
@@ -651,17 +653,22 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 	submit_bio(&b->bio);
 }
 
-static void submit_io(struct dm_buffer *b, int rw, sector_t block,
-		      bio_end_io_t *end_io)
+static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
 {
+	unsigned n_sectors;
+	sector_t sector;
+
 	if (rw == WRITE && b->c->write_callback)
 		b->c->write_callback(b);
 
-	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
+	sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
+	n_sectors = 1 << b->c->sectors_per_block_bits;
+
+	if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
 	    b->data_mode != DATA_MODE_VMALLOC)
-		use_inline_bio(b, rw, block, end_io);
+		use_inline_bio(b, rw, sector, n_sectors, end_io);
 	else
-		use_dmio(b, rw, block, end_io);
+		use_dmio(b, rw, sector, n_sectors, end_io);
 }
 
 /*----------------------------------------------------------------
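The core of the change sits in this hunk: submit_io() now converts the buffer's block number into an absolute device sector itself, adds the client's new start offset, and hands plain sector/count values down to use_inline_bio() and use_dmio(). A minimal standalone sketch of that arithmetic, as a userspace model; the 4096-byte block size, block number 5 and 1024-sector offset are illustrative values, not taken from the patch:

/* Illustrative userspace model of the sector math in submit_io(). */
#include <stdio.h>

typedef unsigned long long sector_t;
#define SECTOR_SHIFT 9

int main(void)
{
	unsigned sectors_per_block_bits = 3;	/* 4096-byte blocks: 4096 >> 9 = 8 = 1 << 3 */
	sector_t start = 1024;			/* offset a caller would set via dm_bufio_set_sector_offset() */
	sector_t block = 5;			/* buffer's block number (b->block) */

	sector_t sector = (block << sectors_per_block_bits) + start;
	unsigned n_sectors = 1 << sectors_per_block_bits;

	/* Prints: block 5 -> sector 1064, 8 sectors */
	printf("block %llu -> sector %llu, %u sectors\n", block, sector, n_sectors);
	return 0;
}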
@@ -713,7 +720,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
 	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 
 	if (!write_list)
-		submit_io(b, WRITE, b->block, write_endio);
+		submit_io(b, WRITE, write_endio);
 	else
 		list_add_tail(&b->write_list, write_list);
 }
@@ -726,7 +733,7 @@ static void __flush_write_list(struct list_head *write_list)
 		struct dm_buffer *b =
 			list_entry(write_list->next, struct dm_buffer, write_list);
 		list_del(&b->write_list);
-		submit_io(b, WRITE, b->block, write_endio);
+		submit_io(b, WRITE, write_endio);
 		cond_resched();
 	}
 	blk_finish_plug(&plug);
@@ -1094,7 +1101,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 		return NULL;
 
 	if (need_submit)
-		submit_io(b, READ, b->block, read_endio);
+		submit_io(b, READ, read_endio);
 
 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
@@ -1164,7 +1171,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 			dm_bufio_unlock(c);
 
 			if (need_submit)
-				submit_io(b, READ, b->block, read_endio);
+				submit_io(b, READ, read_endio);
 			dm_bufio_release(b);
 
 			cond_resched();
@@ -1405,7 +1412,7 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
 		old_block = b->block;
 		__unlink_buffer(b);
 		__link_buffer(b, new_block, b->list_mode);
-		submit_io(b, WRITE, new_block, write_endio);
+		submit_io(b, WRITE, write_endio);
 		wait_on_bit_io(&b->state, B_WRITING,
 			       TASK_UNINTERRUPTIBLE);
 		__unlink_buffer(b);
@@ -1762,6 +1769,12 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
 
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
+{
+	c->start = start;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
+
 static unsigned get_max_age_hz(void)
 {
 	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
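The new export lets a device-mapper target shift every bufio block by a fixed number of sectors on the underlying device. A hedged sketch of a hypothetical caller follows; the function name example_open_metadata, the 512-sector offset and the 4096-byte block size are assumptions for illustration, while the dm-bufio calls themselves are the existing client API:

/* Hypothetical caller sketch (kernel context), not part of this patch. */
#include <linux/blkdev.h>
#include <linux/err.h>
#include "dm-bufio.h"

static int example_open_metadata(struct block_device *bdev)
{
	struct dm_bufio_client *c;
	struct dm_buffer *bp;
	void *data;

	/* 4096-byte blocks, 1 reserved buffer, no aux data or callbacks. */
	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* All subsequent I/O for this client is shifted by 512 sectors (assumed offset). */
	dm_bufio_set_sector_offset(c, 512);

	/* Block 0 is now read starting at sector 512 of bdev. */
	data = dm_bufio_read(c, 0, &bp);
	if (IS_ERR(data)) {
		dm_bufio_client_destroy(c);
		return PTR_ERR(data);
	}

	dm_bufio_release(bp);
	dm_bufio_client_destroy(c);
	return 0;
}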