@@ -606,14 +606,14 @@ struct search {
 	unsigned		insert_bio_sectors;
 
 	unsigned		recoverable:1;
-	unsigned		unaligned_bvec:1;
 	unsigned		write:1;
 	unsigned		read_dirty_data:1;
 
 	unsigned long		start_time;
 
 	struct btree_op		op;
 	struct data_insert_op	iop;
+	struct bio_vec		bv[BIO_MAX_PAGES];
 };
 
 static void bch_cache_read_endio(struct bio *bio, int error)
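Note: the flag goes away in favor of per-search storage. struct search
now embeds a bio_vec array sized BIO_MAX_PAGES, so the cloned bio always
owns its segment vector instead of borrowing (or copying) the
original's. A minimal sketch of the resulting layout, assuming the
~3.12-era block API (search_like and its fields are made-up names for
illustration):

    /* Sketch: a per-request context that owns its clone's segments. */
    struct search_like {
            struct bio	*orig_bio;		/* bio as submitted */
            struct bio	clone;			/* private copy we redirect */
            struct bio_vec	bv[BIO_MAX_PAGES];	/* clone->bi_io_vec points here */
    };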
@@ -759,10 +759,14 @@ static void bio_complete(struct search *s)
 static void do_bio_hook(struct search *s)
 {
 	struct bio *bio = &s->bio.bio;
-	memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+	bio_init(bio);
+	bio->bi_io_vec		= s->bv;
+	bio->bi_max_vecs	= BIO_MAX_PAGES;
+	__bio_clone(bio, s->orig_bio);
 	bio->bi_end_io		= request_endio;
 	bio->bi_private		= &s->cl;
+
 	atomic_set(&bio->bi_cnt, 3);
 }
 
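Note: memcpy()ing a whole struct bio also duplicated fields a clone must
never share -- the completion callback, the reference count, and the
bi_io_vec pointer itself, which left the clone aliasing the original's
segment array. The rewritten hook builds the clone properly: bio_init()
gives it fresh state, the embedded array becomes its vector, and
__bio_clone() copies only what a clone should inherit, segment contents
included. The same pattern in isolation, as a sketch against the
pre-3.14 block API (clone_into is a made-up helper name):

    /* Sketch: clone src into a bio backed by caller-owned storage. */
    static void clone_into(struct bio *clone, struct bio *src,
                           struct bio_vec *storage, unsigned max_vecs)
    {
            bio_init(clone);		/* zeroed state, bi_cnt = 1 */
            clone->bi_io_vec   = storage;	/* private segment array */
            clone->bi_max_vecs = max_vecs;
            __bio_clone(clone, src);	/* sector, size, flags, bvecs */
    }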
@@ -774,17 +778,13 @@ static void search_free(struct closure *cl)
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
-	if (s->unaligned_bvec)
-		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }
 
 static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 {
 	struct search *s;
-	struct bio_vec *bv;
 
 	s = mempool_alloc(d->c->search, GFP_NOIO);
 	memset(s, 0, offsetof(struct search, iop.insert_keys));
@@ -803,15 +803,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 	s->start_time		= jiffies;
 	do_bio_hook(s);
 
-	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-		memcpy(bv, bio_iovec(bio),
-		       sizeof(struct bio_vec) * bio_segments(bio));
-
-		s->bio.bio.bi_io_vec	= bv;
-		s->unaligned_bvec	= 1;
-	}
-
 	return s;
 }
 
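Note: the deleted block in search_alloc() was the hack itself. A bio
whose segments were not all whole pages had its bvecs copied into a side
allocation from the d->unaligned_bvec mempool, and the flag told
search_free() to return it. With the vector embedded in struct search,
setup and teardown collapse onto the single search mempool. What the
deleted test was checking, spelled out as a hypothetical helper (not in
the patch):

    /* A bio counted as "aligned" iff every segment was one full page. */
    static inline bool bio_is_page_aligned(struct bio *bio)
    {
            return bio->bi_size == bio_segments(bio) * PAGE_SIZE;
    }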
@@ -850,26 +841,13 @@ static void cached_dev_read_error(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
-	struct bio_vec *bv;
-	int i;
 
 	if (s->recoverable) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
 		s->iop.error = 0;
-		bv = s->bio.bio.bi_io_vec;
 		do_bio_hook(s);
-		s->bio.bio.bi_io_vec = bv;
-
-		if (!s->unaligned_bvec)
-			bio_for_each_segment(bv, s->orig_bio, i)
-				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-		else
-			memcpy(s->bio.bio.bi_io_vec,
-			       bio_iovec(s->orig_bio),
-			       sizeof(struct bio_vec) *
-			       bio_segments(s->orig_bio));
 
 		/* XXX: invalidate cache */
 
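Note: this is where most of the hack lived. The old do_bio_hook()
clobbered bi_io_vec with its memcpy(), so the retry path had to save and
restore the pointer and then rebuild the segment list by hand -- one
branch resetting the shared bvecs to whole pages, one re-copying the
saved ones. Since __bio_clone() now re-copies orig_bio's vector into
s->bv on every call, the retry reduces to a single do_bio_hook(),
roughly (surrounding context assumed, not shown in this hunk):

    if (s->recoverable) {
            /* Retry from the backing device: */
            trace_bcache_read_retry(s->orig_bio);

            s->iop.error = 0;
            do_bio_hook(s);		/* re-clones orig_bio into s->bv */

            /* XXX: invalidate cache */

            closure_bio_submit(bio, cl, s->d);
    }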
@@ -905,8 +883,7 @@ static void cached_dev_read_done(struct closure *cl)
 		s->cache_miss = NULL;
 	}
 
-	if (verify(dc, &s->bio.bio) && s->recoverable &&
-	    !s->unaligned_bvec && !s->read_dirty_data)
+	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
 		bch_data_verify(dc, s->orig_bio);
 
 	bio_complete(s);
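Note: dropping !s->unaligned_bvec from the debug-verify gate removes the
flag's last user. The clone now always carries a faithful copy of
orig_bio's segment list, so there is no "unaligned" special case left to
exclude; only s->recoverable and !s->read_dirty_data still matter.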