@@ -2717,8 +2717,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+	if (unlikely(bio_queue_enter(bio)))
 		return NULL;
+	if (unlikely(!submit_bio_checks(bio)))
+		goto queue_exit;
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		goto queue_exit;
 
 	rq_qos_throttle(q, bio);
 
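For orientation, here is a condensed editorial sketch of how the entry and exit paths of blk_mq_get_new_requests() read once this hunk and the next one are applied; the tag/allocation setup in the middle of the function is elided, and the fail/queue_exit labels come from the hunk below:

/*
 * Editorial sketch, not part of the commit: blk_mq_get_new_requests()
 * now owns the queue usage counter. bio_queue_enter() is taken first,
 * and every later failure path drops it via the queue_exit label.
 */
static struct request *blk_mq_get_new_requests(struct request_queue *q,
		struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
{
	struct request *rq;

	if (unlikely(bio_queue_enter(bio)))
		return NULL;			/* no reference taken yet */
	if (unlikely(!submit_bio_checks(bio)))
		goto queue_exit;		/* drop the reference */
	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
		goto queue_exit;		/* bio merged, no request needed */

	rq_qos_throttle(q, bio);

	/* ... allocation data setup elided, unchanged by this commit ... */

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto fail;
	return rq;			/* success: caller keeps the reference */

fail:
	rq_qos_cleanup(q, bio);
	if (bio->bi_opf & REQ_NOWAIT)
		bio_wouldblock_error(bio);
queue_exit:
	blk_queue_exit(q);
	return NULL;
}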
@@ -2729,64 +2733,44 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	}
 
 	rq = __blk_mq_alloc_requests(&data);
-	if (rq)
-		return rq;
+	if (!rq)
+		goto fail;
+	return rq;
 
+fail:
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-
+queue_exit:
+	blk_queue_exit(q);
 	return NULL;
 }
 
-static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
-{
-	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
-		return false;
-
-	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
-		return false;
-
-	return true;
-}
-
-static inline struct request *blk_mq_get_request(struct request_queue *q,
-						 struct blk_plug *plug,
-						 struct bio *bio,
-						 unsigned int nsegs)
+static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+		struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
 {
 	struct request *rq;
-	bool checked = false;
 
-	if (plug) {
-		rq = rq_list_peek(&plug->cached_rq);
-		if (rq && rq->q == q) {
-			if (unlikely(!submit_bio_checks(bio)))
-				return NULL;
-			if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-				return NULL;
-			checked = true;
-			if (!blk_mq_can_use_cached_rq(rq, bio))
-				goto fallback;
-			rq->cmd_flags = bio->bi_opf;
-			plug->cached_rq = rq_list_next(rq);
-			INIT_LIST_HEAD(&rq->queuelist);
-			rq_qos_throttle(q, bio);
-			return rq;
-		}
-	}
+	if (!plug)
+		return NULL;
+	rq = rq_list_peek(&plug->cached_rq);
+	if (!rq || rq->q != q)
+		return NULL;
 
-fallback:
-	if (unlikely(bio_queue_enter(bio)))
+	if (unlikely(!submit_bio_checks(bio)))
 		return NULL;
-	if (unlikely(!checked && !submit_bio_checks(bio)))
-		goto out_put;
-	rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
-	if (rq)
-		return rq;
-out_put:
-	blk_queue_exit(q);
-	return NULL;
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		return NULL;
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+		return NULL;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return NULL;
+
+	rq->cmd_flags = bio->bi_opf;
+	plug->cached_rq = rq_list_next(rq);
+	INIT_LIST_HEAD(&rq->queuelist);
+	rq_qos_throttle(q, bio);
+	return rq;
 }
 
 /**
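Read linearly, the new helper folds the old blk_mq_can_use_cached_rq() checks into a flat early-return ladder. The following is the function exactly as added above, with editorial comments (not part of the commit) noting why each check can reject the cached request:

static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
		struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
{
	struct request *rq;

	/* Cached requests only exist on a plugged submission path. */
	if (!plug)
		return NULL;
	rq = rq_list_peek(&plug->cached_rq);
	/* The cached request must belong to the queue this bio targets. */
	if (!rq || rq->q != q)
		return NULL;

	if (unlikely(!submit_bio_checks(bio)))
		return NULL;
	/* If the bio merges into an existing request, no request is needed. */
	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
		return NULL;
	/*
	 * Formerly blk_mq_can_use_cached_rq(): the cached request was
	 * allocated for a particular hctx type, and flush operations are
	 * set up differently, so on a mismatch the caller falls back to a
	 * fresh allocation.
	 */
	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
		return NULL;
	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
		return NULL;

	/* Claim the request: pop it off the plug's cache list. */
	rq->cmd_flags = bio->bi_opf;
	plug->cached_rq = rq_list_next(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	rq_qos_throttle(q, bio);
	return rq;
}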
@@ -2805,9 +2789,9 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 void blk_mq_submit_bio(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	struct blk_plug *plug = blk_mq_plug(q, bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct request *rq;
-	struct blk_plug *plug;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2821,10 +2805,12 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		return;
 
-	plug = blk_mq_plug(q, bio);
-	rq = blk_mq_get_request(q, plug, bio, nr_segs);
-	if (unlikely(!rq))
-		return;
+	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
+	if (!rq) {
+		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		if (unlikely(!rq))
+			return;
+	}
 
 	trace_block_getrq(bio);
 
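Taken together with the previous hunks, request allocation in blk_mq_submit_bio() becomes a two-tier lookup. The snippet below repeats the added lines with editorial comments (not part of the commit) on who owns the queue usage counter:

	/*
	 * Fast path: reuse a request cached on the current plug. This
	 * helper takes no queue usage reference of its own.
	 */
	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
	if (!rq) {
		/*
		 * Slow path: blk_mq_get_new_requests() calls
		 * bio_queue_enter() itself and releases the reference on
		 * every failure path, so a NULL return needs no cleanup
		 * here.
		 */
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			return;
	}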