
Commit 5b13bc8

Christoph Hellwig authored and axboe committed
blk-mq: cleanup request allocation
Refactor the request allocation so that blk_mq_get_cached_request tries to find a cached request first, and the entirely separate and now self-contained blk_mq_get_new_requests allocates one or more requests if that is not possible.

There is a small change in behavior, as submit_bio_checks is now called twice if a cached request is present but can't be used, but that is a small price to pay for unwinding this code.

Signed-off-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 82d981d commit 5b13bc8
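
In schematic terms, the split makes the double check visible at the call site. The sketch below condenses the resulting flow in blk_mq_submit_bio() from the diff that follows; it is an illustration, not a verbatim copy of the kernel source:

	/*
	 * Condensed view of blk_mq_submit_bio() after this commit (see the
	 * diff below for the real code).  Each helper runs submit_bio_checks()
	 * itself, so if a cached request exists but cannot be used, the checks
	 * run a second time inside blk_mq_get_new_requests().
	 */
	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
	if (!rq) {
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			return;
	}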

1 file changed: 38 additions, 52 deletions

block/blk-mq.c

Lines changed: 38 additions & 52 deletions
@@ -2717,8 +2717,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+	if (unlikely(bio_queue_enter(bio)))
 		return NULL;
+	if (unlikely(!submit_bio_checks(bio)))
+		goto queue_exit;
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		goto queue_exit;
 
 	rq_qos_throttle(q, bio);
 
@@ -2729,64 +2733,44 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	}
 
 	rq = __blk_mq_alloc_requests(&data);
-	if (rq)
-		return rq;
+	if (!rq)
+		goto fail;
+	return rq;
 
+fail:
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-
+queue_exit:
+	blk_queue_exit(q);
 	return NULL;
 }
 
-static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
-{
-	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
-		return false;
-
-	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
-		return false;
-
-	return true;
-}
-
-static inline struct request *blk_mq_get_request(struct request_queue *q,
-						 struct blk_plug *plug,
-						 struct bio *bio,
-						 unsigned int nsegs)
+static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+		struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
 {
 	struct request *rq;
-	bool checked = false;
 
-	if (plug) {
-		rq = rq_list_peek(&plug->cached_rq);
-		if (rq && rq->q == q) {
-			if (unlikely(!submit_bio_checks(bio)))
-				return NULL;
-			if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-				return NULL;
-			checked = true;
-			if (!blk_mq_can_use_cached_rq(rq, bio))
-				goto fallback;
-			rq->cmd_flags = bio->bi_opf;
-			plug->cached_rq = rq_list_next(rq);
-			INIT_LIST_HEAD(&rq->queuelist);
-			rq_qos_throttle(q, bio);
-			return rq;
-		}
-	}
+	if (!plug)
+		return NULL;
+	rq = rq_list_peek(&plug->cached_rq);
+	if (!rq || rq->q != q)
+		return NULL;
 
-fallback:
-	if (unlikely(bio_queue_enter(bio)))
+	if (unlikely(!submit_bio_checks(bio)))
 		return NULL;
-	if (unlikely(!checked && !submit_bio_checks(bio)))
-		goto out_put;
-	rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
-	if (rq)
-		return rq;
-out_put:
-	blk_queue_exit(q);
-	return NULL;
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		return NULL;
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+		return NULL;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return NULL;
+
+	rq->cmd_flags = bio->bi_opf;
+	plug->cached_rq = rq_list_next(rq);
+	INIT_LIST_HEAD(&rq->queuelist);
+	rq_qos_throttle(q, bio);
+	return rq;
 }
 
 /**
@@ -2805,9 +2789,9 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 void blk_mq_submit_bio(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	struct blk_plug *plug = blk_mq_plug(q, bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct request *rq;
-	struct blk_plug *plug;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2821,10 +2805,12 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		return;
 
-	plug = blk_mq_plug(q, bio);
-	rq = blk_mq_get_request(q, plug, bio, nr_segs);
-	if (unlikely(!rq))
-		return;
+	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
+	if (!rq) {
+		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		if (unlikely(!rq))
+			return;
+	}
 
 	trace_block_getrq(bio);
 