Skip to content

Commit d672d32

Browse files
bvanassche authored and axboe committed
block/mq-deadline: Micro-optimize the batching algorithm
When dispatching the first request of a batch, the deadline_move_request() call clears .next_rq[] for the opposite data direction. .next_rq[] is not restored when changing data direction. Fix this by not clearing .next_rq[] and by keeping track of the data direction of a batch in a variable instead. This patch is a micro-optimization because: - The number of deadline_next_request() calls for the read direction is halved. - The number of times that deadline_next_request() returns NULL is reduced. Cc: Damien Le Moal <[email protected]> Cc: Hannes Reinecke <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Ming Lei <[email protected]> Cc: Johannes Thumshirn <[email protected]> Cc: Himanshu Madhani <[email protected]> Signed-off-by: Bart Van Assche <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]>
1 parent 0775758 commit d672d32

File tree

1 file changed

+5
-6
lines changed

1 file changed

+5
-6
lines changed

block/mq-deadline.c

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,8 @@ struct deadline_data {
5353
struct rb_root sort_list[DD_DIR_COUNT];
5454
struct list_head fifo_list[DD_DIR_COUNT];
5555

56+
/* Data direction of latest dispatched request. */
57+
enum dd_data_dir last_dir;
5658
/*
5759
* next in sort order. read, write or both are NULL
5860
*/
@@ -179,8 +181,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
179181
{
180182
const enum dd_data_dir data_dir = rq_data_dir(rq);
181183

182-
dd->next_rq[DD_READ] = NULL;
183-
dd->next_rq[DD_WRITE] = NULL;
184184
dd->next_rq[data_dir] = deadline_latter_request(rq);
185185

186186
/*
@@ -292,10 +292,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
292292
/*
293293
* batches are currently reads XOR writes
294294
*/
295-
rq = deadline_next_request(dd, DD_WRITE);
296-
if (!rq)
297-
rq = deadline_next_request(dd, DD_READ);
298-
295+
rq = deadline_next_request(dd, dd->last_dir);
299296
if (rq && dd->batching < dd->fifo_batch)
300297
/* we have a next request are still entitled to batch */
301298
goto dispatch_request;
@@ -361,6 +358,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
361358
if (!rq)
362359
return NULL;
363360

361+
dd->last_dir = data_dir;
364362
dd->batching = 0;
365363

366364
dispatch_request:
@@ -473,6 +471,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
473471
dd->fifo_expire[DD_WRITE] = write_expire;
474472
dd->writes_starved = writes_starved;
475473
dd->front_merges = 1;
474+
dd->last_dir = DD_WRITE;
476475
dd->fifo_batch = fifo_batch;
477476
spin_lock_init(&dd->lock);
478477
spin_lock_init(&dd->zone_lock);

0 commit comments

Comments (0)