
Commit 38ba64d

bvanassche authored and axboe committed
block/mq-deadline: Track I/O statistics
Track I/O statistics per I/O priority and export these statistics to
debugfs. These statistics help developers of the deadline scheduler.

Cc: Damien Le Moal <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Ming Lei <[email protected]>
Cc: Johannes Thumshirn <[email protected]>
Cc: Himanshu Madhani <[email protected]>
Signed-off-by: Bart Van Assche <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent c807ab5 commit 38ba64d


block/mq-deadline.c

Lines changed: 100 additions & 0 deletions
@@ -51,6 +51,19 @@ enum dd_prio {
 
 enum { DD_PRIO_COUNT = 3 };
 
+/* I/O statistics per I/O priority. */
+struct io_stats_per_prio {
+	local_t inserted;
+	local_t merged;
+	local_t dispatched;
+	local_t completed;
+};
+
+/* I/O statistics for all I/O priorities (enum dd_prio). */
+struct io_stats {
+	struct io_stats_per_prio stats[DD_PRIO_COUNT];
+};
+
 /*
  * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
  * present on both sort_list[] and fifo_list[].
@@ -75,6 +88,8 @@ struct deadline_data {
 	unsigned int batching;		/* number of sequential requests made */
 	unsigned int starved;		/* times reads have starved writes */
 
+	struct io_stats __percpu *stats;
+
 	/*
 	 * settings that change how the i/o scheduler behaves
 	 */
@@ -88,6 +103,33 @@ struct deadline_data {
 	spinlock_t zone_lock;
 };
 
+/* Count one event of type 'event_type' and with I/O priority 'prio' */
+#define dd_count(dd, event_type, prio) do {				\
+	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
+									\
+	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
+	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
+	local_inc(&io_stats->stats[(prio)].event_type);			\
+	put_cpu_ptr(io_stats);						\
+} while (0)
+
+/*
+ * Returns the total number of dd_count(dd, event_type, prio) calls across all
+ * CPUs. No locking or barriers since it is fine if the returned sum is slightly
+ * outdated.
+ */
+#define dd_sum(dd, event_type, prio) ({					\
+	unsigned int cpu;						\
+	u32 sum = 0;							\
+									\
+	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
+	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
+	for_each_present_cpu(cpu)					\
+		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
+				  stats[(prio)].event_type);		\
+	sum;								\
+})
+
 /* Maps an I/O priority class to a deadline scheduler priority. */
 static const enum dd_prio ioprio_class_to_prio[] = {
 	[IOPRIO_CLASS_NONE] = DD_BE_PRIO,
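The dd_count()/dd_sum() pair is a classic per-CPU counter scheme: writers bump a CPU-local local_t without taking any shared lock, and readers sum all the per-CPU slots, accepting that the total may be slightly stale. Below is a minimal userspace sketch of the same idea; it is not kernel code, and NCPU, count_inserted(), sum_inserted() and worker() are made-up names used only for illustration.

/*
 * Userspace analogue of dd_count()/dd_sum(): each thread stands in for a
 * CPU and increments only its own slot; the reader sums all slots without
 * locking and may observe a slightly stale total, which is acceptable here
 * just as it is for dd_sum().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPU 4

static _Atomic unsigned int inserted[NCPU];

static void count_inserted(int cpu)
{
	/* like dd_count(dd, inserted, prio): lock-free increment of own slot */
	atomic_fetch_add_explicit(&inserted[cpu], 1, memory_order_relaxed);
}

static unsigned int sum_inserted(void)
{
	/* like dd_sum(dd, inserted, prio): unsynchronized sum over all slots */
	unsigned int sum = 0;

	for (int cpu = 0; cpu < NCPU; cpu++)
		sum += atomic_load_explicit(&inserted[cpu],
					    memory_order_relaxed);
	return sum;
}

static void *worker(void *arg)
{
	for (int i = 0; i < 100000; i++)
		count_inserted((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPU];

	for (long cpu = 0; cpu < NCPU; cpu++)
		pthread_create(&t[cpu], NULL, worker, (void *)cpu);
	for (long cpu = 0; cpu < NCPU; cpu++)
		pthread_join(t[cpu], NULL);
	printf("inserted: %u\n", sum_inserted()); /* prints 400000 */
	return 0;
}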
@@ -187,9 +229,12 @@ static void dd_request_merged(struct request_queue *q, struct request *req,
 static void dd_merged_requests(struct request_queue *q, struct request *req,
 			       struct request *next)
 {
+	struct deadline_data *dd = q->elevator->elevator_data;
 	const u8 ioprio_class = dd_rq_ioclass(next);
 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 
+	dd_count(dd, merged, prio);
+
 	/*
 	 * if next expires before rq, assign its expire time to rq
 	 * and move into next position (next will be deleted) in fifo
@@ -225,6 +270,12 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 	deadline_remove_request(rq->q, per_prio, rq);
 }
 
+/* Number of requests queued for a given priority level. */
+static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
+{
+	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
+}
+
 /*
  * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
  * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
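To make the bookkeeping concrete (illustrative numbers, not from the commit): if 10 requests have been inserted at a given priority and 7 of them have completed, dd_queued() returns 10 - 7 = 3, the number of requests the scheduler still holds for that priority.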
@@ -319,6 +370,8 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 {
 	struct request *rq, *next_rq;
 	enum dd_data_dir data_dir;
+	enum dd_prio prio;
+	u8 ioprio_class;
 
 	lockdep_assert_held(&dd->lock);
 
@@ -408,6 +461,9 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 	dd->batching++;
 	deadline_move_request(dd, per_prio, rq);
 done:
+	ioprio_class = dd_rq_ioclass(rq);
+	prio = ioprio_class_to_prio[ioprio_class];
+	dd_count(dd, dispatched, prio);
 	/*
 	 * If the request needs its target zone locked, do it.
 	 */
@@ -491,6 +547,8 @@ static void dd_exit_sched(struct elevator_queue *e)
 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
 	}
 
+	free_percpu(dd->stats);
+
 	kfree(dd);
 }
 
@@ -514,6 +572,11 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 
 	eq->elevator_data = dd;
 
+	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
+				     GFP_KERNEL | __GFP_ZERO);
+	if (!dd->stats)
+		goto free_dd;
+
 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
@@ -535,6 +598,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 	q->elevator = eq;
 	return 0;
 
+free_dd:
+	kfree(dd);
+
 put_eq:
 	kobject_put(&eq->kobj);
 	return ret;
@@ -614,6 +680,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		blk_req_zone_write_unlock(rq);
 
 	prio = ioprio_class_to_prio[ioprio_class];
+	dd_count(dd, inserted, prio);
 
 	if (blk_mq_sched_try_insert_merge(q, rq))
 		return;
@@ -692,6 +759,8 @@ static void dd_finish_request(struct request *rq)
 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
+	dd_count(dd, completed, prio);
+
 	if (blk_queue_is_zoned(q)) {
 		unsigned long flags;
 
@@ -873,6 +942,35 @@ static int dd_async_depth_show(void *data, struct seq_file *m)
 	return 0;
 }
 
+static int dd_queued_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
+		   dd_queued(dd, DD_BE_PRIO),
+		   dd_queued(dd, DD_IDLE_PRIO));
+	return 0;
+}
+
+/* Number of requests owned by the block driver for a given priority. */
+static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
+{
+	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
+		- dd_sum(dd, completed, prio);
+}
+
+static int dd_owned_by_driver_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
+		   dd_owned_by_driver(dd, DD_BE_PRIO),
+		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
+	return 0;
+}
+
 #define DEADLINE_DISPATCH_ATTR(prio)					\
 static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
 					     loff_t *pos)		\
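Again with illustrative numbers: if at BE priority 9 requests have been dispatched, 2 merged and 8 completed, dd_owned_by_driver() returns 9 + 2 - 8 = 3. The merged term appears to compensate for requests that are completed (freed after being merged into another request) without ever having been dispatched; without it the difference would drift below the true count of requests the driver holds.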
@@ -941,6 +1039,8 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
 	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
 	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
+	{"owned_by_driver", 0400, dd_owned_by_driver_show},
+	{"queued", 0400, dd_queued_show},
 	{},
 };
 #undef DEADLINE_QUEUE_DDIR_ATTRS
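With this change applied, the two new attributes show up next to the existing mq-deadline debugfs entries; on a typical system that means /sys/kernel/debug/block/<disk>/sched/queued and /sys/kernel/debug/block/<disk>/sched/owned_by_driver (paths inferred from the blk-mq debugfs layout, not spelled out in this commit). Each file contains three space-separated counts, one per priority level in RT, BE, idle order, so reading "queued" on a lightly loaded disk might return something like:

0 42 0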
