@@ -51,6 +51,19 @@ enum dd_prio {
 
 enum { DD_PRIO_COUNT = 3 };
 
+/* I/O statistics per I/O priority. */
+struct io_stats_per_prio {
+	local_t inserted;
+	local_t merged;
+	local_t dispatched;
+	local_t completed;
+};
+
+/* I/O statistics for all I/O priorities (enum dd_prio). */
+struct io_stats {
+	struct io_stats_per_prio stats[DD_PRIO_COUNT];
+};
+
 /*
  * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
  * present on both sort_list[] and fifo_list[].
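The counters are local_t rather than atomic_t: each per-CPU copy is only ever updated from the CPU that owns it, so local_inc() can use a cheaper increment than a fully atomic one. A minimal sketch of the access pattern this enables — the helper name is hypothetical, the types are the ones defined above:

	/* Hypothetical helper: count one 'inserted' event for 'prio'. */
	static void count_inserted(struct io_stats __percpu *stats,
				   enum dd_prio prio)
	{
		struct io_stats *s = get_cpu_ptr(stats); /* disables preemption */

		local_inc(&s->stats[prio].inserted);
		put_cpu_ptr(stats);			 /* re-enables preemption */
	}

The dd_count() macro introduced below generalizes exactly this pattern over all four event types.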
@@ -75,6 +88,8 @@ struct deadline_data {
 	unsigned int batching;		/* number of sequential requests made */
 	unsigned int starved;		/* times reads have starved writes */
 
+	struct io_stats __percpu *stats;
+
 	/*
 	 * settings that change how the i/o scheduler behaves
 	 */
@@ -88,6 +103,33 @@ struct deadline_data {
 	spinlock_t zone_lock;
 };
 
+/* Count one event of type 'event_type' and with I/O priority 'prio' */
+#define dd_count(dd, event_type, prio) do {				\
+	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
+									\
+	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
+	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
+	local_inc(&io_stats->stats[(prio)].event_type);			\
+	put_cpu_ptr(io_stats);						\
+} while (0)
+
+/*
+ * Returns the total number of dd_count(dd, event_type, prio) calls across all
+ * CPUs. No locking or barriers since it is fine if the returned sum is slightly
+ * outdated.
+ */
+#define dd_sum(dd, event_type, prio) ({					\
+	unsigned int cpu;						\
+	u32 sum = 0;							\
+									\
+	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
+	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
+	for_each_present_cpu(cpu)					\
+		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
+				  stats[(prio)].event_type);		\
+	sum;								\
+})
+
 /* Maps an I/O priority class to a deadline scheduler priority. */
 static const enum dd_prio ioprio_class_to_prio[] = {
 	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
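Both macros check their arguments at compile time: event_type is pasted in as a struct member name, and the BUILD_BUG_ON(!__same_type(...)) lines reject a dd or prio argument of the wrong type. An illustrative call site — the function is hypothetical, dd_count() and dd_sum() are the macros above:

	static void example_account(struct deadline_data *dd)
	{
		enum dd_prio prio = DD_RT_PRIO;

		dd_count(dd, inserted, prio);	/* bumps this CPU's counter */
		/*
		 * dd_count(dd, inserted, 0) would fail to build: the literal
		 * 0 is an int, not an enum dd_prio.
		 */
		pr_debug("RT inserted so far: %u\n", dd_sum(dd, inserted, prio));
	}

dd_sum() deliberately takes no locks; other CPUs may keep counting while the loop runs, so the returned total can be slightly stale, which is acceptable for statistics.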
@@ -187,9 +229,12 @@ static void dd_request_merged(struct request_queue *q, struct request *req,
 static void dd_merged_requests(struct request_queue *q, struct request *req,
 			       struct request *next)
 {
+	struct deadline_data *dd = q->elevator->elevator_data;
 	const u8 ioprio_class = dd_rq_ioclass(next);
 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 
+	dd_count(dd, merged, prio);
+
 	/*
 	 * if next expires before rq, assign its expire time to rq
 	 * and move into next position (next will be deleted) in fifo
@@ -225,6 +270,12 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 	deadline_remove_request(rq->q, per_prio, rq);
 }
 
+/* Number of requests queued for a given priority level. */
+static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
+{
+	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
+}
+
 /*
  * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
  * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
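Every request is counted once as inserted and once as completed (this holds for merged-away requests too, since freeing them goes through dd_finish_request() below), so the difference is the number of requests the scheduler currently holds. Worked example with illustrative numbers: if dd_sum(dd, inserted, prio) returns 1000 and dd_sum(dd, completed, prio) returns 992, dd_queued() reports 8 requests still queued at that priority.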
@@ -319,6 +370,8 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 {
 	struct request *rq, *next_rq;
 	enum dd_data_dir data_dir;
+	enum dd_prio prio;
+	u8 ioprio_class;
 
 	lockdep_assert_held(&dd->lock);
 
@@ -408,6 +461,9 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 	dd->batching++;
 	deadline_move_request(dd, per_prio, rq);
 done:
+	ioprio_class = dd_rq_ioclass(rq);
+	prio = ioprio_class_to_prio[ioprio_class];
+	dd_count(dd, dispatched, prio);
 	/*
 	 * If the request needs its target zone locked, do it.
 	 */
@@ -491,6 +547,8 @@ static void dd_exit_sched(struct elevator_queue *e)
 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
 	}
 
+	free_percpu(dd->stats);
+
 	kfree(dd);
 }
 
@@ -514,6 +572,11 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 
 	eq->elevator_data = dd;
 
+	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
+				     GFP_KERNEL | __GFP_ZERO);
+	if (!dd->stats)
+		goto free_dd;
+
 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
@@ -535,6 +598,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 	q->elevator = eq;
 	return 0;
 
+free_dd:
+	kfree(dd);
+
 put_eq:
 	kobject_put(&eq->kobj);
 	return ret;
@@ -614,6 +680,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		blk_req_zone_write_unlock(rq);
 
 	prio = ioprio_class_to_prio[ioprio_class];
+	dd_count(dd, inserted, prio);
 
 	if (blk_mq_sched_try_insert_merge(q, rq))
 		return;
@@ -692,6 +759,8 @@ static void dd_finish_request(struct request *rq)
 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
+	dd_count(dd, completed, prio);
+
 	if (blk_queue_is_zoned(q)) {
 		unsigned long flags;
 
@@ -873,6 +942,35 @@ static int dd_async_depth_show(void *data, struct seq_file *m)
 	return 0;
 }
 
+static int dd_queued_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
+		   dd_queued(dd, DD_BE_PRIO),
+		   dd_queued(dd, DD_IDLE_PRIO));
+	return 0;
+}
+
+/* Number of requests owned by the block driver for a given priority. */
+static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
+{
+	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
+		- dd_sum(dd, completed, prio);
+}
+
+static int dd_owned_by_driver_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
+		   dd_owned_by_driver(dd, DD_BE_PRIO),
+		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
+	return 0;
+}
+
 #define DEADLINE_DISPATCH_ATTR(prio)					\
 static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
 					     loff_t *pos)		\
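dd_owned_by_driver() balances the books from the driver's point of view: completions are counted both for requests that were dispatched and for requests that were merged away (which are freed without ever being dispatched), so merged has to be added back for the subtraction to come out right. Worked example with illustrative numbers: dispatched = 500, merged = 20 and completed = 510 give 500 + 20 - 510 = 10 requests currently owned by the driver.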
@@ -941,6 +1039,8 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
 	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
 	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
+	{"owned_by_driver", 0400, dd_owned_by_driver_show},
+	{"queued", 0400, dd_queued_show},
 	{},
 };
 #undef DEADLINE_QUEUE_DDIR_ATTRS
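With this change applied, the two new attributes appear next to the existing dispatch0..2 entries in the scheduler's debugfs directory. Assuming debugfs is mounted in the usual place, reading them looks roughly like this (device name and values are illustrative; the three columns are the RT, BE and IDLE priorities, in the order of the seq_printf() calls above):

	# cat /sys/kernel/debug/block/nvme0n1/sched/queued
	0 3 0
	# cat /sys/kernel/debug/block/nvme0n1/sched/owned_by_driver
	2 14 0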