@@ -31,11 +31,6 @@
  */
 static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
 static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-/*
- * Time after which to dispatch lower priority requests even if higher
- * priority requests are pending.
- */
-static const int aging_expire = 10 * HZ;
 static const int writes_starved = 2;    /* max times reads can starve a write */
 static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                            by the above parameters. For throughput. */
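The surviving expiry defaults above are jiffies values; the sysfs show path later in this diff reports them through jiffies_to_msecs(). A minimal user-space sketch of that conversion, assuming HZ = 250 purely for illustration (HZ is a kernel build-time choice; with these defaults the result is 500 ms for reads and 5000 ms for writes regardless of HZ):

#include <stdio.h>

/* HZ is a kernel build-time constant; 250 here is only an assumed example. */
#define HZ 250UL

/* User-space stand-in for the kernel's jiffies_to_msecs() conversion. */
static unsigned long jiffies_to_msecs_sketch(unsigned long j)
{
        return j * 1000UL / HZ;
}

int main(void)
{
        unsigned long read_expire = HZ / 2;     /* default from the hunk above */
        unsigned long write_expire = 5 * HZ;    /* ditto */

        /* Prints 500 ms and 5000 ms; the HZ factor cancels out of both defaults. */
        printf("read_expire  = %lu ms\n", jiffies_to_msecs_sketch(read_expire));
        printf("write_expire = %lu ms\n", jiffies_to_msecs_sketch(write_expire));
        return 0;
}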
@@ -103,7 +98,6 @@ struct deadline_data {
        int writes_starved;
        int front_merges;
        u32 async_depth;
-       int aging_expire;
 
        spinlock_t lock;
        spinlock_t zone_lock;
@@ -369,11 +363,10 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 
 /*
  * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc and with a start time <= @latest.
+ * read/write expire, fifo_batch, etc
  */
 static struct request *__dd_dispatch_request(struct deadline_data *dd,
-                                             struct dd_per_prio *per_prio,
-                                             u64 latest_start_ns)
+                                             struct dd_per_prio *per_prio)
 {
        struct request *rq, *next_rq;
        enum dd_data_dir data_dir;
@@ -385,8 +378,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
        if (!list_empty(&per_prio->dispatch)) {
                rq = list_first_entry(&per_prio->dispatch, struct request,
                                      queuelist);
-               if (rq->start_time_ns > latest_start_ns)
-                       return NULL;
                list_del_init(&rq->queuelist);
                goto done;
        }
@@ -464,8 +455,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
        dd->batching = 0;
 
 dispatch_request:
-       if (rq->start_time_ns > latest_start_ns)
-               return NULL;
        /*
         * rq is the selected appropriate request.
         */
@@ -494,32 +483,15 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-       const u64 now_ns = ktime_get_ns();
-       struct request *rq = NULL;
+       struct request *rq;
        enum dd_prio prio;
 
        spin_lock(&dd->lock);
-       /*
-        * Start with dispatching requests whose deadline expired more than
-        * aging_expire jiffies ago.
-        */
-       for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
-               rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
-                                          jiffies_to_nsecs(dd->aging_expire));
-               if (rq)
-                       goto unlock;
-       }
-       /*
-        * Next, dispatch requests in priority order. Ignore lower priority
-        * requests if any higher priority requests are pending.
-        */
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
-               if (rq || dd_queued(dd, prio))
+               rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
+               if (rq)
                        break;
        }
-
-unlock:
        spin_unlock(&dd->lock);
 
        return rq;
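With this hunk applied, dispatch is a single pass over the priority levels, highest first, taking the first request found; the separate aging pass over DD_BE_PRIO..DD_PRIO_MAX removed above is gone. A minimal user-space sketch of the remaining control flow, with stub types standing in for the kernel structures (the enum values and the stub_* helpers are assumptions for illustration, not the kernel code):

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel types; the enum values assume lower number ==
 * higher priority, matching how the DD_PRIO_MAX loop bound is used above. */
enum dd_prio { DD_RT_PRIO = 0, DD_BE_PRIO = 1, DD_IDLE_PRIO = 2 };
#define DD_PRIO_MAX DD_IDLE_PRIO

struct request { const char *name; };

/* Toy per-priority queue: holds at most one pending request. */
struct dd_per_prio { struct request *next; };

struct deadline_data { struct dd_per_prio per_prio[DD_PRIO_MAX + 1]; };

/* Toy counterpart of __dd_dispatch_request(): hand back whatever is queued
 * at this priority level, or NULL if it is empty. */
static struct request *stub_dispatch(struct deadline_data *dd,
                                     struct dd_per_prio *per_prio)
{
        struct request *rq = per_prio->next;

        (void)dd;
        per_prio->next = NULL;
        return rq;
}

/* Shape of dd_dispatch_request() after this change: walk priorities from
 * highest to lowest and stop at the first level that yields a request. */
static struct request *stub_dd_dispatch(struct deadline_data *dd)
{
        struct request *rq = NULL;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                rq = stub_dispatch(dd, &dd->per_prio[prio]);
                if (rq)
                        break;
        }
        return rq;
}

int main(void)
{
        struct request be_rq = { "best-effort request" };
        struct deadline_data dd = { .per_prio = { [DD_BE_PRIO] = { &be_rq } } };
        struct request *rq = stub_dd_dispatch(&dd);

        printf("dispatched: %s\n", rq ? rq->name : "(nothing)");
        return 0;
}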
@@ -620,7 +592,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
        dd->front_merges = 1;
        dd->last_dir = DD_WRITE;
        dd->fifo_batch = fifo_batch;
-       dd->aging_expire = aging_expire;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);
 
@@ -842,7 +813,6 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \
 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
-SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
 SHOW_INT(deadline_async_depth_show, dd->front_merges);
@@ -872,7 +842,6 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
-STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
 STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX);
@@ -891,7 +860,6 @@ static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(front_merges),
        DD_ATTR(async_depth),
        DD_ATTR(fifo_batch),
-       DD_ATTR(aging_expire),
        __ATTR_NULL
 };
 