
Commit ffa772c

YangYang866 authored and axboe committed
kyber: introduce kyber_depth_updated()
A hang occurs when the user changes the scheduler queue depth by writing to the 'nr_requests' sysfs file of that device.

The details of the environment where we found the problem are as follows:
  an eMMC block device
  total driver tags: 16
  default queue_depth: 32
  kqd->async_depth initialized in kyber_init_sched() with queue_depth=32

Then we change queue_depth to 256 by writing to the 'nr_requests' sysfs file, but kqd->async_depth is not updated after queue_depth changes. Now the value of async depth is too small for queue_depth=256, which may cause a hang.

This patch introduces kyber_depth_updated(), so that kyber can update the async depth when the queue depth changes.

Signed-off-by: Yang Yang <[email protected]>
Reviewed-by: Omar Sandoval <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
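As a rough illustration of the arithmetic behind the bug (not part of the commit itself): the sketch below applies the same formula Kyber uses for the async depth, assuming KYBER_ASYNC_PERCENT is 75 as defined in block/kyber-iosched.c. The shift values are illustrative stand-ins for what the sched_tags sbitmap ends up with at a small versus a large queue depth; a value computed once at init time and never refreshed keeps async requests capped at the small number even after nr_requests grows.

#include <stdio.h>

#define KYBER_ASYNC_PERCENT 75	/* value used in block/kyber-iosched.c */

/* Same formula kyber uses: a fixed fraction of 2^shift tags. */
static unsigned int async_depth(unsigned int shift)
{
	return (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
}

int main(void)
{
	/* Illustrative shifts only: a small sched_tags bitmap yields a
	 * small shift and hence a small async_depth; a larger bitmap
	 * yields a larger one. */
	printf("async_depth at shift 3: %u\n", async_depth(3));	/* 6  */
	printf("async_depth at shift 6: %u\n", async_depth(6));	/* 48 */
	return 0;
}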
1 parent 4ceddce commit ffa772c

1 file changed

block/kyber-iosched.c

Lines changed: 13 additions & 16 deletions
@@ -355,19 +355,9 @@ static void kyber_timer_fn(struct timer_list *t)
 	}
 }
 
-static unsigned int kyber_sched_tags_shift(struct request_queue *q)
-{
-	/*
-	 * All of the hardware queues have the same depth, so we can just grab
-	 * the shift of the first one.
-	 */
-	return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
-}
-
 static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
 	struct kyber_queue_data *kqd;
-	unsigned int shift;
 	int ret = -ENOMEM;
 	int i;
 
@@ -402,9 +392,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 		kqd->latency_targets[i] = kyber_latency_targets[i];
 	}
 
-	shift = kyber_sched_tags_shift(q);
-	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
-
 	return kqd;
 
 err_buckets:
@@ -460,9 +447,19 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
 		INIT_LIST_HEAD(&kcq->rq_list[i]);
 }
 
-static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+	struct blk_mq_tags *tags = hctx->sched_tags;
+	unsigned int shift = tags->bitmap_tags->sb.shift;
+
+	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+
+	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
+}
+
+static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
 	struct kyber_hctx_data *khd;
 	int i;
 
@@ -504,8 +501,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 	khd->batching = 0;
 
 	hctx->sched_data = khd;
-	sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
-					kqd->async_depth);
+	kyber_depth_updated(hctx);
 
 	return 0;
 
@@ -1024,6 +1020,7 @@ static struct elevator_type kyber_sched = {
 		.completed_request = kyber_completed_request,
 		.dispatch_request = kyber_dispatch_request,
 		.has_work = kyber_has_work,
+		.depth_updated = kyber_depth_updated,
 	},
 #ifdef CONFIG_BLK_DEBUG_FS
 	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
