
Commit 3ef28e8

djbw authored and axboe committed
block: generic request_queue reference counting
Allow pmem, and other synchronous/bio-based block drivers, to fall back
on a per-cpu reference count managed by the core for tracking queue
live/dead state.

The existing per-cpu reference count for the blk_mq case is promoted to
be used in all block i/o scenarios. This involves initializing it by
default, waiting for it to drop to zero at exit, and holding a live
reference over the invocation of q->make_request_fn() in
generic_make_request(). The blk_mq code continues to take its own
reference per blk_mq request and retains the ability to freeze the
queue, but the check that the queue is frozen is moved to
generic_make_request().

This fixes crash signatures like the following:

 BUG: unable to handle kernel paging request at ffff880140000000
 [..]
 Call Trace:
  [<ffffffff8145e8bf>] ? copy_user_handle_tail+0x5f/0x70
  [<ffffffffa004e1e0>] pmem_do_bvec.isra.11+0x70/0xf0 [nd_pmem]
  [<ffffffffa004e331>] pmem_make_request+0xd1/0x200 [nd_pmem]
  [<ffffffff811c3162>] ? mempool_alloc+0x72/0x1a0
  [<ffffffff8141f8b6>] generic_make_request+0xd6/0x110
  [<ffffffff8141f966>] submit_bio+0x76/0x170
  [<ffffffff81286dff>] submit_bh_wbc+0x12f/0x160
  [<ffffffff81286e62>] submit_bh+0x12/0x20
  [<ffffffff813395bd>] jbd2_write_superblock+0x8d/0x170
  [<ffffffff8133974d>] jbd2_mark_journal_empty+0x5d/0x90
  [<ffffffff813399cb>] jbd2_journal_destroy+0x24b/0x270
  [<ffffffff810bc4ca>] ? put_pwq_unlocked+0x2a/0x30
  [<ffffffff810bc6f5>] ? destroy_workqueue+0x225/0x250
  [<ffffffff81303494>] ext4_put_super+0x64/0x360
  [<ffffffff8124ab1a>] generic_shutdown_super+0x6a/0xf0

Cc: Jens Axboe <[email protected]>
Cc: Keith Busch <[email protected]>
Cc: Ross Zwisler <[email protected]>
Suggested-by: Christoph Hellwig <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Tested-by: Ross Zwisler <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 4cfc766 commit 3ef28e8
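
At its core the patch applies the percpu_ref live/dead pattern to every request_queue, not just blk-mq ones: initialize the ref up front, take a reference around each submission, kill the ref to freeze, and wait for it to reach zero before teardown. Below is a minimal standalone sketch of that pattern using the kernel percpu-refcount API; the my_queue/my_submit/my_shutdown names are illustrative only and do not appear in the patch.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/wait.h>

struct my_queue {
        struct percpu_ref       ref;            /* queue live/dead state */
        wait_queue_head_t       drain_wq;       /* woken when ref reaches zero */
};

/* called once the ref has been killed and the last reference is dropped */
static void my_queue_release(struct percpu_ref *ref)
{
        struct my_queue *mq = container_of(ref, struct my_queue, ref);

        wake_up_all(&mq->drain_wq);
}

static int my_queue_init(struct my_queue *mq)
{
        init_waitqueue_head(&mq->drain_wq);
        /* start in atomic mode so an early teardown stays cheap */
        return percpu_ref_init(&mq->ref, my_queue_release,
                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
}

static int my_submit(struct my_queue *mq)
{
        /* fails once the queue has been frozen or marked dead */
        if (!percpu_ref_tryget_live(&mq->ref))
                return -ENODEV;
        /* ... issue the i/o while holding the reference ... */
        percpu_ref_put(&mq->ref);
        return 0;
}

static void my_shutdown(struct my_queue *mq)
{
        percpu_ref_kill(&mq->ref);      /* no new submissions get in */
        wait_event(mq->drain_wq, percpu_ref_is_zero(&mq->ref));
        percpu_ref_exit(&mq->ref);
}

In the patch itself the ref likewise starts in atomic mode, and blk_register_queue() switches it to percpu mode once initialization is done (see the block/blk-sysfs.c hunk below), which is why the sketch initializes with PERCPU_REF_INIT_ATOMIC.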

7 files changed: +102, -75 lines

block/blk-core.c

Lines changed: 62 additions & 9 deletions
@@ -554,13 +554,10 @@ void blk_cleanup_queue(struct request_queue *q)
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       if (q->mq_ops) {
-               blk_mq_freeze_queue(q);
-               spin_lock_irq(lock);
-       } else {
-               spin_lock_irq(lock);
+       blk_freeze_queue(q);
+       spin_lock_irq(lock);
+       if (!q->mq_ops)
                __blk_drain_queue(q, true);
-       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
@@ -570,6 +567,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
        if (q->mq_ops)
                blk_mq_free_queue(q);
+       percpu_ref_exit(&q->q_usage_counter);
 
        spin_lock_irq(lock);
        if (q->queue_lock != &q->__queue_lock)
@@ -629,6 +627,40 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+{
+       while (true) {
+               int ret;
+
+               if (percpu_ref_tryget_live(&q->q_usage_counter))
+                       return 0;
+
+               if (!(gfp & __GFP_WAIT))
+                       return -EBUSY;
+
+               ret = wait_event_interruptible(q->mq_freeze_wq,
+                               !atomic_read(&q->mq_freeze_depth) ||
+                               blk_queue_dying(q));
+               if (blk_queue_dying(q))
+                       return -ENODEV;
+               if (ret)
+                       return ret;
+       }
+}
+
+void blk_queue_exit(struct request_queue *q)
+{
+       percpu_ref_put(&q->q_usage_counter);
+}
+
+static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+{
+       struct request_queue *q =
+               container_of(ref, struct request_queue, q_usage_counter);
+
+       wake_up_all(&q->mq_freeze_wq);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        struct request_queue *q;
@@ -690,11 +722,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        init_waitqueue_head(&q->mq_freeze_wq);
 
-       if (blkcg_init_queue(q))
+       /*
+        * Init percpu_ref in atomic mode so that it's faster to shutdown.
+        * See blk_register_queue() for details.
+        */
+       if (percpu_ref_init(&q->q_usage_counter,
+                               blk_queue_usage_counter_release,
+                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
                goto fail_bdi;
 
+       if (blkcg_init_queue(q))
+               goto fail_ref;
+
        return q;
 
+fail_ref:
+       percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
        bdi_destroy(&q->backing_dev_info);
 fail_split:
@@ -1966,9 +2009,19 @@ void generic_make_request(struct bio *bio)
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               q->make_request_fn(q, bio);
+               if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+
+                       q->make_request_fn(q, bio);
+
+                       blk_queue_exit(q);
 
-               bio = bio_list_pop(current->bio_list);
+                       bio = bio_list_pop(current->bio_list);
+               } else {
+                       struct bio *bio_next = bio_list_pop(current->bio_list);
+
+                       bio_io_error(bio);
+                       bio = bio_next;
+               }
        } while (bio);
        current->bio_list = NULL; /* deactivate */
 }
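
The gfp argument to the new blk_queue_enter() decides whether a caller sleeps through a freeze or fails fast. A hedged illustration of the two calling styles, assuming the block-internal declarations from block/blk.h; the my_*_path helpers are hypothetical and not part of the patch:

/* sleeping caller: waits while the queue is frozen, -ENODEV once it is dying */
static int my_blocking_path(struct request_queue *q)
{
        int ret = blk_queue_enter(q, __GFP_WAIT);

        if (ret)
                return ret;
        /* ... the queue cannot be torn down while we hold the reference ... */
        blk_queue_exit(q);
        return 0;
}

/* non-sleeping caller: no __GFP_WAIT in the mask, so a frozen queue yields -EBUSY */
static int my_nonblocking_path(struct request_queue *q)
{
        int ret = blk_queue_enter(q, GFP_NOWAIT);

        if (ret)
                return ret;
        /* ... */
        blk_queue_exit(q);
        return 0;
}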

block/blk-mq-sysfs.c

Lines changed: 0 additions & 6 deletions
@@ -413,12 +413,6 @@ static void blk_mq_sysfs_init(struct request_queue *q)
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
 
-/* see blk_register_queue() */
-void blk_mq_finish_init(struct request_queue *q)
-{
-       percpu_ref_switch_to_percpu(&q->mq_usage_counter);
-}
-
 int blk_mq_register_disk(struct gendisk *disk)
 {
        struct device *dev = disk_to_dev(disk);

block/blk-mq.c

Lines changed: 24 additions & 56 deletions
@@ -78,66 +78,48 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
-{
-       while (true) {
-               int ret;
-
-               if (percpu_ref_tryget_live(&q->mq_usage_counter))
-                       return 0;
-
-               if (!(gfp & __GFP_WAIT))
-                       return -EBUSY;
-
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !atomic_read(&q->mq_freeze_depth) ||
-                               blk_queue_dying(q));
-               if (blk_queue_dying(q))
-                       return -ENODEV;
-               if (ret)
-                       return ret;
-       }
-}
-
-static void blk_mq_queue_exit(struct request_queue *q)
-{
-       percpu_ref_put(&q->mq_usage_counter);
-}
-
-static void blk_mq_usage_counter_release(struct percpu_ref *ref)
-{
-       struct request_queue *q =
-               container_of(ref, struct request_queue, mq_usage_counter);
-
-       wake_up_all(&q->mq_freeze_wq);
-}
-
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
 
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
-               percpu_ref_kill(&q->mq_usage_counter);
+               percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 }
 
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
  */
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_freeze_queue(struct request_queue *q)
 {
+       /*
+        * In the !blk_mq case we are only calling this to kill the
+        * q_usage_counter, otherwise this increases the freeze depth
+        * and waits for it to return to zero. For this reason there is
+        * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+        * exported to drivers as the only user for unfreeze is blk_mq.
+        */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
+
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+       /*
+        * ...just an alias to keep freeze and unfreeze actions balanced
+        * in the blk_mq_* namespace
+        */
+       blk_freeze_queue(q);
+}
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -147,7 +129,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
-               percpu_ref_reinit(&q->mq_usage_counter);
+               percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
 }
@@ -256,7 +238,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        struct blk_mq_alloc_data alloc_data;
        int ret;
 
-       ret = blk_mq_queue_enter(q, gfp);
+       ret = blk_queue_enter(q, gfp);
        if (ret)
                return ERR_PTR(ret);
 
@@ -279,7 +261,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        }
        blk_mq_put_ctx(ctx);
        if (!rq) {
-               blk_mq_queue_exit(q);
+               blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }
        return rq;
@@ -298,7 +280,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
-       blk_mq_queue_exit(q);
+       blk_queue_exit(q);
 }
 
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -1177,11 +1159,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;
 
-       if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-               bio_io_error(bio);
-               return NULL;
-       }
-
+       blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -2000,14 +1978,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                hctxs[i]->queue_num = i;
        }
 
-       /*
-        * Init percpu_ref in atomic mode so that it's faster to shutdown.
-        * See blk_register_queue() for details.
-        */
-       if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-                           PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_hctxs;
-
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -2088,8 +2058,6 @@ void blk_mq_free_queue(struct request_queue *q)
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
-
-       percpu_ref_exit(&q->mq_usage_counter);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
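
blk_mq_freeze_queue() survives as a thin exported wrapper around blk_freeze_queue() so that driver-side freeze and unfreeze calls stay balanced in the blk_mq_* namespace. A rough sketch of that pairing as a blk-mq driver might use it; my_reconfigure() and its update step are placeholders, not kernel code:

static void my_reconfigure(struct request_queue *q)
{
        blk_mq_freeze_queue(q);         /* kills q_usage_counter, waits for it to hit zero */
        /* ... hypothetical: change settings that require no requests in flight ... */
        blk_mq_unfreeze_queue(q);       /* reinits the ref and wakes any waiters */
}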

block/blk-sysfs.c

Lines changed: 1 addition & 2 deletions
@@ -599,9 +599,8 @@ int blk_register_queue(struct gendisk *disk)
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+               percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
-               if (q->mq_ops)
-                       blk_mq_finish_init(q);
        }
 
        ret = blk_trace_init_sysfs(dev);

block/blk.h

Lines changed: 14 additions & 0 deletions
@@ -72,6 +72,20 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
                            unsigned int nr_bytes, unsigned int bidi_bytes);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+void blk_queue_exit(struct request_queue *q);
+void blk_freeze_queue(struct request_queue *q);
+
+static inline void blk_queue_enter_live(struct request_queue *q)
+{
+       /*
+        * Given that running in generic_make_request() context
+        * guarantees that a live reference against q_usage_counter has
+        * been established, further references under that same context
+        * need not check that the queue has been frozen (marked dead).
+        */
+       percpu_ref_get(&q->q_usage_counter);
+}
 
 void blk_rq_timed_out_timer(unsigned long data);
 unsigned long blk_rq_timeout(unsigned long timeout);

include/linux/blk-mq.h

Lines changed: 0 additions & 1 deletion
@@ -166,7 +166,6 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                struct request_queue *q);
-void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 

include/linux/blkdev.h

Lines changed: 1 addition & 1 deletion
@@ -450,7 +450,7 @@ struct request_queue {
 #endif
        struct rcu_head         rcu_head;
        wait_queue_head_t       mq_freeze_wq;
-       struct percpu_ref       mq_usage_counter;
+       struct percpu_ref       q_usage_counter;
        struct list_head        all_q_node;
 
        struct blk_mq_tag_set   *tag_set;
