
Commit 2eb6e1e

Keith Busch authored and Mike Snitzer committed
dm: submit stacked requests in irq enabled context
Switch to having request-based DM enqueue all prep'ed requests into work processed by another thread. This allows request-based DM to invoke block APIs that assume interrupt enabled context (e.g. blk_get_request) and is a prerequisite for adding blk-mq support to request-based DM.

The new kernel thread is only initialized for request-based DM devices.

multipath_map() is now always in irq enabled context so change multipath spinlock (m->lock) locking to always disable interrupts.

Signed-off-by: Keith Busch <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
1 parent 1ae49ea commit 2eb6e1e
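The mechanism at the heart of the change is a kthread_worker: dm_request_fn() hands each prep'ed request to a dedicated per-device thread, which then calls into the map function from a context where interrupts are enabled. Below is a minimal, self-contained sketch of that pattern using the same (pre-4.9) API names the diff relies on; the demo_* identifiers are invented for illustration and do not appear in the DM code.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Illustrative types and names; not from drivers/md. */
struct demo_req {
        struct kthread_work work;
        int id;
};

static struct kthread_worker demo_worker;
static struct task_struct *demo_task;

/* Runs in the worker thread: process context, interrupts enabled. */
static void demo_fn(struct kthread_work *work)
{
        struct demo_req *req = container_of(work, struct demo_req, work);

        pr_info("processing request %d\n", req->id);
        kfree(req);
}

static int __init demo_init(void)
{
        struct demo_req *req;

        init_kthread_worker(&demo_worker);
        demo_task = kthread_run(kthread_worker_fn, &demo_worker, "demo-worker");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req) {
                kthread_stop(demo_task);
                return -ENOMEM;
        }
        req->id = 1;
        init_kthread_work(&req->work, demo_fn);

        /* Queuing is cheap and safe from atomic context, e.g. while
         * holding a queue lock with interrupts disabled. */
        queue_kthread_work(&demo_worker, &req->work);
        return 0;
}

static void __exit demo_exit(void)
{
        flush_kthread_worker(&demo_worker);     /* drain pending work first */
        kthread_stop(demo_task);                /* then stop the thread */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");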

2 files changed: +46 -18 lines changed

drivers/md/dm-mpath.c

Lines changed: 11 additions & 7 deletions
@@ -384,12 +384,11 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
 	struct multipath *m = (struct multipath *) ti->private;
 	int r = DM_MAPIO_REQUEUE;
 	size_t nr_bytes = blk_rq_bytes(clone);
-	unsigned long flags;
 	struct pgpath *pgpath;
 	struct block_device *bdev;
 	struct dm_mpath_io *mpio;
 
-	spin_lock_irqsave(&m->lock, flags);
+	spin_lock_irq(&m->lock);
 
 	/* Do we need to select a new pgpath? */
 	if (!m->current_pgpath ||
@@ -411,21 +410,26 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
 		/* ENOMEM, requeue */
 		goto out_unlock;
 
+	mpio = map_context->ptr;
+	mpio->pgpath = pgpath;
+	mpio->nr_bytes = nr_bytes;
+
 	bdev = pgpath->path.dev->bdev;
+
 	clone->q = bdev_get_queue(bdev);
 	clone->rq_disk = bdev->bd_disk;
 	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	mpio = map_context->ptr;
-	mpio->pgpath = pgpath;
-	mpio->nr_bytes = nr_bytes;
+
+	spin_unlock_irq(&m->lock);
+
 	if (pgpath->pg->ps.type->start_io)
 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 					      &pgpath->path,
 					      nr_bytes);
-	r = DM_MAPIO_REMAPPED;
+	return DM_MAPIO_REMAPPED;
 
 out_unlock:
-	spin_unlock_irqrestore(&m->lock, flags);
+	spin_unlock_irq(&m->lock);
 
 	return r;
 }
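Since multipath_map() is now only ever entered from the kdmwork thread, where interrupts are known to be enabled, the flags save/restore of spin_lock_irqsave() is unnecessary and the plain spin_lock_irq() variant is correct. A schematic contrast of the two locking forms (illustrative only, not code from the tree; demo_lock stands in for m->lock):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Usable from any context: the caller's irq state is saved and restored. */
static void locked_any_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

/* Only valid when irqs are known to be on at entry, as in multipath_map()
 * after this commit: spin_unlock_irq() re-enables interrupts unconditionally,
 * which would be wrong if the caller had them disabled. */
static void locked_irqs_on(void)
{
        spin_lock_irq(&demo_lock);
        /* ... critical section ... */
        spin_unlock_irq(&demo_lock);
}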

drivers/md/dm.c

Lines changed: 35 additions & 11 deletions
@@ -20,6 +20,7 @@
 #include <linux/hdreg.h>
 #include <linux/delay.h>
 #include <linux/wait.h>
+#include <linux/kthread.h>
 
 #include <trace/events/block.h>
 
@@ -79,6 +80,7 @@ struct dm_rq_target_io {
 	struct mapped_device *md;
 	struct dm_target *ti;
 	struct request *orig, *clone;
+	struct kthread_work work;
 	int error;
 	union map_info info;
 };
@@ -208,6 +210,9 @@ struct mapped_device {
 	struct bio flush_bio;
 
 	struct dm_stats stats;
+
+	struct kthread_worker kworker;
+	struct task_struct *kworker_task;
 };
 
 /*
@@ -1773,6 +1778,8 @@ static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
 	return clone;
 }
 
+static void map_tio_request(struct kthread_work *work);
+
 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 				gfp_t gfp_mask)
 {
@@ -1789,6 +1796,7 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	tio->orig = rq;
 	tio->error = 0;
 	memset(&tio->info, 0, sizeof(tio->info));
+	init_kthread_work(&tio->work, map_tio_request);
 
 	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
 	if (!clone) {
@@ -1833,7 +1841,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	tio->ti = ti;
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
@@ -1864,6 +1871,13 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	return requeued;
 }
 
+static void map_tio_request(struct kthread_work *work)
+{
+	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
+
+	map_request(tio->ti, tio->clone, tio->md);
+}
+
 static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
 {
 	struct request *clone;
@@ -1895,6 +1909,7 @@ static void dm_request_fn(struct request_queue *q)
 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
 	struct request *rq, *clone;
+	struct dm_rq_target_io *tio;
 	sector_t pos;
 
 	/*
@@ -1930,20 +1945,15 @@ static void dm_request_fn(struct request_queue *q)
 
 		clone = dm_start_request(md, rq);
 
-		spin_unlock(q->queue_lock);
-		if (map_request(ti, clone, md))
-			goto requeued;
-
+		tio = rq->special;
+		/* Establish tio->ti before queuing work (map_tio_request) */
+		tio->ti = ti;
+		queue_kthread_work(&md->kworker, &tio->work);
 		BUG_ON(!irqs_disabled());
-		spin_lock(q->queue_lock);
 	}
 
 	goto out;
 
-requeued:
-	BUG_ON(!irqs_disabled());
-	spin_lock(q->queue_lock);
-
 delay_and_out:
 	blk_delay_queue(q, HZ / 10);
 out:
@@ -2129,6 +2139,7 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
 	init_completion(&md->kobj_holder.completion);
+	md->kworker_task = NULL;
 
 	md->disk->major = _major;
 	md->disk->first_minor = minor;
@@ -2189,6 +2200,9 @@ static void free_dev(struct mapped_device *md)
 	unlock_fs(md);
 	bdput(md->bdev);
 	destroy_workqueue(md->wq);
+
+	if (md->kworker_task)
+		kthread_stop(md->kworker_task);
 	if (md->io_pool)
 		mempool_destroy(md->io_pool);
 	if (md->rq_pool)
@@ -2484,6 +2498,11 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 	blk_queue_prep_rq(md->queue, dm_prep_fn);
 	blk_queue_lld_busy(md->queue, dm_lld_busy);
 
+	/* Also initialize the request-based DM worker thread */
+	init_kthread_worker(&md->kworker);
+	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
+				       "kdmwork-%s", dm_device_name(md));
+
 	elv_register_queue(md->queue);
 
 	return 1;
@@ -2574,6 +2593,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
+	if (dm_request_based(md))
+		flush_kthread_worker(&md->kworker);
+
 	if (!dm_suspended_md(md)) {
 		dm_table_presuspend_targets(map);
 		dm_table_postsuspend_targets(map);
@@ -2817,8 +2839,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 	 * Stop md->queue before flushing md->wq in case request-based
 	 * dm defers requests to md->wq from md->queue.
 	 */
-	if (dm_request_based(md))
+	if (dm_request_based(md)) {
 		stop_queue(md->queue);
+		flush_kthread_worker(&md->kworker);
+	}
 
 	flush_workqueue(md->wq);
 
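Taken together, the dm.c hunks establish a consistent lifecycle for the new worker: the thread is created in dm_init_request_based_queue(), drained with flush_kthread_worker() on suspend and destroy so that no map is mid-flight, and stopped only in free_dev(). A condensed sketch of that ordering follows; the *_path function names are invented for illustration, while the calls inside them are the ones the diff adds.

/* Condensed, illustrative ordering -- not a literal excerpt from dm.c. */
static void suspend_path(struct mapped_device *md)
{
        stop_queue(md->queue);                  /* no further requests dispatched */
        flush_kthread_worker(&md->kworker);     /* queued maps finish or requeue */
}

static void teardown_path(struct mapped_device *md)
{
        if (dm_request_based(md))
                flush_kthread_worker(&md->kworker);     /* __dm_destroy() */
        if (md->kworker_task)
                kthread_stop(md->kworker_task);         /* free_dev() */
}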