
Commit 466d89a

Keith Busch authored and snitm committed
dm: prepare for allocating blk-mq clone requests in target
For blk-mq request-based DM the responsibility of allocating a cloned request will be transferred from DM core to the target type. To prepare for conditionally using this new model, the original request's 'special' now points to the dm_rq_target_io because the clone is allocated later in the block layer rather than in DM core.

Signed-off-by: Keith Busch <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
1 parent 2eb6e1e commit 466d89a
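In short, the patch changes what the original request's ->special points at: previously it held the clone itself, and after this change it holds the dm_rq_target_io, with the clone reached through tio->clone. Below is a minimal userspace sketch of that relationship; the struct names are borrowed from drivers/md/dm.c but heavily abridged, and the code is an illustration of the pointer layout only, not the kernel implementation.

/* Illustrative model only: abridged structs, not the code in drivers/md/dm.c. */
#include <stdio.h>

struct request {
	void *special;   /* per-request driver data */
};

/* dm's per-request bookkeeping (abridged) */
struct dm_rq_target_io {
	struct request *orig;   /* original request from the block layer */
	struct request *clone;  /* clone dispatched to the underlying device */
};

int main(void)
{
	struct request orig = { 0 }, clone = { 0 };
	struct dm_rq_target_io tio = { .orig = &orig, .clone = &clone };

	/* Old model: orig.special held the clone, and the tio hung off
	 * clone->end_io_data.  New model (this commit): orig.special holds
	 * the tio, and the clone is reached through tio->clone, so the clone
	 * can be allocated later (by the block layer for blk-mq). */
	orig.special = &tio;

	struct dm_rq_target_io *t = orig.special;
	printf("clone reached via rq->special->clone: %p\n", (void *)t->clone);
	return 0;
}

Keeping only the tio in rq->special means the clone does not have to exist at prep time, which is what will later allow the clone to be allocated outside DM core.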

File tree

1 file changed: 66 additions, 68 deletions


drivers/md/dm.c

Lines changed: 66 additions & 68 deletions
@@ -1016,7 +1016,7 @@ static void end_clone_bio(struct bio *clone, int error)
  * the md may be freed in dm_put() at the end of this function.
  * Or do dm_get() before calling this function and dm_put() later.
  */
-static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
 	atomic_dec(&md->pending[rw]);
 
@@ -1050,7 +1050,8 @@ static void free_rq_clone(struct request *clone)
 
 /*
  * Complete the clone and the original request.
- * Must be called without queue lock.
+ * Must be called without clone's queue lock held,
+ * see end_clone_request() for more details.
  */
 static void dm_end_request(struct request *clone, int error)
 {
@@ -1079,7 +1080,8 @@ static void dm_end_request(struct request *clone, int error)
 
 static void dm_unprep_request(struct request *rq)
 {
-	struct request *clone = rq->special;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;
@@ -1090,12 +1092,10 @@ static void dm_unprep_request(struct request *rq)
 /*
  * Requeue the original request of a clone.
  */
-static void dm_requeue_unmapped_request(struct request *clone)
+static void dm_requeue_unmapped_original_request(struct mapped_device *md,
+						 struct request *rq)
 {
-	int rw = rq_data_dir(clone);
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct mapped_device *md = tio->md;
-	struct request *rq = tio->orig;
+	int rw = rq_data_dir(rq);
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
@@ -1105,7 +1105,14 @@ static void dm_requeue_unmapped_request(struct request *clone)
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	rq_completed(md, rw, 0);
+	rq_completed(md, rw, false);
+}
+
+static void dm_requeue_unmapped_request(struct request *clone)
+{
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	dm_requeue_unmapped_original_request(tio->md, tio->orig);
 }
 
 static void __stop_queue(struct request_queue *q)
@@ -1175,8 +1182,8 @@ static void dm_done(struct request *clone, int error, bool mapped)
 static void dm_softirq_done(struct request *rq)
 {
 	bool mapped = true;
-	struct request *clone = rq->completion_data;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	if (rq->cmd_flags & REQ_FAILED)
 		mapped = false;
@@ -1188,13 +1195,11 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *clone, int error)
+static void dm_complete_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
+	struct dm_rq_target_io *tio = rq->special;
 
 	tio->error = error;
-	rq->completion_data = clone;
 	blk_complete_request(rq);
 }
 
@@ -1204,20 +1209,19 @@ static void dm_complete_request(struct request *clone, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() function fails.
  */
-static void dm_kill_unmapped_request(struct request *clone, int error)
+static void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
-
 	rq->cmd_flags |= REQ_FAILED;
-	dm_complete_request(clone, error);
+	dm_complete_request(rq, error);
 }
 
 /*
- * Called with the queue lock held
+ * Called with the clone's queue lock held
  */
 static void end_clone_request(struct request *clone, int error)
 {
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
 	/*
 	 * For just cleaning up the information of the queue in which
 	 * the clone was dispatched.
@@ -1228,13 +1232,13 @@ static void end_clone_request(struct request *clone, int error)
 
 	/*
 	 * Actual request completion is done in a softirq context which doesn't
-	 * hold the queue lock. Otherwise, deadlock could occur because:
+	 * hold the clone's queue lock. Otherwise, deadlock could occur because:
 	 *     - another request may be submitted by the upper level driver
 	 *       of the stacking during the completion
 	 *     - the submission which requires queue lock may be done
-	 *       against this queue
+	 *       against this clone's queue
 	 */
-	dm_complete_request(clone, error);
+	dm_complete_request(tio->orig, error);
 }
 
 /*
@@ -1712,16 +1716,17 @@ static void dm_request(struct request_queue *q, struct bio *bio)
 		_dm_request(q, bio);
 }
 
-static void dm_dispatch_request(struct request *rq)
+static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 {
 	int r;
 
-	if (blk_queue_io_stat(rq->q))
-		rq->cmd_flags |= REQ_IO_STAT;
+	if (blk_queue_io_stat(clone->q))
+		clone->cmd_flags |= REQ_IO_STAT;
 
-	rq->start_time = jiffies;
-	r = blk_insert_cloned_request(rq->q, rq);
+	clone->start_time = jiffies;
+	r = blk_insert_cloned_request(clone->q, clone);
 	if (r)
+		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
 }
 
@@ -1760,8 +1765,8 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }
 
-static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
-				  struct dm_rq_target_io *tio, gfp_t gfp_mask)
+static struct request *clone_rq(struct request *rq, struct mapped_device *md,
+				struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
 	struct request *clone = alloc_clone_request(md, gfp_mask);
 
@@ -1780,10 +1785,9 @@ static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
 
 static void map_tio_request(struct kthread_work *work);
 
-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
-				gfp_t gfp_mask)
+static struct dm_rq_target_io *prep_tio(struct request *rq,
+					struct mapped_device *md, gfp_t gfp_mask)
 {
-	struct request *clone;
 	struct dm_rq_target_io *tio;
 
 	tio = alloc_rq_tio(md, gfp_mask);
@@ -1798,13 +1802,12 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	memset(&tio->info, 0, sizeof(tio->info));
 	init_kthread_work(&tio->work, map_tio_request);
 
-	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
-	if (!clone) {
+	if (!clone_rq(rq, md, tio, gfp_mask)) {
 		free_rq_tio(tio);
 		return NULL;
 	}
 
-	return clone;
+	return tio;
 }
 
 /*
@@ -1813,18 +1816,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 static int dm_prep_fn(struct request_queue *q, struct request *rq)
 {
 	struct mapped_device *md = q->queuedata;
-	struct request *clone;
+	struct dm_rq_target_io *tio;
 
 	if (unlikely(rq->special)) {
 		DMWARN("Already has something in rq->special.");
 		return BLKPREP_KILL;
 	}
 
-	clone = clone_rq(rq, md, GFP_ATOMIC);
-	if (!clone)
+	tio = prep_tio(rq, md, GFP_ATOMIC);
+	if (!tio)
 		return BLKPREP_DEFER;
 
-	rq->special = clone;
+	rq->special = tio;
 	rq->cmd_flags |= REQ_DONTPREP;
 
 	return BLKPREP_OK;
@@ -1835,11 +1838,12 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
  * 0  : the request has been processed (not requeued)
  * !0 : the request has been requeued
  */
-static int map_request(struct dm_target *ti, struct request *clone,
+static int map_request(struct dm_target *ti, struct request *rq,
 		       struct mapped_device *md)
 {
 	int r, requeued = 0;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;
 
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
@@ -1849,8 +1853,8 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REMAPPED:
 		/* The target has remapped the I/O so dispatch it */
 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
-				     blk_rq_pos(tio->orig));
-		dm_dispatch_request(clone);
+				     blk_rq_pos(rq));
+		dm_dispatch_clone_request(clone, rq);
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
@@ -1864,7 +1868,7 @@ static int map_request(struct dm_target *ti, struct request *clone,
 		}
 
 		/* The target wants to complete the I/O */
-		dm_kill_unmapped_request(clone, r);
+		dm_kill_unmapped_request(rq, r);
 		break;
 	}
 
@@ -1875,16 +1879,13 @@ static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
 
-	map_request(tio->ti, tio->clone, tio->md);
+	map_request(tio->ti, tio->orig, tio->md);
 }
 
-static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+static void dm_start_request(struct mapped_device *md, struct request *orig)
 {
-	struct request *clone;
-
 	blk_start_request(orig);
-	clone = orig->special;
-	atomic_inc(&md->pending[rq_data_dir(clone)]);
+	atomic_inc(&md->pending[rq_data_dir(orig)]);
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1894,8 +1895,6 @@ static struct request *dm_start_request(struct mapped_device *md, struct request
 	 * See the comment in rq_completed() too.
 	 */
 	dm_get(md);
-
-	return clone;
 }
 
 /*
@@ -1908,7 +1907,7 @@ static void dm_request_fn(struct request_queue *q)
 	int srcu_idx;
 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
-	struct request *rq, *clone;
+	struct request *rq;
 	struct dm_rq_target_io *tio;
 	sector_t pos;
 
@@ -1931,19 +1930,19 @@ static void dm_request_fn(struct request_queue *q)
 		ti = dm_table_find_target(map, pos);
 		if (!dm_target_is_valid(ti)) {
 			/*
-			 * Must perform setup, that dm_done() requires,
+			 * Must perform setup, that rq_completed() requires,
 			 * before calling dm_kill_unmapped_request
 			 */
 			DMERR_LIMIT("request attempted access beyond the end of device");
-			clone = dm_start_request(md, rq);
-			dm_kill_unmapped_request(clone, -EIO);
+			dm_start_request(md, rq);
+			dm_kill_unmapped_request(rq, -EIO);
 			continue;
 		}
 
 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;
 
-		clone = dm_start_request(md, rq);
+		dm_start_request(md, rq);
 
 		tio = rq->special;
 		/* Establish tio->ti before queuing work (map_tio_request) */
@@ -2240,16 +2239,15 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 			bioset_free(md->bs);
 			md->bs = p->bs;
 			p->bs = NULL;
-		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
-			/*
-			 * There's no need to reload with request-based dm
-			 * because the size of front_pad doesn't change.
-			 * Note for future: If you are to reload bioset,
-			 * prep-ed requests in the queue may refer
-			 * to bio from the old bioset, so you must walk
-			 * through the queue to unprep.
-			 */
 		}
+		/*
+		 * There's no need to reload with request-based dm
+		 * because the size of front_pad doesn't change.
+		 * Note for future: If you are to reload bioset,
+		 * prep-ed requests in the queue may refer
+		 * to bio from the old bioset, so you must walk
+		 * through the queue to unprep.
+		 */
 		goto out;
 	}
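
On the completion side, the diff above reroutes dm_complete_request() to take the original request and look the tio up via rq->special, while end_clone_request() still starts from the clone and hops through clone->end_io_data. A compile-able userspace sketch of those pointer hops follows; the structs are abridged and the bodies are illustrative stand-ins, not the kernel functions, which also handle cmd_flags, softirq completion and requeueing.

/* Illustrative model only: abridged structs and stubbed behaviour, not the
 * implementation in drivers/md/dm.c. */
#include <stdio.h>

struct request {
	void *special;      /* on the original: the dm_rq_target_io */
	void *end_io_data;  /* on the clone: back-pointer to the tio */
};

struct dm_rq_target_io {
	struct request *orig;
	struct request *clone;
	int error;
};

/* After this patch dm_complete_request() takes the *original* request and
 * finds the tio via rq->special (it used to take the clone). */
static void dm_complete_request(struct request *rq, int error)
{
	struct dm_rq_target_io *tio = rq->special;

	tio->error = error;
	printf("completing original %p with error %d\n", (void *)rq, error);
}

/* end_clone_request() still sees the clone, so it hops through
 * clone->end_io_data to reach the tio and completes tio->orig. */
static void end_clone_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

int main(void)
{
	struct request orig = { 0 }, clone = { 0 };
	struct dm_rq_target_io tio = { .orig = &orig, .clone = &clone };

	orig.special = &tio;       /* as dm_prep_fn() now does in the patch */
	clone.end_io_data = &tio;  /* set up when the clone is built */

	end_clone_request(&clone, 0);
	return 0;
}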
22552253
