@@ -1016,7 +1016,7 @@ static void end_clone_bio(struct bio *clone, int error)
  * the md may be freed in dm_put() at the end of this function.
  * Or do dm_get() before calling this function and dm_put() later.
  */
-static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
 	atomic_dec(&md->pending[rw]);

@@ -1050,7 +1050,8 @@ static void free_rq_clone(struct request *clone)

 /*
  * Complete the clone and the original request.
- * Must be called without queue lock.
+ * Must be called without clone's queue lock held,
+ * see end_clone_request() for more details.
  */
 static void dm_end_request(struct request *clone, int error)
 {
@@ -1079,7 +1080,8 @@ static void dm_end_request(struct request *clone, int error)

 static void dm_unprep_request(struct request *rq)
 {
-	struct request *clone = rq->special;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;

 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;
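
For reference, the dm_rq_target_io that rq->special now points at (instead of the clone) looks roughly like this. The sketch is reconstructed only from the members the patch touches (tio->md, ti, orig, clone, work, error, info), so field order and any members not referenced in these hunks may differ:

struct dm_rq_target_io {
	struct mapped_device *md;	/* owning mapped device */
	struct dm_target *ti;		/* set in dm_request_fn() before queuing work */
	struct request *orig;		/* original request on the dm queue */
	struct request *clone;		/* clone dispatched to the underlying queue */
	struct kthread_work work;	/* runs map_tio_request() */
	int error;			/* completion status consumed in softirq */
	union map_info info;		/* per-target map context */
};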
@@ -1090,12 +1092,10 @@ static void dm_unprep_request(struct request *rq)
 /*
  * Requeue the original request of a clone.
  */
-static void dm_requeue_unmapped_request(struct request *clone)
+static void dm_requeue_unmapped_original_request(struct mapped_device *md,
+						 struct request *rq)
 {
-	int rw = rq_data_dir(clone);
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct mapped_device *md = tio->md;
-	struct request *rq = tio->orig;
+	int rw = rq_data_dir(rq);
 	struct request_queue *q = rq->q;
 	unsigned long flags;

@@ -1105,7 +1105,14 @@ static void dm_requeue_unmapped_request(struct request *clone)
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);

-	rq_completed(md, rw, 0);
+	rq_completed(md, rw, false);
+}
+
+static void dm_requeue_unmapped_request(struct request *clone)
+{
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	dm_requeue_unmapped_original_request(tio->md, tio->orig);
 }

 static void __stop_queue(struct request_queue *q)
@@ -1175,8 +1182,8 @@ static void dm_done(struct request *clone, int error, bool mapped)
 static void dm_softirq_done(struct request *rq)
 {
 	bool mapped = true;
-	struct request *clone = rq->completion_data;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;

 	if (rq->cmd_flags & REQ_FAILED)
 		mapped = false;
@@ -1188,13 +1195,11 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *clone, int error)
+static void dm_complete_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
+	struct dm_rq_target_io *tio = rq->special;

 	tio->error = error;
-	rq->completion_data = clone;
 	blk_complete_request(rq);
 }

@@ -1204,20 +1209,19 @@ static void dm_complete_request(struct request *clone, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() function fails.
  */
-static void dm_kill_unmapped_request(struct request *clone, int error)
+static void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	struct dm_rq_target_io *tio = clone->end_io_data;
-	struct request *rq = tio->orig;
-
 	rq->cmd_flags |= REQ_FAILED;
-	dm_complete_request(clone, error);
+	dm_complete_request(rq, error);
 }

 /*
- * Called with the queue lock held
+ * Called with the clone's queue lock held
  */
 static void end_clone_request(struct request *clone, int error)
 {
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
 	/*
 	 * For just cleaning up the information of the queue in which
 	 * the clone was dispatched.
@@ -1228,13 +1232,13 @@ static void end_clone_request(struct request *clone, int error)

 	/*
 	 * Actual request completion is done in a softirq context which doesn't
-	 * hold the queue lock.  Otherwise, deadlock could occur because:
+	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
 	 *     - another request may be submitted by the upper level driver
 	 *       of the stacking during the completion
 	 *     - the submission which requires queue lock may be done
-	 *       against this queue
+	 *       against this clone's queue
 	 */
-	dm_complete_request(clone, error);
+	dm_complete_request(tio->orig, error);
 }

 /*
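
Putting the completion-side hunks together: the clone's end_io callback still runs with the clone's queue lock held, but it now only records the error against the original request and defers the real completion to softirq context. A rough summary of the resulting call chain, assembled from the hunks above (assuming no other completion-path changes in between):

/*
 * end_clone_request(clone, error)              clone's queue lock held
 *   tio = clone->end_io_data;
 *   dm_complete_request(tio->orig, error)
 *     tio = rq->special;  tio->error = error;
 *     blk_complete_request(rq);                defer to softirq
 *
 * dm_softirq_done(rq)                          softirq, no queue lock held
 *   tio = rq->special;  clone = tio->clone;
 *   dm_done(clone, tio->error, mapped);        completes clone and original
 */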
@@ -1712,16 +1716,17 @@ static void dm_request(struct request_queue *q, struct bio *bio)
 		_dm_request(q, bio);
 }

-static void dm_dispatch_request(struct request *rq)
+static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 {
 	int r;

-	if (blk_queue_io_stat(rq->q))
-		rq->cmd_flags |= REQ_IO_STAT;
+	if (blk_queue_io_stat(clone->q))
+		clone->cmd_flags |= REQ_IO_STAT;

-	rq->start_time = jiffies;
-	r = blk_insert_cloned_request(rq->q, rq);
+	clone->start_time = jiffies;
+	r = blk_insert_cloned_request(clone->q, clone);
 	if (r)
+		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
 }

@@ -1760,8 +1765,8 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }

-static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
-				  struct dm_rq_target_io *tio, gfp_t gfp_mask)
+static struct request *clone_rq(struct request *rq, struct mapped_device *md,
+				struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
 	struct request *clone = alloc_clone_request(md, gfp_mask);

@@ -1780,10 +1785,9 @@ static struct request *__clone_rq(struct request *rq, struct mapped_device *md,

 static void map_tio_request(struct kthread_work *work);

-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
-				gfp_t gfp_mask)
+static struct dm_rq_target_io *prep_tio(struct request *rq,
+					struct mapped_device *md, gfp_t gfp_mask)
 {
-	struct request *clone;
 	struct dm_rq_target_io *tio;

 	tio = alloc_rq_tio(md, gfp_mask);
@@ -1798,13 +1802,12 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	memset(&tio->info, 0, sizeof(tio->info));
 	init_kthread_work(&tio->work, map_tio_request);

-	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
-	if (!clone) {
+	if (!clone_rq(rq, md, tio, gfp_mask)) {
 		free_rq_tio(tio);
 		return NULL;
 	}

-	return clone;
+	return tio;
 }

 /*
@@ -1813,18 +1816,18 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 static int dm_prep_fn(struct request_queue *q, struct request *rq)
 {
 	struct mapped_device *md = q->queuedata;
-	struct request *clone;
+	struct dm_rq_target_io *tio;

 	if (unlikely(rq->special)) {
 		DMWARN("Already has something in rq->special.");
 		return BLKPREP_KILL;
 	}

-	clone = clone_rq(rq, md, GFP_ATOMIC);
-	if (!clone)
+	tio = prep_tio(rq, md, GFP_ATOMIC);
+	if (!tio)
 		return BLKPREP_DEFER;

-	rq->special = clone;
+	rq->special = tio;
 	rq->cmd_flags |= REQ_DONTPREP;

 	return BLKPREP_OK;
@@ -1835,11 +1838,12 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
  * 0  : the request has been processed (not requeued)
  * !0 : the request has been requeued
  */
-static int map_request(struct dm_target *ti, struct request *clone,
+static int map_request(struct dm_target *ti, struct request *rq,
 		       struct mapped_device *md)
 {
 	int r, requeued = 0;
-	struct dm_rq_target_io *tio = clone->end_io_data;
+	struct dm_rq_target_io *tio = rq->special;
+	struct request *clone = tio->clone;

 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
@@ -1849,8 +1853,8 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	case DM_MAPIO_REMAPPED:
 		/* The target has remapped the I/O so dispatch it */
 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
-				     blk_rq_pos(tio->orig));
-		dm_dispatch_request(clone);
+				     blk_rq_pos(rq));
+		dm_dispatch_clone_request(clone, rq);
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
@@ -1864,7 +1868,7 @@ static int map_request(struct dm_target *ti, struct request *clone,
 		}

 		/* The target wants to complete the I/O */
-		dm_kill_unmapped_request(clone, r);
+		dm_kill_unmapped_request(rq, r);
 		break;
 	}

@@ -1875,16 +1879,13 @@ static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

-	map_request(tio->ti, tio->clone, tio->md);
+	map_request(tio->ti, tio->orig, tio->md);
 }

-static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+static void dm_start_request(struct mapped_device *md, struct request *orig)
 {
-	struct request *clone;
-
 	blk_start_request(orig);
-	clone = orig->special;
-	atomic_inc(&md->pending[rq_data_dir(clone)]);
+	atomic_inc(&md->pending[rq_data_dir(orig)]);

 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1894,8 +1895,6 @@ static struct request *dm_start_request(struct mapped_device *md, struct request
 	 * See the comment in rq_completed() too.
 	 */
 	dm_get(md);
-
-	return clone;
 }

 /*
@@ -1908,7 +1907,7 @@ static void dm_request_fn(struct request_queue *q)
 	int srcu_idx;
 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
-	struct request *rq, *clone;
+	struct request *rq;
 	struct dm_rq_target_io *tio;
 	sector_t pos;

@@ -1931,19 +1930,19 @@ static void dm_request_fn(struct request_queue *q)
 		ti = dm_table_find_target(map, pos);
 		if (!dm_target_is_valid(ti)) {
 			/*
-			 * Must perform setup, that dm_done() requires,
+			 * Must perform setup, that rq_completed() requires,
 			 * before calling dm_kill_unmapped_request
 			 */
 			DMERR_LIMIT("request attempted access beyond the end of device");
-			clone = dm_start_request(md, rq);
-			dm_kill_unmapped_request(clone, -EIO);
+			dm_start_request(md, rq);
+			dm_kill_unmapped_request(rq, -EIO);
 			continue;
 		}

 		if (ti->type->busy && ti->type->busy(ti))
 			goto delay_and_out;

-		clone = dm_start_request(md, rq);
+		dm_start_request(md, rq);

 		tio = rq->special;
 		/* Establish tio->ti before queuing work (map_tio_request) */
@@ -2240,16 +2239,15 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 			bioset_free(md->bs);
 			md->bs = p->bs;
 			p->bs = NULL;
-		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
-			/*
-			 * There's no need to reload with request-based dm
-			 * because the size of front_pad doesn't change.
-			 * Note for future: If you are to reload bioset,
-			 * prep-ed requests in the queue may refer
-			 * to bio from the old bioset, so you must walk
-			 * through the queue to unprep.
-			 */
 		}
+		/*
+		 * There's no need to reload with request-based dm
+		 * because the size of front_pad doesn't change.
+		 * Note for future: If you are to reload bioset,
+		 * prep-ed requests in the queue may refer
+		 * to bio from the old bioset, so you must walk
+		 * through the queue to unprep.
+		 */
 		goto out;
 	}

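
On the submission side, the net effect of the hunks above is that everything up to the actual dispatch is keyed off the original request, and the clone is only looked up through the tio at map time. A rough sketch of the request-based path after this patch, assembled from the hunks rather than quoted from the final code:

/*
 * dm_prep_fn(q, rq)                       block layer prep hook
 *   tio = prep_tio(rq, md, GFP_ATOMIC);   allocates tio and its clone (clone_rq)
 *   rq->special = tio;
 *
 * dm_request_fn(q)
 *   dm_start_request(md, rq);             blk_start_request() + md reference
 *   tio = rq->special;  tio->ti = ti;
 *   queue tio->work (kthread work that runs map_tio_request)
 *
 * map_tio_request(work)                   kthread context
 *   map_request(tio->ti, tio->orig, tio->md)
 *     clone = tio->clone;
 *     ti->type->map_rq(ti, clone, &tio->info);
 *     dm_dispatch_clone_request(clone, rq);    on DM_MAPIO_REMAPPED
 */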