 #include <linux/hdreg.h>
 #include <linux/delay.h>
 #include <linux/wait.h>
+#include <linux/kthread.h>
 
 #include <trace/events/block.h>
 
@@ -79,6 +80,7 @@ struct dm_rq_target_io {
 	struct mapped_device *md;
 	struct dm_target *ti;
 	struct request *orig, *clone;
+	struct kthread_work work;
 	int error;
 	union map_info info;
 };
@@ -208,6 +210,9 @@ struct mapped_device {
 	struct bio flush_bio;
 
 	struct dm_stats stats;
+
+	struct kthread_worker kworker;
+	struct task_struct *kworker_task;
 };
 
 /*
@@ -1773,6 +1778,8 @@ static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
 	return clone;
 }
 
+static void map_tio_request(struct kthread_work *work);
+
 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 				gfp_t gfp_mask)
 {
@@ -1789,6 +1796,7 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	tio->orig = rq;
 	tio->error = 0;
 	memset(&tio->info, 0, sizeof(tio->info));
+	init_kthread_work(&tio->work, map_tio_request);
 
 	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
 	if (!clone) {
@@ -1833,7 +1841,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	int r, requeued = 0;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	tio->ti = ti;
 	r = ti->type->map_rq(ti, clone, &tio->info);
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
@@ -1864,6 +1871,13 @@ static int map_request(struct dm_target *ti, struct request *clone,
 	return requeued;
 }
 
+static void map_tio_request(struct kthread_work *work)
+{
+	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
+
+	map_request(tio->ti, tio->clone, tio->md);
+}
+
 static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
 {
 	struct request *clone;
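Note for readers unfamiliar with the embedded-work pattern used above: the patch embeds a kthread_work inside dm_rq_target_io, binds it to its handler with init_kthread_work(), and the handler recovers the containing structure with container_of(), exactly as map_tio_request() does. The following is a minimal illustrative sketch, not part of the patch; the example_io/example_handle/example_submit names are hypothetical, and the kthread_* calls are the pre-4.9 names this patch uses.

/* Illustrative only: embed a kthread_work in a per-I/O struct (like
 * dm_rq_target_io) and recover the containing struct in the handler. */
#include <linux/kernel.h>
#include <linux/kthread.h>

struct example_io {
	int error;			/* per-I/O state the handler will read */
	struct kthread_work work;	/* embedded work item */
};

static void example_handle(struct kthread_work *work)
{
	/* container_of() walks back from the embedded member to the struct. */
	struct example_io *io = container_of(work, struct example_io, work);

	pr_info("handling io, error=%d\n", io->error);
}

static void example_submit(struct kthread_worker *worker, struct example_io *io)
{
	init_kthread_work(&io->work, example_handle);
	io->error = 0;			/* fill in all fields before queuing */
	queue_kthread_work(worker, &io->work);
}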
@@ -1895,6 +1909,7 @@ static void dm_request_fn(struct request_queue *q)
 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
 	struct request *rq, *clone;
+	struct dm_rq_target_io *tio;
 	sector_t pos;
 
 	/*
@@ -1930,20 +1945,15 @@ static void dm_request_fn(struct request_queue *q)
 
 		clone = dm_start_request(md, rq);
 
-		spin_unlock(q->queue_lock);
-		if (map_request(ti, clone, md))
-			goto requeued;
-
+		tio = rq->special;
+		/* Establish tio->ti before queuing work (map_tio_request) */
+		tio->ti = ti;
+		queue_kthread_work(&md->kworker, &tio->work);
 		BUG_ON(!irqs_disabled());
-		spin_lock(q->queue_lock);
 	}
 
 	goto out;
 
-requeued:
-	BUG_ON(!irqs_disabled());
-	spin_lock(q->queue_lock);
-
 delay_and_out:
 	blk_delay_queue(q, HZ / 10);
 out:
@@ -2129,6 +2139,7 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
 	init_completion(&md->kobj_holder.completion);
+	md->kworker_task = NULL;
 
 	md->disk->major = _major;
 	md->disk->first_minor = minor;
@@ -2189,6 +2200,9 @@ static void free_dev(struct mapped_device *md)
 	unlock_fs(md);
 	bdput(md->bdev);
 	destroy_workqueue(md->wq);
+
+	if (md->kworker_task)
+		kthread_stop(md->kworker_task);
 	if (md->io_pool)
 		mempool_destroy(md->io_pool);
 	if (md->rq_pool)
@@ -2484,6 +2498,11 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 	blk_queue_prep_rq(md->queue, dm_prep_fn);
 	blk_queue_lld_busy(md->queue, dm_lld_busy);
 
+	/* Also initialize the request-based DM worker thread */
+	init_kthread_worker(&md->kworker);
+	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
+				       "kdmwork-%s", dm_device_name(md));
+
 	elv_register_queue(md->queue);
 
 	return 1;
@@ -2574,6 +2593,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
+	if (dm_request_based(md))
+		flush_kthread_worker(&md->kworker);
+
 	if (!dm_suspended_md(md)) {
 		dm_table_presuspend_targets(map);
 		dm_table_postsuspend_targets(map);
@@ -2817,8 +2839,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 	 * Stop md->queue before flushing md->wq in case request-based
 	 * dm defers requests to md->wq from md->queue.
 	 */
-	if (dm_request_based(md))
+	if (dm_request_based(md)) {
 		stop_queue(md->queue);
+		flush_kthread_worker(&md->kworker);
+	}
 
 	flush_workqueue(md->wq);
 
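Taken together, the hunks above give md->kworker a simple lifecycle: the worker and its task are created in dm_init_request_based_queue(), flushed in __dm_suspend() and __dm_destroy(), and the task is stopped in free_dev(). The sketch below restates that lifecycle outside device-mapper, as a hedged illustration only, using the same pre-4.9 kthread_worker API seen in the patch; the exdev names are hypothetical.

/* Illustrative lifecycle of a kthread_worker, mirroring what the patch
 * does for md->kworker (pre-4.9 API names, as used above). */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct exdev {
	struct kthread_worker kworker;
	struct task_struct *kworker_task;
};

static int exdev_start(struct exdev *d, const char *name)
{
	/* Mirrors dm_init_request_based_queue(): set up the worker and
	 * run kthread_worker_fn() in a dedicated thread. */
	init_kthread_worker(&d->kworker);
	d->kworker_task = kthread_run(kthread_worker_fn, &d->kworker,
				      "kexdev-%s", name);
	if (IS_ERR(d->kworker_task))
		return PTR_ERR(d->kworker_task);
	return 0;
}

static void exdev_quiesce(struct exdev *d)
{
	/* Mirrors the __dm_suspend()/__dm_destroy() paths: wait for all
	 * queued work items to finish. */
	flush_kthread_worker(&d->kworker);
}

static void exdev_stop(struct exdev *d)
{
	/* Mirrors free_dev(): stop the worker thread if it was started. */
	if (d->kworker_task)
		kthread_stop(d->kworker_task);
}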