@@ -27,6 +27,94 @@
 #include <linux/bsg-lib.h>
 #include <linux/export.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/sg.h>
+
+#define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
+{
+        if (hdr->protocol != BSG_PROTOCOL_SCSI ||
+            hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
+                return -EINVAL;
+        if (!capable(CAP_SYS_RAWIO))
+                return -EPERM;
+        return 0;
+}
+
+static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
+                fmode_t mode)
+{
+        struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+
+        job->request_len = hdr->request_len;
+        job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
+        if (IS_ERR(job->request))
+                return PTR_ERR(job->request);
+        return 0;
+}
+
+static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
+{
+        struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+        int ret = 0;
+
+        /*
+         * The assignments below don't make much sense, but are kept for
+         * bug by bug backwards compatibility:
+         */
+        hdr->device_status = job->result & 0xff;
+        hdr->transport_status = host_byte(job->result);
+        hdr->driver_status = driver_byte(job->result);
+        hdr->info = 0;
+        if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+                hdr->info |= SG_INFO_CHECK;
+        hdr->response_len = 0;
+
+        if (job->result < 0) {
+                /* we're only returning the result field in the reply */
+                job->reply_len = sizeof(u32);
+                ret = job->result;
+        }
+
+        if (job->reply_len && hdr->response) {
+                int len = min(hdr->max_response_len, job->reply_len);
+
+                if (copy_to_user(uptr64(hdr->response), job->reply, len))
+                        ret = -EFAULT;
+                else
+                        hdr->response_len = len;
+        }
+
+        /* we assume all request payload was transferred, residual == 0 */
+        hdr->dout_resid = 0;
+
+        if (rq->next_rq) {
+                unsigned int rsp_len = job->reply_payload.payload_len;
+
+                if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
+                        hdr->din_resid = 0;
+                else
+                        hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
+        } else {
+                hdr->din_resid = 0;
+        }
+
+        return ret;
+}
+
+static void bsg_transport_free_rq(struct request *rq)
+{
+        struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+
+        kfree(job->request);
+}
+
+static const struct bsg_ops bsg_transport_ops = {
+        .check_proto = bsg_transport_check_proto,
+        .fill_hdr = bsg_transport_fill_hdr,
+        .complete_rq = bsg_transport_complete_rq,
+        .free_rq = bsg_transport_free_rq,
+};
 
 /**
  * bsg_teardown_job - routine to teardown a bsg job
@@ -68,27 +156,9 @@ EXPORT_SYMBOL_GPL(bsg_job_get);
 void bsg_job_done(struct bsg_job *job, int result,
                   unsigned int reply_payload_rcv_len)
 {
-        struct request *req = blk_mq_rq_from_pdu(job);
-        struct request *rsp = req->next_rq;
-        int err;
-
-        err = job->sreq.result = result;
-        if (err < 0)
-                /* we're only returning the result field in the reply */
-                job->sreq.sense_len = sizeof(u32);
-        else
-                job->sreq.sense_len = job->reply_len;
-        /* we assume all request payload was transferred, residual == 0 */
-        job->sreq.resid_len = 0;
-
-        if (rsp) {
-                WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);
-
-                /* set reply (bidi) residual */
-                scsi_req(rsp)->resid_len -=
-                        min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
-        }
-        blk_complete_request(req);
+        job->result = result;
+        job->reply_payload_rcv_len = reply_payload_rcv_len;
+        blk_complete_request(blk_mq_rq_from_pdu(job));
 }
 EXPORT_SYMBOL_GPL(bsg_job_done);
 
@@ -113,7 +183,6 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
         if (!buf->sg_list)
                 return -ENOMEM;
         sg_init_table(buf->sg_list, req->nr_phys_segments);
-        scsi_req(req)->resid_len = blk_rq_bytes(req);
         buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
         buf->payload_len = blk_rq_bytes(req);
         return 0;
@@ -124,16 +193,13 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
  * @dev: device that is being sent the bsg request
  * @req: BSG request that needs a job structure
  */
-static int bsg_prepare_job(struct device *dev, struct request *req)
+static bool bsg_prepare_job(struct device *dev, struct request *req)
 {
         struct request *rsp = req->next_rq;
-        struct scsi_request *rq = scsi_req(req);
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
         int ret;
 
         job->timeout = req->timeout;
-        job->request = rq->cmd;
-        job->request_len = rq->cmd_len;
 
         if (req->bio) {
                 ret = bsg_map_buffer(&job->request_payload, req);
@@ -149,12 +215,13 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
         /* take a reference for the request */
         get_device(job->dev);
         kref_init(&job->kref);
-        return 0;
+        return true;
 
 failjob_rls_rqst_payload:
         kfree(job->request_payload.sg_list);
 failjob_rls_job:
-        return -ENOMEM;
+        job->result = -ENOMEM;
+        return false;
 }
 
 /**
@@ -183,9 +250,7 @@ static void bsg_request_fn(struct request_queue *q)
                         break;
                 spin_unlock_irq(q->queue_lock);
 
-                ret = bsg_prepare_job(dev, req);
-                if (ret) {
-                        scsi_req(req)->result = ret;
+                if (!bsg_prepare_job(dev, req)) {
                         blk_end_request_all(req, BLK_STS_OK);
                         spin_lock_irq(q->queue_lock);
                         continue;
@@ -202,46 +267,34 @@ static void bsg_request_fn(struct request_queue *q)
                 spin_lock_irq(q->queue_lock);
         }
 
+/* called right after the request is allocated for the request_queue */
 static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
-        struct scsi_request *sreq = &job->sreq;
-
-        /* called right after the request is allocated for the request_queue */
 
-        sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
-        if (!sreq->sense)
+        job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+        if (!job->reply)
                 return -ENOMEM;
-
         return 0;
 }
 
+/* called right before the request is given to the request_queue user */
 static void bsg_initialize_rq(struct request *req)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
-        struct scsi_request *sreq = &job->sreq;
-        void *sense = sreq->sense;
-
-        /* called right before the request is given to the request_queue user */
+        void *reply = job->reply;
 
         memset(job, 0, sizeof(*job));
-
-        scsi_req_init(sreq);
-
-        sreq->sense = sense;
-        sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
-
-        job->reply = sense;
-        job->reply_len = sreq->sense_len;
+        job->reply = reply;
+        job->reply_len = SCSI_SENSE_BUFFERSIZE;
         job->dd_data = job + 1;
 }
 
 static void bsg_exit_rq(struct request_queue *q, struct request *req)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
-        struct scsi_request *sreq = &job->sreq;
 
-        kfree(sreq->sense);
+        kfree(job->reply);
 }
 
 /**
@@ -275,11 +328,10 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
         q->queuedata = dev;
         q->bsg_job_fn = job_fn;
         blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
-        blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
         blk_queue_softirq_done(q, bsg_softirq_done);
         blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-        ret = bsg_register_queue(q, dev, name, release);
+        ret = bsg_register_queue(q, dev, name, &bsg_transport_ops, release);
         if (ret) {
                 printk(KERN_ERR "%s: bsg interface failed to "
                         "initialize - register queue\n", dev->kobj.name);
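For context, here is a minimal sketch (not part of the commit) of how a transport LLD is expected to consume the reworked job interface: the driver's job handler reads the user-copied command from job->request, fills job->reply with any transport-level response data, and signals completion with bsg_job_done(), which after this change only records job->result and job->reply_payload_rcv_len before completing the request. The handler name and the int-returning bsg_job_fn convention are assumptions for illustration.

/*
 * Hypothetical transport driver job handler, sketched against the
 * interface shown in this diff.
 */
#include <linux/bsg-lib.h>
#include <linux/errno.h>
#include <linux/string.h>

static int example_bsg_job_fn(struct bsg_job *job)
{
        /*
         * job->request/job->request_len hold the payload that
         * bsg_transport_fill_hdr() copied from user space with
         * memdup_user().
         */
        if (job->request_len < 4) {
                /*
                 * A negative result is reported back to user space;
                 * complete_rq() then truncates the reply to the result
                 * word only.
                 */
                bsg_job_done(job, -EINVAL, 0);
                return 0;
        }

        /*
         * Place a (hypothetical) transport reply into the preallocated
         * job->reply buffer, which is job->reply_len bytes long.
         */
        memset(job->reply, 0, job->reply_len);

        /* Success: result 0, no data-in payload received. */
        bsg_job_done(job, 0, 0);
        return 0;
}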