
Commit 17cb960

Christoph Hellwig authored and axboe committed
bsg: split handling of SCSI CDBs vs transport requeues
The current BSG design tries to shoe-horn the transport-specific passthrough commands into the overall framework for SCSI passthrough requests. This has a couple of problems:

 - each passthrough queue has to set the QUEUE_FLAG_SCSI_PASSTHROUGH flag despite not dealing with SCSI commands at all. Because of that these queues could also incorrectly accept SCSI commands from in-kernel users or through the legacy SCSI_IOCTL_SEND_COMMAND ioctl.
 - the real SCSI bsg queues also incorrectly accept bsg requests of the BSG_SUB_PROTOCOL_SCSI_TRANSPORT type
 - the bsg transport code is almost unreadable because it tries to reuse different SCSI concepts for its own purpose.

This patch instead adds a new bsg_ops structure to handle the two cases differently, and thus solves all of the above problems. Another side effect is that the bsg-lib queues also don't need to embed a struct scsi_request anymore.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
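For context, the operations table that bsg-lib registers in this patch has roughly the shape sketched below. This is reconstructed only from the bsg_transport_* callbacks defined in the block/bsg-lib.c hunk further down; the actual struct bsg_ops definition lives in one of the other changed files (not shown in this excerpt) and may differ in detail.

/*
 * Rough sketch of the new per-protocol ops table, inferred from the
 * bsg_transport_* callbacks in the diff below; not the verbatim definition.
 */
struct bsg_ops {
	int	(*check_proto)(struct sg_io_v4 *hdr);
	int	(*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
			    fmode_t mode);
	int	(*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
	void	(*free_rq)(struct request *rq);
};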
1 parent ef6fa64 commit 17cb960

File tree

7 files changed: +250 −217 lines


block/bsg-lib.c

Lines changed: 105 additions & 53 deletions
@@ -27,6 +27,94 @@
 #include <linux/bsg-lib.h>
 #include <linux/export.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/sg.h>
+
+#define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
+{
+	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
+	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
+		return -EINVAL;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	return 0;
+}
+
+static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
+		fmode_t mode)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+
+	job->request_len = hdr->request_len;
+	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
+	if (IS_ERR(job->request))
+		return PTR_ERR(job->request);
+	return 0;
+}
+
+static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+	int ret = 0;
+
+	/*
+	 * The assignments below don't make much sense, but are kept for
+	 * bug by bug backwards compatibility:
+	 */
+	hdr->device_status = job->result & 0xff;
+	hdr->transport_status = host_byte(job->result);
+	hdr->driver_status = driver_byte(job->result);
+	hdr->info = 0;
+	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+		hdr->info |= SG_INFO_CHECK;
+	hdr->response_len = 0;
+
+	if (job->result < 0) {
+		/* we're only returning the result field in the reply */
+		job->reply_len = sizeof(u32);
+		ret = job->result;
+	}
+
+	if (job->reply_len && hdr->response) {
+		int len = min(hdr->max_response_len, job->reply_len);
+
+		if (copy_to_user(uptr64(hdr->response), job->reply, len))
+			ret = -EFAULT;
+		else
+			hdr->response_len = len;
+	}
+
+	/* we assume all request payload was transferred, residual == 0 */
+	hdr->dout_resid = 0;
+
+	if (rq->next_rq) {
+		unsigned int rsp_len = job->reply_payload.payload_len;
+
+		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
+			hdr->din_resid = 0;
+		else
+			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
+	} else {
+		hdr->din_resid = 0;
+	}
+
+	return ret;
+}
+
+static void bsg_transport_free_rq(struct request *rq)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+
+	kfree(job->request);
+}
+
+static const struct bsg_ops bsg_transport_ops = {
+	.check_proto		= bsg_transport_check_proto,
+	.fill_hdr		= bsg_transport_fill_hdr,
+	.complete_rq		= bsg_transport_complete_rq,
+	.free_rq		= bsg_transport_free_rq,
+};
 
 /**
  * bsg_teardown_job - routine to teardown a bsg job
@@ -68,27 +156,9 @@ EXPORT_SYMBOL_GPL(bsg_job_get);
 void bsg_job_done(struct bsg_job *job, int result,
 		  unsigned int reply_payload_rcv_len)
 {
-	struct request *req = blk_mq_rq_from_pdu(job);
-	struct request *rsp = req->next_rq;
-	int err;
-
-	err = job->sreq.result = result;
-	if (err < 0)
-		/* we're only returning the result field in the reply */
-		job->sreq.sense_len = sizeof(u32);
-	else
-		job->sreq.sense_len = job->reply_len;
-	/* we assume all request payload was transferred, residual == 0 */
-	job->sreq.resid_len = 0;
-
-	if (rsp) {
-		WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);
-
-		/* set reply (bidi) residual */
-		scsi_req(rsp)->resid_len -=
-			min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
-	}
-	blk_complete_request(req);
+	job->result = result;
+	job->reply_payload_rcv_len = reply_payload_rcv_len;
+	blk_complete_request(blk_mq_rq_from_pdu(job));
 }
 EXPORT_SYMBOL_GPL(bsg_job_done);
 
@@ -113,7 +183,6 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 	if (!buf->sg_list)
 		return -ENOMEM;
 	sg_init_table(buf->sg_list, req->nr_phys_segments);
-	scsi_req(req)->resid_len = blk_rq_bytes(req);
 	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
 	buf->payload_len = blk_rq_bytes(req);
 	return 0;
@@ -124,16 +193,13 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
  * @dev: device that is being sent the bsg request
  * @req: BSG request that needs a job structure
  */
-static int bsg_prepare_job(struct device *dev, struct request *req)
+static bool bsg_prepare_job(struct device *dev, struct request *req)
 {
 	struct request *rsp = req->next_rq;
-	struct scsi_request *rq = scsi_req(req);
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
 	int ret;
 
 	job->timeout = req->timeout;
-	job->request = rq->cmd;
-	job->request_len = rq->cmd_len;
 
 	if (req->bio) {
 		ret = bsg_map_buffer(&job->request_payload, req);
@@ -149,12 +215,13 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
 	/* take a reference for the request */
 	get_device(job->dev);
 	kref_init(&job->kref);
-	return 0;
+	return true;
 
 failjob_rls_rqst_payload:
 	kfree(job->request_payload.sg_list);
failjob_rls_job:
-	return -ENOMEM;
+	job->result = -ENOMEM;
+	return false;
 }
 
 /**
@@ -183,9 +250,7 @@ static void bsg_request_fn(struct request_queue *q)
 			break;
 		spin_unlock_irq(q->queue_lock);
 
-		ret = bsg_prepare_job(dev, req);
-		if (ret) {
-			scsi_req(req)->result = ret;
+		if (!bsg_prepare_job(dev, req)) {
 			blk_end_request_all(req, BLK_STS_OK);
 			spin_lock_irq(q->queue_lock);
 			continue;
@@ -202,46 +267,34 @@ static void bsg_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 }
 
+/* called right after the request is allocated for the request_queue */
 static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
 {
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
-	struct scsi_request *sreq = &job->sreq;
-
-	/* called right after the request is allocated for the request_queue */
 
-	sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
-	if (!sreq->sense)
+	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+	if (!job->reply)
 		return -ENOMEM;
-
 	return 0;
 }
 
+/* called right before the request is given to the request_queue user */
 static void bsg_initialize_rq(struct request *req)
 {
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
-	struct scsi_request *sreq = &job->sreq;
-	void *sense = sreq->sense;
-
-	/* called right before the request is given to the request_queue user */
+	void *reply = job->reply;
 
 	memset(job, 0, sizeof(*job));
-
-	scsi_req_init(sreq);
-
-	sreq->sense = sense;
-	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
-
-	job->reply = sense;
-	job->reply_len = sreq->sense_len;
+	job->reply = reply;
+	job->reply_len = SCSI_SENSE_BUFFERSIZE;
 	job->dd_data = job + 1;
 }
 
 static void bsg_exit_rq(struct request_queue *q, struct request *req)
 {
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
-	struct scsi_request *sreq = &job->sreq;
 
-	kfree(sreq->sense);
+	kfree(job->reply);
 }
 
 /**
@@ -275,11 +328,10 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->queuedata = dev;
 	q->bsg_job_fn = job_fn;
 	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
-	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	blk_queue_softirq_done(q, bsg_softirq_done);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-	ret = bsg_register_queue(q, dev, name, release);
+	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops, release);
 	if (ret) {
 		printk(KERN_ERR "%s: bsg interface failed to "
 		       "initialize - register queue\n", dev->kobj.name);

0 commit comments
