Skip to content

Commit 19240e6

Browse files
committed
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe: - Two sets of NVMe pull requests from Christoph: - Fixes for the Fibre Channel host/target to fix spec compliance - Allow a zero keep alive timeout - Make the debug printk for broken SGLs work better - Fix queue zeroing during initialization - Set of RDMA and FC fixes - Target div-by-zero fix - bsg double-free fix. - nbd unknown ioctl fix from Josef. - Buffered vs O_DIRECT page cache inconsistency fix. Has been floating around for a long time, well reviewed. From Lukas. - brd overflow fix from Mikulas. - Fix for a loop regression in this merge window, where using a union for two members of the loop_cmd turned out to be a really bad idea. From Omar. - Fix for an iostat regression in this series, using the wrong API to get at the block queue. From Shaohua. - Fix for a potential blktrace deletion deadlock. From Waiman. * 'for-linus' of git://git.kernel.dk/linux-block: (30 commits) nvme-fcloop: fix port deletes and callbacks nvmet-fc: sync header templates with comments nvmet-fc: ensure target queue id within range. nvmet-fc: on port remove call put outside lock nvme-rdma: don't fully stop the controller in error recovery nvme-rdma: give up reconnect if state change fails nvme-core: Use nvme_wq to queue async events and fw activation nvme: fix sqhd reference when admin queue connect fails block: fix a crash caused by wrong API fs: Fix page cache inconsistency when mixing buffered and AIO DIO nvmet: implement valid sqhd values in completions nvme-fabrics: Allow 0 as KATO value nvme: allow timed-out ios to retry nvme: stop aer posting if controller state not live nvme-pci: Print invalid SGL only once nvme-pci: initialize queue memory before interrupts nvmet-fc: fix failing max io queue connections nvme-fc: use transport-specific sgl format nvme: add transport SGL definitions nvme.h: remove FC transport-specific error values ...
2 parents 1776364 + fddc992 commit 19240e6

File tree

25 files changed

+226
-175
lines changed

25 files changed

+226
-175
lines changed

block/blk-core.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -854,6 +854,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
854854

855855
kobject_init(&q->kobj, &blk_queue_ktype);
856856

857+
#ifdef CONFIG_BLK_DEV_IO_TRACE
858+
mutex_init(&q->blk_trace_mutex);
859+
#endif
857860
mutex_init(&q->sysfs_lock);
858861
spin_lock_init(&q->__queue_lock);
859862

block/bsg-lib.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
154154
failjob_rls_rqst_payload:
155155
kfree(job->request_payload.sg_list);
156156
failjob_rls_job:
157-
kfree(job);
158157
return -ENOMEM;
159158
}
160159

block/partition-generic.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ ssize_t part_stat_show(struct device *dev,
112112
struct device_attribute *attr, char *buf)
113113
{
114114
struct hd_struct *p = dev_to_part(dev);
115-
struct request_queue *q = dev_to_disk(dev)->queue;
115+
struct request_queue *q = part_to_disk(p)->queue;
116116
unsigned int inflight[2];
117117
int cpu;
118118

drivers/block/brd.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
342342

343343
if (!brd)
344344
return -ENODEV;
345-
page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
345+
page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
346346
if (!page)
347347
return -ENOSPC;
348348
*kaddr = page_address(page);

drivers/block/loop.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -67,10 +67,8 @@ struct loop_device {
6767
struct loop_cmd {
6868
struct kthread_work work;
6969
struct request *rq;
70-
union {
71-
bool use_aio; /* use AIO interface to handle I/O */
72-
atomic_t ref; /* only for aio */
73-
};
70+
bool use_aio; /* use AIO interface to handle I/O */
71+
atomic_t ref; /* only for aio */
7472
long ret;
7573
struct kiocb iocb;
7674
struct bio_vec *bvec;

drivers/block/nbd.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1194,6 +1194,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
11941194
if (!capable(CAP_SYS_ADMIN))
11951195
return -EPERM;
11961196

1197+
/* The block layer will pass back some non-nbd ioctls in case we have
1198+
* special handling for them, but we don't so just return an error.
1199+
*/
1200+
if (_IOC_TYPE(cmd) != 0xab)
1201+
return -EINVAL;
1202+
11971203
mutex_lock(&nbd->config_lock);
11981204

11991205
/* Don't allow ioctl operations on a nbd device that was created with

drivers/nvme/host/core.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -134,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req)
134134
return false;
135135
if (nvme_req(req)->status & NVME_SC_DNR)
136136
return false;
137-
if (jiffies - req->start_time >= req->timeout)
138-
return false;
139137
if (nvme_req(req)->retries >= nvme_max_retries)
140138
return false;
141139
return true;
@@ -2590,7 +2588,7 @@ static void nvme_async_event_work(struct work_struct *work)
25902588
container_of(work, struct nvme_ctrl, async_event_work);
25912589

25922590
spin_lock_irq(&ctrl->lock);
2593-
while (ctrl->event_limit > 0) {
2591+
while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
25942592
int aer_idx = --ctrl->event_limit;
25952593

25962594
spin_unlock_irq(&ctrl->lock);
@@ -2677,7 +2675,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
26772675
/*FALLTHRU*/
26782676
case NVME_SC_ABORT_REQ:
26792677
++ctrl->event_limit;
2680-
queue_work(nvme_wq, &ctrl->async_event_work);
2678+
if (ctrl->state == NVME_CTRL_LIVE)
2679+
queue_work(nvme_wq, &ctrl->async_event_work);
26812680
break;
26822681
default:
26832682
break;
@@ -2692,7 +2691,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
26922691
nvme_queue_scan(ctrl);
26932692
break;
26942693
case NVME_AER_NOTICE_FW_ACT_STARTING:
2695-
schedule_work(&ctrl->fw_act_work);
2694+
queue_work(nvme_wq, &ctrl->fw_act_work);
26962695
break;
26972696
default:
26982697
dev_warn(ctrl->device, "async event result %08x\n", result);

drivers/nvme/host/fabrics.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
565565
opts->queue_size = NVMF_DEF_QUEUE_SIZE;
566566
opts->nr_io_queues = num_online_cpus();
567567
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
568+
opts->kato = NVME_DEFAULT_KATO;
568569

569570
options = o = kstrdup(buf, GFP_KERNEL);
570571
if (!options)
@@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
655656
goto out;
656657
}
657658

658-
if (opts->discovery_nqn) {
659-
pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n");
660-
ret = -EINVAL;
661-
goto out;
662-
}
663-
664659
if (token < 0) {
665660
pr_err("Invalid keep_alive_tmo %d\n", token);
666661
ret = -EINVAL;
667662
goto out;
668-
} else if (token == 0) {
663+
} else if (token == 0 && !opts->discovery_nqn) {
669664
/* Allowed for debug */
670665
pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
671666
}
672667
opts->kato = token;
668+
669+
if (opts->discovery_nqn && opts->kato) {
670+
pr_err("Discovery controllers cannot accept KATO != 0\n");
671+
ret = -EINVAL;
672+
goto out;
673+
}
674+
673675
break;
674676
case NVMF_OPT_CTRL_LOSS_TMO:
675677
if (match_int(args, &token)) {
@@ -762,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
762764
uuid_copy(&opts->host->id, &hostid);
763765

764766
out:
765-
if (!opts->discovery_nqn && !opts->kato)
766-
opts->kato = NVME_DEFAULT_KATO;
767767
kfree(options);
768768
return ret;
769769
}

drivers/nvme/host/fc.c

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1376,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
13761376
if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
13771377
status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
13781378
else if (freq->status)
1379-
status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1379+
status = cpu_to_le16(NVME_SC_INTERNAL << 1);
13801380

13811381
/*
13821382
* For the linux implementation, if we have an unsuccesful
@@ -1404,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
14041404
*/
14051405
if (freq->transferred_length !=
14061406
be32_to_cpu(op->cmd_iu.data_len)) {
1407-
status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1407+
status = cpu_to_le16(NVME_SC_INTERNAL << 1);
14081408
goto done;
14091409
}
14101410
result.u64 = 0;
@@ -1421,15 +1421,15 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
14211421
freq->transferred_length ||
14221422
op->rsp_iu.status_code ||
14231423
sqe->common.command_id != cqe->command_id)) {
1424-
status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1424+
status = cpu_to_le16(NVME_SC_INTERNAL << 1);
14251425
goto done;
14261426
}
14271427
result = cqe->result;
14281428
status = cqe->status;
14291429
break;
14301430

14311431
default:
1432-
status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1432+
status = cpu_to_le16(NVME_SC_INTERNAL << 1);
14331433
goto done;
14341434
}
14351435

@@ -1989,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
19891989
* as well as those by FC-NVME spec.
19901990
*/
19911991
WARN_ON_ONCE(sqe->common.metadata);
1992-
WARN_ON_ONCE(sqe->common.dptr.prp1);
1993-
WARN_ON_ONCE(sqe->common.dptr.prp2);
19941992
sqe->common.flags |= NVME_CMD_SGL_METABUF;
19951993

19961994
/*
1997-
* format SQE DPTR field per FC-NVME rules
1998-
* type=data block descr; subtype=offset;
1999-
* offset is currently 0.
1995+
* format SQE DPTR field per FC-NVME rules:
1996+
* type=0x5 Transport SGL Data Block Descriptor
1997+
* subtype=0xA Transport-specific value
1998+
* address=0
1999+
* length=length of the data series
20002000
*/
2001-
sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
2001+
sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2002+
NVME_SGL_FMT_TRANSPORT_A;
20022003
sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
20032004
sqe->rw.dptr.sgl.addr = 0;
20042005

drivers/nvme/host/pci.c

Lines changed: 20 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
#include <linux/mm.h>
2525
#include <linux/module.h>
2626
#include <linux/mutex.h>
27+
#include <linux/once.h>
2728
#include <linux/pci.h>
2829
#include <linux/poison.h>
2930
#include <linux/t10-pi.h>
@@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
540541
}
541542
#endif
542543

544+
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
545+
{
546+
int i;
547+
struct scatterlist *sg;
548+
549+
for_each_sg(sgl, sg, nents, i) {
550+
dma_addr_t phys = sg_phys(sg);
551+
pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
552+
"dma_address:%pad dma_length:%d\n",
553+
i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
554+
sg_dma_len(sg));
555+
}
556+
}
557+
543558
static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
544559
{
545560
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
622637
return BLK_STS_OK;
623638

624639
bad_sgl:
625-
if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
626-
blk_rq_payload_bytes(req), iod->nents)) {
627-
for_each_sg(iod->sg, sg, iod->nents, i) {
628-
dma_addr_t phys = sg_phys(sg);
629-
pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
630-
"dma_address:%pad dma_length:%d\n", i, &phys,
631-
sg->offset, sg->length,
632-
&sg_dma_address(sg),
633-
sg_dma_len(sg));
634-
}
635-
}
640+
WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
641+
"Invalid SGL for payload:%d nents:%d\n",
642+
blk_rq_payload_bytes(req), iod->nents);
636643
return BLK_STS_IOERR;
637-
638644
}
639645

640646
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
13131319
if (result < 0)
13141320
goto release_cq;
13151321

1322+
nvme_init_queue(nvmeq, qid);
13161323
result = queue_request_irq(nvmeq);
13171324
if (result < 0)
13181325
goto release_sq;
13191326

1320-
nvme_init_queue(nvmeq, qid);
13211327
return result;
13221328

13231329
release_sq:
@@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
14641470
return result;
14651471

14661472
nvmeq->cq_vector = 0;
1473+
nvme_init_queue(nvmeq, 0);
14671474
result = queue_request_irq(nvmeq);
14681475
if (result) {
14691476
nvmeq->cq_vector = -1;
@@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work)
21562163
if (result)
21572164
goto out;
21582165

2159-
nvme_init_queue(dev->queues[0], 0);
21602166
result = nvme_alloc_admin_tags(dev);
21612167
if (result)
21622168
goto out;

drivers/nvme/host/rdma.c

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -942,7 +942,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
942942
}
943943

944944
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
945-
WARN_ON_ONCE(!changed);
945+
if (!changed) {
946+
/* state change failure is ok if we're in DELETING state */
947+
WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
948+
return;
949+
}
950+
946951
ctrl->ctrl.nr_reconnects = 0;
947952

948953
nvme_start_ctrl(&ctrl->ctrl);
@@ -962,7 +967,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
962967
struct nvme_rdma_ctrl *ctrl = container_of(work,
963968
struct nvme_rdma_ctrl, err_work);
964969

965-
nvme_stop_ctrl(&ctrl->ctrl);
970+
nvme_stop_keep_alive(&ctrl->ctrl);
966971

967972
if (ctrl->ctrl.queue_count > 1) {
968973
nvme_stop_queues(&ctrl->ctrl);

drivers/nvme/target/core.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -390,10 +390,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
390390
if (status)
391391
nvmet_set_status(req, status);
392392

393-
/* XXX: need to fill in something useful for sq_head */
394-
req->rsp->sq_head = 0;
395-
if (likely(req->sq)) /* may happen during early failure */
396-
req->rsp->sq_id = cpu_to_le16(req->sq->qid);
393+
if (req->sq->size)
394+
req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
395+
req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
396+
req->rsp->sq_id = cpu_to_le16(req->sq->qid);
397397
req->rsp->command_id = req->cmd->common.command_id;
398398

399399
if (req->ns)
@@ -420,6 +420,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
420420
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
421421
u16 qid, u16 size)
422422
{
423+
sq->sqhd = 0;
423424
sq->qid = qid;
424425
sq->size = size;
425426

drivers/nvme/target/fabrics-cmd.c

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
109109
pr_warn("queue already connected!\n");
110110
return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
111111
}
112+
if (!sqsize) {
113+
pr_warn("queue size zero!\n");
114+
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
115+
}
112116

113-
nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
114-
nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
117+
/* note: convert queue size from 0's-based value to 1's-based value */
118+
nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
119+
nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
115120
return 0;
116121
}
117122

0 commit comments

Comments
 (0)