Skip to content

Commit 95c7c09

Browse files
committed
Merge branch 'nvme-4.18' of git://git.infradead.org/nvme into for-linus
Pull NVMe fixes from Christoph: "Fix various little regressions introduced in this merge window, plus a rework of the fibre channel connect and reconnect path to share the code instead of having separate sets of bugs. Last but not least a trivial trace point addition from Hannes." * 'nvme-4.18' of git://git.infradead.org/nvme: nvme-fabrics: fix and refine state checks in __nvmf_check_ready nvme-fabrics: handle the admin-only case properly in nvmf_check_ready nvme-fabrics: refactor queue ready check blk-mq: remove blk_mq_tagset_iter nvme: remove nvme_reinit_tagset nvme-fc: fix nulling of queue data on reconnect nvme-fc: remove reinit_request routine nvme-fc: change controllers first connect to use reconnect path nvme: don't rely on the changed namespace list log nvmet: free smart-log buffer after use nvme-rdma: fix error flow during mapping request data nvme: add bio remapping tracepoint nvme: fix NULL pointer dereference in nvme_init_subsystem
2 parents da66126 + 35897b9 commit 95c7c09

File tree

11 files changed

+154
-224
lines changed

11 files changed

+154
-224
lines changed

block/blk-mq-tag.c

Lines changed: 0 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -311,35 +311,6 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
311311
}
312312
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
313313

314-
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
315-
int (fn)(void *, struct request *))
316-
{
317-
int i, j, ret = 0;
318-
319-
if (WARN_ON_ONCE(!fn))
320-
goto out;
321-
322-
for (i = 0; i < set->nr_hw_queues; i++) {
323-
struct blk_mq_tags *tags = set->tags[i];
324-
325-
if (!tags)
326-
continue;
327-
328-
for (j = 0; j < tags->nr_tags; j++) {
329-
if (!tags->static_rqs[j])
330-
continue;
331-
332-
ret = fn(data, tags->static_rqs[j]);
333-
if (ret)
334-
goto out;
335-
}
336-
}
337-
338-
out:
339-
return ret;
340-
}
341-
EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
342-
343314
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
344315
void *priv)
345316
{

drivers/nvme/host/core.c

Lines changed: 12 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -2208,7 +2208,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
22082208
* Verify that the subsystem actually supports multiple
22092209
* controllers, else bail out.
22102210
*/
2211-
if (!ctrl->opts->discovery_nqn &&
2211+
if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
22122212
nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
22132213
dev_err(ctrl->device,
22142214
"ignoring ctrl due to duplicate subnqn (%s).\n",
@@ -3197,40 +3197,28 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
31973197
nvme_remove_invalid_namespaces(ctrl, nn);
31983198
}
31993199

3200-
static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
3200+
static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
32013201
{
32023202
size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
32033203
__le32 *log;
3204-
int error, i;
3205-
bool ret = false;
3204+
int error;
32063205

32073206
log = kzalloc(log_size, GFP_KERNEL);
32083207
if (!log)
3209-
return false;
3208+
return;
32103209

3210+
/*
3211+
* We need to read the log to clear the AEN, but we don't want to rely
3212+
* on it for the changed namespace information as userspace could have
3213+
* raced with us in reading the log page, which could cause us to miss
3214+
* updates.
3215+
*/
32113216
error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
3212-
if (error) {
3217+
if (error)
32133218
dev_warn(ctrl->device,
32143219
"reading changed ns log failed: %d\n", error);
3215-
goto out_free_log;
3216-
}
3217-
3218-
if (log[0] == cpu_to_le32(0xffffffff))
3219-
goto out_free_log;
3220-
3221-
for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
3222-
u32 nsid = le32_to_cpu(log[i]);
32233220

3224-
if (nsid == 0)
3225-
break;
3226-
dev_info(ctrl->device, "rescanning namespace %d.\n", nsid);
3227-
nvme_validate_ns(ctrl, nsid);
3228-
}
3229-
ret = true;
3230-
3231-
out_free_log:
32323221
kfree(log);
3233-
return ret;
32343222
}
32353223

32363224
static void nvme_scan_work(struct work_struct *work)
@@ -3246,9 +3234,8 @@ static void nvme_scan_work(struct work_struct *work)
32463234
WARN_ON_ONCE(!ctrl->tagset);
32473235

32483236
if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3249-
if (nvme_scan_changed_ns_log(ctrl))
3250-
goto out_sort_namespaces;
32513237
dev_info(ctrl->device, "rescanning namespaces.\n");
3238+
nvme_clear_changed_ns_log(ctrl);
32523239
}
32533240

32543241
if (nvme_identify_ctrl(ctrl, &id))
@@ -3263,7 +3250,6 @@ static void nvme_scan_work(struct work_struct *work)
32633250
nvme_scan_ns_sequential(ctrl, nn);
32643251
out_free_id:
32653252
kfree(id);
3266-
out_sort_namespaces:
32673253
down_write(&ctrl->namespaces_rwsem);
32683254
list_sort(NULL, &ctrl->namespaces, ns_cmp);
32693255
up_write(&ctrl->namespaces_rwsem);
@@ -3641,16 +3627,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
36413627
}
36423628
EXPORT_SYMBOL_GPL(nvme_start_queues);
36433629

3644-
int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
3645-
{
3646-
if (!ctrl->ops->reinit_request)
3647-
return 0;
3648-
3649-
return blk_mq_tagset_iter(set, set->driver_data,
3650-
ctrl->ops->reinit_request);
3651-
}
3652-
EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
3653-
36543630
int __init nvme_core_init(void)
36553631
{
36563632
int result = -ENOMEM;

drivers/nvme/host/fabrics.c

Lines changed: 37 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -536,67 +536,55 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
536536
return NULL;
537537
}
538538

539-
blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
540-
bool queue_live, bool is_connected)
539+
/*
540+
* For something we're not in a state to send to the device the default action
541+
* is to busy it and retry it after the controller state is recovered. However,
542+
* anything marked for failfast or nvme multipath is immediately failed.
543+
*
544+
* Note: commands used to initialize the controller will be marked for failfast.
545+
* Note: nvme cli/ioctl commands are marked for failfast.
546+
*/
547+
blk_status_t nvmf_fail_nonready_command(struct request *rq)
541548
{
542-
struct nvme_command *cmd = nvme_req(rq)->cmd;
549+
if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
550+
return BLK_STS_RESOURCE;
551+
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
552+
return BLK_STS_IOERR;
553+
}
554+
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
543555

544-
if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected))
545-
return BLK_STS_OK;
556+
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
557+
bool queue_live)
558+
{
559+
struct nvme_request *req = nvme_req(rq);
560+
561+
/*
562+
* If we are in some state of setup or teardown only allow
563+
* internally generated commands.
564+
*/
565+
if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
566+
return false;
546567

568+
/*
569+
* Only allow commands on a live queue, except for the connect command,
570+
* which is require to set the queue live in the appropinquate states.
571+
*/
547572
switch (ctrl->state) {
548573
case NVME_CTRL_NEW:
549574
case NVME_CTRL_CONNECTING:
550-
case NVME_CTRL_DELETING:
551-
/*
552-
* This is the case of starting a new or deleting an association
553-
* but connectivity was lost before it was fully created or torn
554-
* down. We need to error the commands used to initialize the
555-
* controller so the reconnect can go into a retry attempt. The
556-
* commands should all be marked REQ_FAILFAST_DRIVER, which will
557-
* hit the reject path below. Anything else will be queued while
558-
* the state settles.
559-
*/
560-
if (!is_connected)
561-
break;
562-
563-
/*
564-
* If queue is live, allow only commands that are internally
565-
* generated pass through. These are commands on the admin
566-
* queue to initialize the controller. This will reject any
567-
* ioctl admin cmds received while initializing.
568-
*/
569-
if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
570-
return BLK_STS_OK;
571-
572-
/*
573-
* If the queue is not live, allow only a connect command. This
574-
* will reject any ioctl admin cmd as well as initialization
575-
* commands if the controller reverted the queue to non-live.
576-
*/
577-
if (!queue_live && blk_rq_is_passthrough(rq) &&
578-
cmd->common.opcode == nvme_fabrics_command &&
579-
cmd->fabrics.fctype == nvme_fabrics_type_connect)
580-
return BLK_STS_OK;
575+
if (req->cmd->common.opcode == nvme_fabrics_command &&
576+
req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
577+
return true;
581578
break;
582579
default:
583580
break;
581+
case NVME_CTRL_DEAD:
582+
return false;
584583
}
585584

586-
/*
587-
* Any other new io is something we're not in a state to send to the
588-
* device. Default action is to busy it and retry it after the
589-
* controller state is recovered. However, anything marked for failfast
590-
* or nvme multipath is immediately failed. Note: commands used to
591-
* initialize the controller will be marked for failfast.
592-
* Note: nvme cli/ioctl commands are marked for failfast.
593-
*/
594-
if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
595-
return BLK_STS_RESOURCE;
596-
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
597-
return BLK_STS_IOERR;
585+
return queue_live;
598586
}
599-
EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
587+
EXPORT_SYMBOL_GPL(__nvmf_check_ready);
600588

601589
static const match_table_t opt_tokens = {
602590
{ NVMF_OPT_TRANSPORT, "transport=%s" },

drivers/nvme/host/fabrics.h

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,17 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
162162
void nvmf_free_options(struct nvmf_ctrl_options *opts);
163163
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
164164
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
165-
blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl,
166-
struct request *rq, bool queue_live, bool is_connected);
165+
blk_status_t nvmf_fail_nonready_command(struct request *rq);
166+
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
167+
bool queue_live);
168+
169+
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
170+
bool queue_live)
171+
{
172+
if (likely(ctrl->state == NVME_CTRL_LIVE ||
173+
ctrl->state == NVME_CTRL_ADMIN_ONLY))
174+
return true;
175+
return __nvmf_check_ready(ctrl, rq, queue_live);
176+
}
167177

168178
#endif /* _NVME_FABRICS_H */

0 commit comments

Comments (0)