
Commit 265c559

Merge tag 'for-linus-20180616' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes that should go into -rc1. This contains:

   - bsg_open vs bsg_unregister race fix (Anatoliy)

   - NVMe pull request from Christoph, with fixes for regressions in
     this window, FC connect/reconnect path code unification, and a
     trace point addition.

   - timeout fix (Christoph)

   - remove a few unused functions (Christoph)

   - blk-mq tag_set reinit fix (Roman)"

* tag 'for-linus-20180616' of git://git.kernel.dk/linux-block:
  bsg: fix race of bsg_open and bsg_unregister
  block: remov blk_queue_invalidate_tags
  nvme-fabrics: fix and refine state checks in __nvmf_check_ready
  nvme-fabrics: handle the admin-only case properly in nvmf_check_ready
  nvme-fabrics: refactor queue ready check
  blk-mq: remove blk_mq_tagset_iter
  nvme: remove nvme_reinit_tagset
  nvme-fc: fix nulling of queue data on reconnect
  nvme-fc: remove reinit_request routine
  blk-mq: don't time out requests again that are in the timeout handler
  nvme-fc: change controllers first connect to use reconnect path
  nvme: don't rely on the changed namespace list log
  nvmet: free smart-log buffer after use
  nvme-rdma: fix error flow during mapping request data
  nvme: add bio remapping tracepoint
  nvme: fix NULL pointer dereference in nvme_init_subsystem
  blk-mq: reinit q->tag_set_list entry only after grace period

2 parents 5e7b921 + d6c7396 commit 265c559

File tree: 16 files changed, +174 and -275 lines


Documentation/block/biodoc.txt

Lines changed: 1 addition & 14 deletions
@@ -752,18 +752,6 @@ completion of the request to the block layer. This means ending tag
 operations before calling end_that_request_last()! For an example of a user
 of these helpers, see the IDE tagged command queueing support.
 
-Certain hardware conditions may dictate a need to invalidate the block tag
-queue. For instance, on IDE any tagged request error needs to clear both
-the hardware and software block queue and enable the driver to sanely restart
-all the outstanding requests. There's a third helper to do that:
-
-        blk_queue_invalidate_tags(struct request_queue *q)
-
-Clear the internal block tag queue and re-add all the pending requests
-to the request queue. The driver will receive them again on the
-next request_fn run, just like it did the first time it encountered
-them.
-
 3.2.5.2 Tag info
 
 Some block functions exist to query current tag status or to go from a
@@ -805,8 +793,7 @@ Internally, block manages tags in the blk_queue_tag structure:
 Most of the above is simple and straight forward, however busy_list may need
 a bit of explaining. Normally we don't care too much about request ordering,
 but in the event of any barrier requests in the tag queue we need to ensure
-that requests are restarted in the order they were queue. This may happen
-if the driver needs to use blk_queue_invalidate_tags().
+that requests are restarted in the order they were queue.
 
 3.3 I/O Submission
 

block/blk-mq-tag.c

Lines changed: 0 additions & 29 deletions
@@ -311,35 +311,6 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 }
 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
-int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
-                       int (fn)(void *, struct request *))
-{
-        int i, j, ret = 0;
-
-        if (WARN_ON_ONCE(!fn))
-                goto out;
-
-        for (i = 0; i < set->nr_hw_queues; i++) {
-                struct blk_mq_tags *tags = set->tags[i];
-
-                if (!tags)
-                        continue;
-
-                for (j = 0; j < tags->nr_tags; j++) {
-                        if (!tags->static_rqs[j])
-                                continue;
-
-                        ret = fn(data, tags->static_rqs[j]);
-                        if (ret)
-                                goto out;
-                }
-        }
-
-out:
-        return ret;
-}
-EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
-
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                 void *priv)
 {

block/blk-mq.c

Lines changed: 6 additions & 2 deletions
@@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 
         if (blk_mq_request_started(rq)) {
                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
+                rq->rq_flags &= ~RQF_TIMED_OUT;
                 if (q->dma_drain_size && blk_rq_bytes(rq))
                         rq->nr_phys_segments--;
         }
@@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
+        req->rq_flags |= RQF_TIMED_OUT;
         if (req->q->mq_ops->timeout) {
                 enum blk_eh_timer_return ret;
 
@@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
         }
 
+        req->rq_flags &= ~RQF_TIMED_OUT;
         blk_add_timer(req);
 }
 
@@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
                 return false;
+        if (rq->rq_flags & RQF_TIMED_OUT)
+                return false;
 
         deadline = blk_rq_deadline(rq);
         if (time_after_eq(jiffies, deadline))
@@ -2349,16 +2354,15 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
         mutex_lock(&set->tag_list_lock);
         list_del_rcu(&q->tag_set_list);
-        INIT_LIST_HEAD(&q->tag_set_list);
         if (list_is_singular(&set->tag_list)) {
                 /* just transitioned to unshared */
                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
                 /* update existing queue */
                 blk_mq_update_tag_set_depth(set, false);
         }
         mutex_unlock(&set->tag_list_lock);
-
         synchronize_rcu();
+        INIT_LIST_HEAD(&q->tag_set_list);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
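
Note: the timeout part of this diff is a small flag protocol: a request is marked RQF_TIMED_OUT before its driver timeout handler runs so the expiry scan will not pick it up again, and the mark is cleared only when the timer is re-armed. The standalone C program below is a minimal userspace sketch of that protocol only; fake_request, req_expired and rq_timed_out are invented stand-ins that mirror the kernel names, not kernel code.

/*
 * Userspace sketch of the RQF_TIMED_OUT guard added above. Illustration
 * only: the struct and helpers are simplified stand-ins, and "time" is a
 * plain counter instead of jiffies.
 */
#include <stdbool.h>
#include <stdio.h>

#define RQF_TIMED_OUT (1u << 0)         /* timeout handling in progress */

struct fake_request {
        unsigned int rq_flags;
        unsigned long deadline;
};

/* Mirrors the new check in blk_mq_req_expired(): a request whose timeout
 * handler is already running must not be expired a second time. */
static bool req_expired(const struct fake_request *rq, unsigned long now)
{
        if (rq->rq_flags & RQF_TIMED_OUT)
                return false;
        return now >= rq->deadline;
}

/* Mirrors blk_mq_rq_timed_out(): set the flag around the (stubbed) driver
 * handler, clear it again when the timer is re-armed. */
static void rq_timed_out(struct fake_request *rq, unsigned long now)
{
        rq->rq_flags |= RQF_TIMED_OUT;
        printf("timeout handler running, request shielded from re-expiry\n");
        /* ... a real driver ->timeout() callback would run here ... */
        rq->rq_flags &= ~RQF_TIMED_OUT;
        rq->deadline = now + 30;        /* re-arm, as blk_add_timer() does */
}

int main(void)
{
        struct fake_request rq = { .rq_flags = 0, .deadline = 10 };
        unsigned long now = 20;

        if (req_expired(&rq, now))
                rq_timed_out(&rq, now);

        /* The same scan time no longer expires the request: while the
         * handler ran, the flag shielded it; afterwards the pushed-out
         * deadline does. */
        printf("expired again: %s\n", req_expired(&rq, now) ? "yes" : "no");
        return 0;
}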

block/blk-tag.c

Lines changed: 0 additions & 22 deletions
@@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
          */
         q->queue_tags = tags;
         queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
-        INIT_LIST_HEAD(&q->tag_busy_list);
         return 0;
 }
 EXPORT_SYMBOL(blk_queue_init_tags);
@@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
         rq->tag = tag;
         bqt->tag_index[tag] = rq;
         blk_start_request(rq);
-        list_add(&rq->queuelist, &q->tag_busy_list);
         return 0;
 }
 EXPORT_SYMBOL(blk_queue_start_tag);
-
-/**
- * blk_queue_invalidate_tags - invalidate all pending tags
- * @q: the request queue for the device
- *
- * Description:
- *  Hardware conditions may dictate a need to stop all pending requests.
- *  In this case, we will safely clear the block side of the tag queue and
- *  readd all requests to the request queue in the right order.
- **/
-void blk_queue_invalidate_tags(struct request_queue *q)
-{
-        struct list_head *tmp, *n;
-
-        lockdep_assert_held(q->queue_lock);
-
-        list_for_each_safe(tmp, n, &q->tag_busy_list)
-                blk_requeue_request(q, list_entry_rq(tmp));
-}
-EXPORT_SYMBOL(blk_queue_invalidate_tags);

block/bsg.c

Lines changed: 11 additions & 11 deletions
@@ -693,6 +693,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
         struct bsg_device *bd;
         unsigned char buf[32];
 
+        lockdep_assert_held(&bsg_mutex);
+
         if (!blk_get_queue(rq))
                 return ERR_PTR(-ENXIO);
 
@@ -707,22 +709,20 @@
         bsg_set_block(bd, file);
 
         atomic_set(&bd->ref_count, 1);
-        mutex_lock(&bsg_mutex);
         hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
         strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
         bsg_dbg(bd, "bound to <%s>, max queue %d\n",
                 format_dev_t(buf, inode->i_rdev), bd->max_queue);
 
-        mutex_unlock(&bsg_mutex);
         return bd;
 }
 
 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
         struct bsg_device *bd;
 
-        mutex_lock(&bsg_mutex);
+        lockdep_assert_held(&bsg_mutex);
 
         hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
                 if (bd->queue == q) {
@@ -732,7 +732,6 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
         }
         bd = NULL;
 found:
-        mutex_unlock(&bsg_mutex);
         return bd;
 }
 
@@ -746,17 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
          */
         mutex_lock(&bsg_mutex);
         bcd = idr_find(&bsg_minor_idr, iminor(inode));
-        mutex_unlock(&bsg_mutex);
 
-        if (!bcd)
-                return ERR_PTR(-ENODEV);
+        if (!bcd) {
+                bd = ERR_PTR(-ENODEV);
+                goto out_unlock;
+        }
 
         bd = __bsg_get_device(iminor(inode), bcd->queue);
-        if (bd)
-                return bd;
-
-        bd = bsg_add_device(inode, bcd->queue, file);
+        if (!bd)
+                bd = bsg_add_device(inode, bcd->queue, file);
 
+out_unlock:
+        mutex_unlock(&bsg_mutex);
         return bd;
 }
 
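
Note: the bsg change closes a check-then-act race: the lookup in __bsg_get_device and the insertion in bsg_add_device used to run in separate bsg_mutex critical sections, so a concurrent bsg_unregister could slip in between a failed lookup and the add. The sketch below models the fixed "hold one lock across lookup-or-create" shape in plain C with pthreads; device_table, lookup_locked, add_locked and get_device are invented stand-ins, not bsg APIs, with error handling reduced to the minimum.

/*
 * Userspace model of the reworked bsg_get_device() locking: one critical
 * section now covers both the lookup and the add. Names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;

struct device {
        int minor;
        struct device *next;
};

static struct device *device_table;     /* protected by table_mutex */

/* Caller must hold table_mutex (the lockdep_assert_held() analogue). */
static struct device *lookup_locked(int minor)
{
        struct device *d;

        for (d = device_table; d; d = d->next)
                if (d->minor == minor)
                        return d;
        return NULL;
}

/* Caller must hold table_mutex. */
static struct device *add_locked(int minor)
{
        struct device *d = malloc(sizeof(*d));

        if (!d)
                return NULL;
        d->minor = minor;
        d->next = device_table;
        device_table = d;
        return d;
}

/* Mirrors the fixed flow: find-or-create under a single lock, so a racing
 * unregister (which would also take table_mutex) cannot interleave. */
static struct device *get_device(int minor)
{
        struct device *d;

        pthread_mutex_lock(&table_mutex);
        d = lookup_locked(minor);
        if (!d)
                d = add_locked(minor);
        pthread_mutex_unlock(&table_mutex);
        return d;
}

int main(void)
{
        struct device *a = get_device(3);
        struct device *b = get_device(3);

        printf("same device object: %s\n", a == b ? "yes" : "no");
        return 0;
}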

drivers/nvme/host/core.c

Lines changed: 12 additions & 36 deletions
@@ -2208,7 +2208,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
          * Verify that the subsystem actually supports multiple
          * controllers, else bail out.
          */
-        if (!ctrl->opts->discovery_nqn &&
+        if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
             nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
                 dev_err(ctrl->device,
                         "ignoring ctrl due to duplicate subnqn (%s).\n",
@@ -3197,40 +3197,28 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
         nvme_remove_invalid_namespaces(ctrl, nn);
 }
 
-static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
+static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
 {
         size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
         __le32 *log;
-        int error, i;
-        bool ret = false;
+        int error;
 
         log = kzalloc(log_size, GFP_KERNEL);
         if (!log)
-                return false;
+                return;
 
+        /*
+         * We need to read the log to clear the AEN, but we don't want to rely
+         * on it for the changed namespace information as userspace could have
+         * raced with us in reading the log page, which could cause us to miss
+         * updates.
+         */
         error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
-        if (error) {
+        if (error)
                 dev_warn(ctrl->device,
                         "reading changed ns log failed: %d\n", error);
-                goto out_free_log;
-        }
-
-        if (log[0] == cpu_to_le32(0xffffffff))
-                goto out_free_log;
-
-        for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
-                u32 nsid = le32_to_cpu(log[i]);
 
-                if (nsid == 0)
-                        break;
-                dev_info(ctrl->device, "rescanning namespace %d.\n", nsid);
-                nvme_validate_ns(ctrl, nsid);
-        }
-        ret = true;
-
-out_free_log:
         kfree(log);
-        return ret;
 }
 
 static void nvme_scan_work(struct work_struct *work)
@@ -3246,9 +3234,8 @@ static void nvme_scan_work(struct work_struct *work)
         WARN_ON_ONCE(!ctrl->tagset);
 
         if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
-                if (nvme_scan_changed_ns_log(ctrl))
-                        goto out_sort_namespaces;
                 dev_info(ctrl->device, "rescanning namespaces.\n");
+                nvme_clear_changed_ns_log(ctrl);
         }
 
         if (nvme_identify_ctrl(ctrl, &id))
@@ -3263,7 +3250,6 @@ static void nvme_scan_work(struct work_struct *work)
         nvme_scan_ns_sequential(ctrl, nn);
 out_free_id:
         kfree(id);
-out_sort_namespaces:
         down_write(&ctrl->namespaces_rwsem);
         list_sort(NULL, &ctrl->namespaces, ns_cmp);
         up_write(&ctrl->namespaces_rwsem);
@@ -3641,16 +3627,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
-int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
-{
-        if (!ctrl->ops->reinit_request)
-                return 0;
-
-        return blk_mq_tagset_iter(set, set->driver_data,
-                        ctrl->ops->reinit_request);
-}
-EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
-
 int __init nvme_core_init(void)
 {
         int result = -ENOMEM;
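
Note: the nvme_init_subsystem change matters because ctrl->opts is only set up by the fabrics code, so it can be NULL (for example on PCIe controllers) and the old !ctrl->opts->discovery_nqn dereferenced a NULL pointer. The fix leans on C's short-circuit evaluation, as the small sketch below illustrates; fake_opts and fake_ctrl are invented stand-ins, not the kernel's nvme structures.

/*
 * Illustration of the NULL-pointer fix in nvme_init_subsystem() above.
 * Stand-in types only; the point is the short-circuit on the NULL check.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_opts {
        bool discovery_nqn;
};

struct fake_ctrl {
        struct fake_opts *opts;         /* may be NULL, e.g. no fabrics opts */
};

int main(void)
{
        struct fake_ctrl ctrl = { .opts = NULL };

        /* Old form: !ctrl.opts->discovery_nqn would dereference NULL and
         * crash. The fixed form checks ctrl.opts first, so the member
         * access is never reached when opts is missing. */
        if (!(ctrl.opts && ctrl.opts->discovery_nqn))
                printf("treating as non-discovery controller\n");

        return 0;
}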
