Skip to content

Commit b9bce6e

Browse files
committed
Merge tag 'block-6.0-2022-08-19' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A few fixes that should go into this release:

   - Small series of patches for ublk (ZiyangZhang)

   - Remove dead function (Yu)

   - Fix for running a block queue in case of resource starvation (Yufen)"

* tag 'block-6.0-2022-08-19' of git://git.kernel.dk/linux-block:
  blk-mq: run queue no matter whether the request is the last request
  blk-mq: remove unused function blk_mq_queue_stopped()
  ublk_drv: do not add a re-issued request aborted previously to ioucmd's task_work
  ublk_drv: update comment for __ublk_fail_req()
  ublk_drv: check ubq_daemon_is_dying() in __ublk_rq_task_work()
  ublk_drv: update iod->addr for UBLK_IO_NEED_GET_DATA
2 parents beaf139 + d3b3859 commit b9bce6e

File tree

3 files changed: +28 additions, -28 deletions

block/blk-mq.c

Lines changed: 1 addition & 21 deletions
@@ -2229,26 +2229,6 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
 
-/**
- * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
- * @q: request queue.
- *
- * The caller is responsible for serializing this function against
- * blk_mq_{start,stop}_hw_queue().
- */
-bool blk_mq_queue_stopped(struct request_queue *q)
-{
-	struct blk_mq_hw_ctx *hctx;
-	unsigned long i;
-
-	queue_for_each_hw_ctx(q, hctx, i)
-		if (blk_mq_hctx_stopped(hctx))
-			return true;
-
-	return false;
-}
-EXPORT_SYMBOL(blk_mq_queue_stopped);
-
 /*
  * This function is often used for pausing .queue_rq() by driver when
  * there isn't enough resource or some conditions aren't satisfied, and
@@ -2570,7 +2550,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false, last);
+			blk_mq_request_bypass_insert(rq, false, true);
 			blk_mq_commit_rqs(hctx, &queued, from_schedule);
 			return;
 		default:

drivers/block/ublk_drv.c

Lines changed: 27 additions & 6 deletions
@@ -555,7 +555,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
 	return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
 }
 
-static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 {
 	return ubq->ubq_daemon->flags & PF_EXITING;
 }
@@ -605,8 +605,9 @@ static void ublk_complete_rq(struct request *req)
 }
 
 /*
- * __ublk_fail_req() may be called from abort context or ->ubq_daemon
- * context during exiting, so lock is required.
+ * Since __ublk_rq_task_work always fails requests immediately during
+ * exiting, __ublk_fail_req() is only called from abort context during
+ * exiting. So lock is unnecessary.
  *
  * Also aborting may not be started yet, keep in mind that one failed
  * request may be issued by block layer again.
@@ -644,8 +645,7 @@ static inline void __ublk_rq_task_work(struct request *req)
 	struct ublk_device *ub = ubq->dev;
 	int tag = req->tag;
 	struct ublk_io *io = &ubq->ios[tag];
-	bool task_exiting = current != ubq->ubq_daemon ||
-		(current->flags & PF_EXITING);
+	bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
 	unsigned int mapped_bytes;
 
 	pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
@@ -680,6 +680,11 @@ static inline void __ublk_rq_task_work(struct request *req)
 		 * do the copy work.
 		 */
 		io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+		/* update iod->addr because ublksrv may have passed a new io buffer */
+		ublk_get_iod(ubq, req->tag)->addr = io->addr;
+		pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+				__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+				ublk_get_iod(ubq, req->tag)->addr);
 	}
 
 	mapped_bytes = ublk_map_io(ubq, req, io);
@@ -751,9 +756,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 		if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
 			goto fail;
 	} else {
-		struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+		struct ublk_io *io = &ubq->ios[rq->tag];
+		struct io_uring_cmd *cmd = io->cmd;
 		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
 
+		/*
+		 * If the check pass, we know that this is a re-issued request aborted
+		 * previously in monitor_work because the ubq_daemon(cmd's task) is
+		 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+		 * because this ioucmd's io_uring context may be freed now if no inflight
+		 * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
+		 *
+		 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
+		 * the tag). Then the request is re-started(allocating the tag) and we are here.
+		 * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+		 * guarantees that here is a re-issued request aborted previously.
+		 */
+		if ((io->flags & UBLK_IO_FLAG_ABORTED))
+			goto fail;
+
 		pdu->req = rq;
 		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
 	}

include/linux/blk-mq.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -857,7 +857,6 @@ void blk_mq_kick_requeue_list(struct request_queue *q);
857857
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
858858
void blk_mq_complete_request(struct request *rq);
859859
bool blk_mq_complete_request_remote(struct request *rq);
860-
bool blk_mq_queue_stopped(struct request_queue *q);
861860
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
862861
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
863862
void blk_mq_stop_hw_queues(struct request_queue *q);

Comments: 0