Commit 4a0c6f4

doug-gilbert authored and martinkpetersen committed
scsi: scsi_debug: Add new defer type for mq_poll
Add a new sdeb_defer_type enumeration value, SDEB_DEFER_POLL, for requests
that have REQ_HIPRI set in the cmd_flags field. It is expected that these
requests will be polled via the mq_poll entry point, which is driven by
calls to blk_poll() in the block layer, so timer events are not 'wired up'
in the normal fashion.

There are still cases with short delays (e.g. < 10 microseconds) where, by
the time command response processing occurs, the delay has already been
exceeded; in those cases the code calls scsi_done() directly and there is
no window for mq_poll() to be called.

Add an 'mq_polls' counter that increments on each scsi_done() call made via
the mq_poll entry point. It can be used to show (with
'cat /proc/scsi/scsi_debug/<host_id>') that blk_poll() is causing
completions rather than some other mechanism.

Link: https://lore.kernel.org/r/[email protected]
Tested-by: Kashyap Desai <[email protected]>
Signed-off-by: Douglas Gilbert <[email protected]>
Signed-off-by: Kashyap Desai <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
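As a rough orientation (not the driver code itself), here is a small user-space C sketch of the model described above: a submitted command records an absolute completion time and, if it is a polled (REQ_HIPRI-style) request, a poll defer type; a poll routine then completes only entries whose completion time has passed and returns how many it finished, which is the count the new mq_polls counter accumulates in the real driver. All names below (fake_cmd, submit, poll_queue) are invented for this illustration.

/*
 * Stand-alone, user-space sketch of the polling completion model.
 * Names are made up here; the real driver uses sdebug_queued_cmd,
 * sdebug_defer and the mq_poll entry point driven by blk_poll().
 */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

enum defer_type { DEFER_NONE, DEFER_TIMER, DEFER_POLL };

struct fake_cmd {
	bool in_use;
	enum defer_type defer_t;
	long long cmpl_ts_ns;	/* absolute time the response becomes due */
};

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Submission side: polled (high-priority) requests get DEFER_POLL and a
 * due time; everything else would arm a timer or workqueue instead. */
static void submit(struct fake_cmd *c, bool hipri, long long delay_ns)
{
	c->in_use = true;
	c->cmpl_ts_ns = now_ns() + delay_ns;
	c->defer_t = hipri ? DEFER_POLL : DEFER_TIMER;
}

/* Poll side: walk the queue, complete only DEFER_POLL entries whose due
 * time has passed, and return how many were completed. */
static int poll_queue(struct fake_cmd *q, int n)
{
	int k, num_entries = 0;
	long long t = now_ns();

	for (k = 0; k < n; k++) {
		if (!q[k].in_use || q[k].defer_t != DEFER_POLL)
			continue;	/* ignore non-polled requests */
		if (t < q[k].cmpl_ts_ns)
			continue;	/* response not due yet */
		q[k].in_use = false;
		q[k].defer_t = DEFER_NONE;
		num_entries++;		/* real driver also bumps mq_polls */
	}
	return num_entries;
}

int main(void)
{
	struct fake_cmd q[4] = { 0 };

	submit(&q[0], true, 0);			/* polled, already due */
	submit(&q[1], true, 1000000000LL);	/* polled, due in 1 s */
	submit(&q[2], false, 0);		/* not polled; timer path */
	printf("completed now: %d\n", poll_queue(q, 4));	/* prints 1 */
	return 0;
}

Returning the number of completed entries mirrors what sdebug_blk_mq_poll() does in the diff below; requests not marked for polling are simply skipped, just as the new loop ignores non REQ_HIPRI requests.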
1 parent c4b57d8 commit 4a0c6f4

1 file changed, 94 insertions(+), 54 deletions(-)


drivers/scsi/scsi_debug.c

@@ -322,17 +322,19 @@ struct sdeb_store_info {
 	container_of(d, struct sdebug_host_info, dev)
 
 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
-		      SDEB_DEFER_WQ = 2};
+		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
 
 struct sdebug_defer {
 	struct hrtimer hrt;
 	struct execute_work ew;
+	ktime_t cmpl_ts;/* time since boot to complete this cmd */
 	int sqa_idx;	/* index of sdebug_queue array */
 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
 	int hc_idx;	/* hostwide tag index */
 	int issuing_cpu;
 	bool init_hrt;
 	bool init_wq;
+	bool init_poll;
 	bool aborted;	/* true when blk_abort_request() already called */
 	enum sdeb_defer_type defer_t;
 };
@@ -357,6 +359,7 @@ static atomic_t sdebug_completions;  /* count of deferred completions */
 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
 static atomic_t sdeb_inject_pending;
+static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
 
 struct opcode_info_t {
 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
@@ -4730,7 +4733,6 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 	struct scsi_cmnd *scp;
 	struct sdebug_dev_info *devip;
 
-	sd_dp->defer_t = SDEB_DEFER_NONE;
 	if (unlikely(aborted))
 		sd_dp->aborted = false;
 	qc_idx = sd_dp->qc_idx;
@@ -4745,6 +4747,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 		return;
 	}
 	spin_lock_irqsave(&sqp->qc_lock, iflags);
+	sd_dp->defer_t = SDEB_DEFER_NONE;
 	sqcp = &sqp->qc_arr[qc_idx];
 	scp = sqcp->a_cmnd;
 	if (unlikely(scp == NULL)) {
@@ -5434,13 +5437,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	sd_dp = sqcp->sd_dp;
 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 
-	/* Do not complete IO from default completion path.
-	 * Let it to be on queue.
-	 * Completion should happen from mq_poll interface.
-	 */
-	if ((sqp - sdebug_q_arr) >= (submit_queues - poll_queues))
-		return 0;
-
 	if (!sd_dp) {
 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
 		if (!sd_dp) {
@@ -5517,40 +5513,66 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 				kt -= d;
 			}
 		}
-		if (!sd_dp->init_hrt) {
-			sd_dp->init_hrt = true;
-			sqcp->sd_dp = sd_dp;
-			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
-				     HRTIMER_MODE_REL_PINNED);
-			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
-			sd_dp->sqa_idx = sqp - sdebug_q_arr;
-			sd_dp->qc_idx = k;
+		sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
+		if (cmnd->request->cmd_flags & REQ_HIPRI) {
+			spin_lock_irqsave(&sqp->qc_lock, iflags);
+			if (!sd_dp->init_poll) {
+				sd_dp->init_poll = true;
+				sqcp->sd_dp = sd_dp;
+				sd_dp->sqa_idx = sqp - sdebug_q_arr;
+				sd_dp->qc_idx = k;
+			}
+			sd_dp->defer_t = SDEB_DEFER_POLL;
+			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+		} else {
+			if (!sd_dp->init_hrt) {
+				sd_dp->init_hrt = true;
+				sqcp->sd_dp = sd_dp;
+				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
+					     HRTIMER_MODE_REL_PINNED);
+				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+				sd_dp->sqa_idx = sqp - sdebug_q_arr;
+				sd_dp->qc_idx = k;
+			}
+			sd_dp->defer_t = SDEB_DEFER_HRT;
+			/* schedule the invocation of scsi_done() for a later time */
+			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
 		}
 		if (sdebug_statistics)
 			sd_dp->issuing_cpu = raw_smp_processor_id();
-		sd_dp->defer_t = SDEB_DEFER_HRT;
-		/* schedule the invocation of scsi_done() for a later time */
-		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
 	} else {	/* jdelay < 0, use work queue */
-		if (!sd_dp->init_wq) {
-			sd_dp->init_wq = true;
-			sqcp->sd_dp = sd_dp;
-			sd_dp->sqa_idx = sqp - sdebug_q_arr;
-			sd_dp->qc_idx = k;
-			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
-		}
-		if (sdebug_statistics)
-			sd_dp->issuing_cpu = raw_smp_processor_id();
-		sd_dp->defer_t = SDEB_DEFER_WQ;
 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
 			     atomic_read(&sdeb_inject_pending)))
 			sd_dp->aborted = true;
-		schedule_work(&sd_dp->ew.work);
-		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
-			     atomic_read(&sdeb_inject_pending))) {
+		sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
+		if (cmnd->request->cmd_flags & REQ_HIPRI) {
+			spin_lock_irqsave(&sqp->qc_lock, iflags);
+			if (!sd_dp->init_poll) {
+				sd_dp->init_poll = true;
+				sqcp->sd_dp = sd_dp;
+				sd_dp->sqa_idx = sqp - sdebug_q_arr;
+				sd_dp->qc_idx = k;
+			}
+			sd_dp->defer_t = SDEB_DEFER_POLL;
+			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+		} else {
+			if (!sd_dp->init_wq) {
+				sd_dp->init_wq = true;
+				sqcp->sd_dp = sd_dp;
+				sd_dp->sqa_idx = sqp - sdebug_q_arr;
+				sd_dp->qc_idx = k;
+				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+			}
+			sd_dp->defer_t = SDEB_DEFER_WQ;
+			schedule_work(&sd_dp->ew.work);
+		}
+		if (sdebug_statistics)
+			sd_dp->issuing_cpu = raw_smp_processor_id();
+		if (unlikely(sd_dp->aborted)) {
 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
 			blk_abort_request(cmnd->request);
 			atomic_set(&sdeb_inject_pending, 0);
+			sd_dp->aborted = false;
 		}
 	}
 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
@@ -5779,11 +5801,12 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
 		   dix_reads, dix_writes, dif_errors);
 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
 		   sdebug_statistics);
-	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
+	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
 		   atomic_read(&sdebug_cmnd_count),
 		   atomic_read(&sdebug_completions),
 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
-		   atomic_read(&sdebug_a_tsf));
+		   atomic_read(&sdebug_a_tsf),
+		   atomic_read(&sdeb_mq_poll_count));
 
 	seq_printf(m, "submit_queues=%d\n", submit_queues);
 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
@@ -7247,70 +7270,87 @@ static int sdebug_map_queues(struct Scsi_Host *shost)
 
 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
 {
-	int qc_idx;
-	int retiring = 0;
+	bool first;
+	bool retiring = false;
+	int num_entries = 0;
+	unsigned int qc_idx = 0;
 	unsigned long iflags;
+	ktime_t kt_from_boot = ktime_get_boottime();
 	struct sdebug_queue *sqp;
 	struct sdebug_queued_cmd *sqcp;
 	struct scsi_cmnd *scp;
 	struct sdebug_dev_info *devip;
-	int num_entries = 0;
+	struct sdebug_defer *sd_dp;
 
 	sqp = sdebug_q_arr + queue_num;
+	spin_lock_irqsave(&sqp->qc_lock, iflags);
 
-	do {
-		spin_lock_irqsave(&sqp->qc_lock, iflags);
-		qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
-		if (unlikely((qc_idx < 0) || (qc_idx >= sdebug_max_queue)))
-			goto out;
+	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
+		if (first) {
+			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+			first = false;
+		} else {
+			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
+		}
+		if (unlikely(qc_idx >= sdebug_max_queue))
+			break;
 
 		sqcp = &sqp->qc_arr[qc_idx];
+		sd_dp = sqcp->sd_dp;
+		if (unlikely(!sd_dp))
+			continue;
 		scp = sqcp->a_cmnd;
 		if (unlikely(scp == NULL)) {
-			pr_err("scp is NULL, queue_num=%d, qc_idx=%d from %s\n",
+			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
			       queue_num, qc_idx, __func__);
-			goto out;
+			break;
 		}
+		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
+			if (kt_from_boot < sd_dp->cmpl_ts)
+				continue;
+
+		} else		/* ignoring non REQ_HIPRI requests */
+			continue;
 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
 		if (likely(devip))
 			atomic_dec(&devip->num_in_q);
 		else
 			pr_err("devip=NULL from %s\n", __func__);
 		if (unlikely(atomic_read(&retired_max_queue) > 0))
-			retiring = 1;
+			retiring = true;
 
 		sqcp->a_cmnd = NULL;
 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
-			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%d from %s\n",
+			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
			       sqp, queue_num, qc_idx, __func__);
-			goto out;
+			break;
 		}
-
 		if (unlikely(retiring)) {	/* user has reduced max_queue */
 			int k, retval;
 
 			retval = atomic_read(&retired_max_queue);
 			if (qc_idx >= retval) {
 				pr_err("index %d too large\n", retval);
-				goto out;
+				break;
 			}
 			k = find_last_bit(sqp->in_use_bm, retval);
 			if ((k < sdebug_max_queue) || (k == retval))
 				atomic_set(&retired_max_queue, 0);
 			else
 				atomic_set(&retired_max_queue, k + 1);
 		}
+		sd_dp->defer_t = SDEB_DEFER_NONE;
 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 		scp->scsi_done(scp); /* callback to mid level */
+		spin_lock_irqsave(&sqp->qc_lock, iflags);
 		num_entries++;
-	} while (1);
-
-out:
+	}
 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+	if (num_entries > 0)
+		atomic_add(num_entries, &sdeb_mq_poll_count);
 	return num_entries;
 }
 
-
 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
 				   struct scsi_cmnd *scp)
 {
