@@ -322,17 +322,19 @@ struct sdeb_store_info {
 	container_of(d, struct sdebug_host_info, dev)
 
 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
-		      SDEB_DEFER_WQ = 2};
+		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
 
 struct sdebug_defer {
 	struct hrtimer hrt;
 	struct execute_work ew;
+	ktime_t cmpl_ts;/* time since boot to complete this cmd */
 	int sqa_idx;	/* index of sdebug_queue array */
 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
 	int hc_idx;	/* hostwide tag index */
 	int issuing_cpu;
 	bool init_hrt;
 	bool init_wq;
+	bool init_poll;
 	bool aborted;	/* true when blk_abort_request() already called */
 	enum sdeb_defer_type defer_t;
 };
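
The new cmpl_ts field holds an absolute completion deadline (ktime since boot) rather than a relative delay, and SDEB_DEFER_POLL marks commands whose completion is reaped by the poll path instead of an hrtimer or work item. A minimal sketch of how such a deadline is consumed, using the field names above; the helper is hypothetical and only for illustration, not part of the patch:

/* Illustration only: a command deferred for polling is "ready" once the
 * current boottime clock has passed the deadline recorded at submission.
 */
static bool sdeb_poll_ready(const struct sdebug_defer *sd_dp)
{
	return sd_dp->defer_t == SDEB_DEFER_POLL &&
	       ktime_get_boottime() >= sd_dp->cmpl_ts;
}
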
@@ -357,6 +359,7 @@ static atomic_t sdebug_completions;  /* count of deferred completions */
 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
 static atomic_t sdebug_a_tsf;	       /* 'almost task set full' counter */
 static atomic_t sdeb_inject_pending;
+static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
 
 struct opcode_info_t {
 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
@@ -4730,7 +4733,6 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 	struct scsi_cmnd *scp;
 	struct sdebug_dev_info *devip;
 
-	sd_dp->defer_t = SDEB_DEFER_NONE;
 	if (unlikely(aborted))
 		sd_dp->aborted = false;
 	qc_idx = sd_dp->qc_idx;
@@ -4745,6 +4747,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 		return;
 	}
 	spin_lock_irqsave(&sqp->qc_lock, iflags);
+	sd_dp->defer_t = SDEB_DEFER_NONE;
 	sqcp = &sqp->qc_arr[qc_idx];
 	scp = sqcp->a_cmnd;
 	if (unlikely(scp == NULL)) {
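
These two hunks move the defer_t = SDEB_DEFER_NONE store from the unlocked top of sdebug_q_cmd_complete() to inside the qc_lock critical section. That matters now that sdebug_blk_mq_poll() (further down in this diff) also reads and clears defer_t while holding the same per-queue lock, so both completion paths agree on a command's state. A condensed sketch of the claim-under-lock pattern the poll side relies on; the helper below is hypothetical, for illustration only:

/* Hypothetical helper: claim a polled command under qc_lock so the
 * hrtimer/workqueue completion path cannot also complete it.
 */
static bool sdeb_claim_for_poll(struct sdebug_queue *sqp,
				struct sdebug_defer *sd_dp)
{
	unsigned long iflags;
	bool claimed = false;

	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (sd_dp->defer_t == SDEB_DEFER_POLL) {
		sd_dp->defer_t = SDEB_DEFER_NONE;	/* now ours to complete */
		claimed = true;
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	return claimed;
}
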
@@ -5434,13 +5437,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	sd_dp = sqcp->sd_dp;
 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 
-	/* Do not complete IO from default completion path.
-	 * Let it to be on queue.
-	 * Completion should happen from mq_poll interface.
-	 */
-	if ((sqp - sdebug_q_arr) >= (submit_queues - poll_queues))
-		return 0;
-
 	if (!sd_dp) {
 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
 		if (!sd_dp) {
@@ -5517,40 +5513,66 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 				kt -= d;
 			}
 		}
-		if (!sd_dp->init_hrt) {
-			sd_dp->init_hrt = true;
-			sqcp->sd_dp = sd_dp;
-			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
-				     HRTIMER_MODE_REL_PINNED);
-			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
-			sd_dp->sqa_idx = sqp - sdebug_q_arr;
-			sd_dp->qc_idx = k;
+		sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
+		if (cmnd->request->cmd_flags & REQ_HIPRI) {
+			spin_lock_irqsave(&sqp->qc_lock, iflags);
+			if (!sd_dp->init_poll) {
+				sd_dp->init_poll = true;
+				sqcp->sd_dp = sd_dp;
+				sd_dp->sqa_idx = sqp - sdebug_q_arr;
+				sd_dp->qc_idx = k;
+			}
+			sd_dp->defer_t = SDEB_DEFER_POLL;
+			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+		} else {
+			if (!sd_dp->init_hrt) {
+				sd_dp->init_hrt = true;
+				sqcp->sd_dp = sd_dp;
+				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
+					     HRTIMER_MODE_REL_PINNED);
+				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+				sd_dp->sqa_idx = sqp - sdebug_q_arr;
+				sd_dp->qc_idx = k;
+			}
+			sd_dp->defer_t = SDEB_DEFER_HRT;
+			/* schedule the invocation of scsi_done() for a later time */
+			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
 		}
 		if (sdebug_statistics)
 			sd_dp->issuing_cpu = raw_smp_processor_id();
-		sd_dp->defer_t = SDEB_DEFER_HRT;
-		/* schedule the invocation of scsi_done() for a later time */
-		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
 	} else {	/* jdelay < 0, use work queue */
-		if (!sd_dp->init_wq) {
-			sd_dp->init_wq = true;
-			sqcp->sd_dp = sd_dp;
-			sd_dp->sqa_idx = sqp - sdebug_q_arr;
-			sd_dp->qc_idx = k;
-			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
-		}
-		if (sdebug_statistics)
-			sd_dp->issuing_cpu = raw_smp_processor_id();
-		sd_dp->defer_t = SDEB_DEFER_WQ;
 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
 			     atomic_read(&sdeb_inject_pending)))
 			sd_dp->aborted = true;
-		schedule_work(&sd_dp->ew.work);
-		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
-			     atomic_read(&sdeb_inject_pending))) {
+		sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
+		if (cmnd->request->cmd_flags & REQ_HIPRI) {
+			spin_lock_irqsave(&sqp->qc_lock, iflags);
+			if (!sd_dp->init_poll) {
+				sd_dp->init_poll = true;
+				sqcp->sd_dp = sd_dp;
+				sd_dp->sqa_idx = sqp - sdebug_q_arr;
+				sd_dp->qc_idx = k;
+			}
+			sd_dp->defer_t = SDEB_DEFER_POLL;
+			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+		} else {
+			if (!sd_dp->init_wq) {
+				sd_dp->init_wq = true;
+				sqcp->sd_dp = sd_dp;
+				sd_dp->sqa_idx = sqp - sdebug_q_arr;
+				sd_dp->qc_idx = k;
+				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+			}
+			sd_dp->defer_t = SDEB_DEFER_WQ;
+			schedule_work(&sd_dp->ew.work);
+		}
+		if (sdebug_statistics)
+			sd_dp->issuing_cpu = raw_smp_processor_id();
+		if (unlikely(sd_dp->aborted)) {
 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
 			blk_abort_request(cmnd->request);
 			atomic_set(&sdeb_inject_pending, 0);
+			sd_dp->aborted = false;
 		}
 	}
 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
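
Both arms of schedule_resp() now follow the same shape: record the completion deadline in cmpl_ts, then route the command by its request flags. REQ_HIPRI requests are left on the queue as SDEB_DEFER_POLL and reaped by sdebug_blk_mq_poll(); everything else keeps the old hrtimer (delay path) or workqueue (jdelay < 0 path) completion. A condensed sketch of that decision, using the enum above; the helper is illustrative only, not part of the patch:

/* Illustration only: how a deferred command is routed after this change. */
static enum sdeb_defer_type sdeb_pick_defer(bool hipri, bool use_hrtimer)
{
	if (hipri)		/* REQ_HIPRI: reap via the mq_poll path */
		return SDEB_DEFER_POLL;
	return use_hrtimer ? SDEB_DEFER_HRT : SDEB_DEFER_WQ;
}
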
@@ -5779,11 +5801,12 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
 		   dix_reads, dix_writes, dif_errors);
 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
 		   sdebug_statistics);
-	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
+	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
 		   atomic_read(&sdebug_cmnd_count),
 		   atomic_read(&sdebug_completions),
 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
-		   atomic_read(&sdebug_a_tsf));
+		   atomic_read(&sdebug_a_tsf),
+		   atomic_read(&sdeb_mq_poll_count));
 
 	seq_printf(m, "submit_queues=%d\n", submit_queues);
 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
@@ -7247,70 +7270,87 @@ static int sdebug_map_queues(struct Scsi_Host *shost)
 
 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
 {
-	int qc_idx;
-	int retiring = 0;
+	bool first;
+	bool retiring = false;
+	int num_entries = 0;
+	unsigned int qc_idx = 0;
 	unsigned long iflags;
+	ktime_t kt_from_boot = ktime_get_boottime();
 	struct sdebug_queue *sqp;
 	struct sdebug_queued_cmd *sqcp;
 	struct scsi_cmnd *scp;
 	struct sdebug_dev_info *devip;
-	int num_entries = 0;
+	struct sdebug_defer *sd_dp;
 
 	sqp = sdebug_q_arr + queue_num;
+	spin_lock_irqsave(&sqp->qc_lock, iflags);
 
-	do {
-		spin_lock_irqsave(&sqp->qc_lock, iflags);
-		qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
-		if (unlikely((qc_idx < 0) || (qc_idx >= sdebug_max_queue)))
-			goto out;
+	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
+		if (first) {
+			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+			first = false;
+		} else {
+			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
+		}
+		if (unlikely(qc_idx >= sdebug_max_queue))
+			break;
 
 		sqcp = &sqp->qc_arr[qc_idx];
+		sd_dp = sqcp->sd_dp;
+		if (unlikely(!sd_dp))
+			continue;
 		scp = sqcp->a_cmnd;
 		if (unlikely(scp == NULL)) {
-			pr_err("scp is NULL, queue_num=%d, qc_idx=%d from %s\n",
+			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
 			       queue_num, qc_idx, __func__);
-			goto out;
+			break;
 		}
+		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
+			if (kt_from_boot < sd_dp->cmpl_ts)
+				continue;
+
+		} else		/* ignoring non REQ_HIPRI requests */
+			continue;
 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
 		if (likely(devip))
 			atomic_dec(&devip->num_in_q);
 		else
 			pr_err("devip=NULL from %s\n", __func__);
 		if (unlikely(atomic_read(&retired_max_queue) > 0))
-			retiring = 1;
+			retiring = true;
 
 		sqcp->a_cmnd = NULL;
 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
-			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%d from %s\n",
+			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
 			       sqp, queue_num, qc_idx, __func__);
-			goto out;
+			break;
 		}
-
 		if (unlikely(retiring)) {	/* user has reduced max_queue */
 			int k, retval;
 
 			retval = atomic_read(&retired_max_queue);
 			if (qc_idx >= retval) {
 				pr_err("index %d too large\n", retval);
-				goto out;
+				break;
 			}
 			k = find_last_bit(sqp->in_use_bm, retval);
 			if ((k < sdebug_max_queue) || (k == retval))
 				atomic_set(&retired_max_queue, 0);
 			else
 				atomic_set(&retired_max_queue, k + 1);
 		}
+		sd_dp->defer_t = SDEB_DEFER_NONE;
 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 		scp->scsi_done(scp); /* callback to mid level */
+		spin_lock_irqsave(&sqp->qc_lock, iflags);
 		num_entries++;
-	} while (1);
-
-out:
+	}
 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+	if (num_entries > 0)
+		atomic_add(num_entries, &sdeb_mq_poll_count);
 	return num_entries;
 }
 
-
 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
 				   struct scsi_cmnd *scp)
 {
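
For context, REQ_HIPRI requests reach this driver when I/O is issued against a polled queue; one way to generate them is io_uring with IORING_SETUP_IOPOLL on a scsi_debug disk, with the module loaded with poll_queues > 0. A minimal userspace sketch follows; it assumes liburing is installed and that /dev/sdX is a scsi_debug device, and it is not part of the patch:

/* Illustrative only: issue one polled read so the block layer invokes the
 * host's mq_poll hook (sdebug_blk_mq_poll() here) to reap the completion.
 * Build with: gcc -o iopoll iopoll.c -luring
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <liburing.h>

int main(int argc, char **argv)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/sdX\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY | O_DIRECT);	/* O_DIRECT is required for IOPOLL */
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* Busy-polls the completion queue instead of waiting for an interrupt. */
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("polled read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	free(buf);
	return 0;
}

Completions reaped this way show up in the new mq_polls counter exposed by scsi_debug_show_info() above.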