@@ -989,14 +989,13 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
 }
 
 static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
-		u16 *end, unsigned int tag)
+		u16 *end)
 {
 	int found = 0;
 
 	*start = nvmeq->cq_head;
 	while (nvme_cqe_pending(nvmeq)) {
-		if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
-			found++;
+		found++;
 		nvme_update_cq_head(nvmeq);
 	}
 	*end = nvmeq->cq_head;
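Note: with the tag parameter gone, the scan loop counts every pending CQE
unconditionally instead of matching each one against a caller-supplied
command_id. Reconstructed from the hunk above (the remainder of the function
is outside the hunk and unchanged), the resulting scanner reads roughly:

    static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
                    u16 *end)
    {
            int found = 0;

            *start = nvmeq->cq_head;
            while (nvme_cqe_pending(nvmeq)) {
                    /* No per-tag filtering: count every pending CQE. */
                    found++;
                    nvme_update_cq_head(nvmeq);
            }
            *end = nvmeq->cq_head;
            /* ... rest of the function, unchanged by this hunk ... */
    }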
@@ -1017,7 +1016,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	 * the irq handler, even if that was on another CPU.
 	 */
 	rmb();
-	nvme_process_cq(nvmeq, &start, &end, -1);
+	nvme_process_cq(nvmeq, &start, &end);
 	wmb();
 
 	if (start != end) {
@@ -1040,7 +1039,7 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
  * Poll for completions any queue, including those not dedicated to polling.
  * Can be called from any context.
  */
-static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
+static int nvme_poll_irqdisable(struct nvme_queue *nvmeq)
 {
 	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
 	u16 start, end;
@@ -1053,11 +1052,11 @@ static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
 	 */
 	if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
 		spin_lock(&nvmeq->cq_poll_lock);
-		found = nvme_process_cq(nvmeq, &start, &end, tag);
+		found = nvme_process_cq(nvmeq, &start, &end);
 		spin_unlock(&nvmeq->cq_poll_lock);
 	} else {
 		disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
-		found = nvme_process_cq(nvmeq, &start, &end, tag);
+		found = nvme_process_cq(nvmeq, &start, &end);
 		enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
 	}
 
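Note: the two branches differ only in how they exclude concurrent CQ
consumers. Polled queues (NVMEQ_POLLED) have no interrupt handler touching
the CQ, so cq_poll_lock alone serializes pollers; interrupt-driven queues
instead mask their vector so the scan cannot race nvme_irq() on the same
completion queue. A sketch of the function after this hunk (the declaration
of found and the trailing completion/return lines are outside the hunk and
are assumptions here, not quoted source):

    static int nvme_poll_irqdisable(struct nvme_queue *nvmeq)
    {
            struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
            u16 start, end;
            int found;      /* assumed: declaration not visible in the hunk */

            if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
                    /* No IRQ handler to race with; lock out other pollers. */
                    spin_lock(&nvmeq->cq_poll_lock);
                    found = nvme_process_cq(nvmeq, &start, &end);
                    spin_unlock(&nvmeq->cq_poll_lock);
            } else {
                    /* Mask the vector so we cannot race nvme_irq(). */
                    disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
                    found = nvme_process_cq(nvmeq, &start, &end);
                    enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
            }

            /* assumed continuation: reap the CQEs found above */
            nvme_complete_cqes(nvmeq, start, end);
            return found;
    }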
@@ -1075,8 +1074,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
 		return 0;
 
 	spin_lock(&nvmeq->cq_poll_lock);
-	found = nvme_process_cq(nvmeq, &start, &end, -1);
-	nvme_complete_cqes(nvmeq, start, end);
+	found = nvme_process_cq(nvmeq, &start, &end);
 	spin_unlock(&nvmeq->cq_poll_lock);
 
 	return found;
@@ -1253,7 +1251,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	/*
 	 * Did we miss an interrupt?
 	 */
-	if (nvme_poll_irqdisable(nvmeq, req->tag)) {
+	nvme_poll_irqdisable(nvmeq);
+	if (blk_mq_request_completed(req)) {
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, completion polled\n",
 			 req->tag, nvmeq->qid);
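Note: the timeout handler was the only caller that actually used the tag.
Rather than asking the CQ scan to hunt for one command_id, it now drains the
queue and then asks the block layer whether this particular request was among
the completions; blk_mq_request_completed() reports whether the request has
already reached the completed state. The new idiom in isolation (the body and
return value of the branch lie outside the hunk):

    /* Did we miss an interrupt?  Poll first, then check this request. */
    nvme_poll_irqdisable(nvmeq);
    if (blk_mq_request_completed(req)) {
            /* The command had in fact finished; the timeout is spurious. */
    }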
@@ -1396,7 +1395,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 	else
 		nvme_disable_ctrl(&dev->ctrl);
 
-	nvme_poll_irqdisable(nvmeq, -1);
+	nvme_poll_irqdisable(nvmeq);
 }
 
 /*
@@ -1411,7 +1410,7 @@ static void nvme_reap_pending_cqes(struct nvme_dev *dev)
 	int i;
 
 	for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
-		nvme_process_cq(&dev->queues[i], &start, &end, -1);
+		nvme_process_cq(&dev->queues[i], &start, &end);
 		nvme_complete_cqes(&dev->queues[i], start, end);
 	}
 }