@@ -161,7 +161,6 @@ struct nvme_queue {
 	u16 cq_head;
 	u16 qid;
 	u8 cq_phase;
-	u8 cqe_seen;
 	u32 *dbbuf_sq_db;
 	u32 *dbbuf_cq_db;
 	u32 *dbbuf_sq_ei;
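The cqe_seen flag existed only so nvme_irq could tell whether any completion was consumed. After this change that answer comes from the [start, end) window returned by the reworked nvme_process_cq (see the nvme_irq hunk further down); roughly, paraphrasing the hunks below:

	/* before: nvme_handle_cqe set a side-channel flag under the lock */
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	return result;

	/* after: an empty window means no completion was consumed */
	if (start == end)
		return IRQ_NONE;
	nvme_complete_cqes(nvmeq, start, end);
	return IRQ_HANDLED;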
@@ -932,9 +931,9 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
 	}
 }
 
-static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
-		struct nvme_completion *cqe)
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
+	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
 	struct request *req;
 
 	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
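With the index-based interface, nvme_handle_cqe no longer works on a copy of the completion entry passed in by the caller; it reads the entry in place through a volatile pointer, so each field access goes back to the shared completion-queue memory the device writes into. A minimal user-space sketch of that access pattern (toy types and sizes, not the driver's):

#include <stdint.h>

#define TOY_Q_DEPTH 64

struct toy_cqe {
	uint16_t command_id;
	uint16_t status;
};

static struct toy_cqe cq_ring[TOY_Q_DEPTH];	/* stands in for nvmeq->cqes */

static void toy_handle_cqe(uint16_t idx)
{
	/* volatile: re-read the ring entry on each access rather than
	 * trusting a cached copy of device-written memory */
	volatile struct toy_cqe *cqe = &cq_ring[idx];

	if (cqe->command_id >= TOY_Q_DEPTH)
		return;	/* bogus id, mirroring the driver's sanity check */
	/* ... look up and complete the request for cqe->command_id ... */
}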
@@ -957,50 +956,58 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 		return;
 	}
 
-	nvmeq->cqe_seen = 1;
 	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
 	nvme_end_request(req, cqe->status, cqe->result);
 }
 
-static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
-		struct nvme_completion *cqe)
+static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
 {
-	if (nvme_cqe_pending(nvmeq)) {
-		*cqe = nvmeq->cqes[nvmeq->cq_head];
+	while (start != end) {
+		nvme_handle_cqe(nvmeq, start);
+		if (++start == nvmeq->q_depth)
+			start = 0;
+	}
+}
 
-		if (++nvmeq->cq_head == nvmeq->q_depth) {
-			nvmeq->cq_head = 0;
-			nvmeq->cq_phase = !nvmeq->cq_phase;
-		}
-		return true;
+static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
+{
+	if (++nvmeq->cq_head == nvmeq->q_depth) {
+		nvmeq->cq_head = 0;
+		nvmeq->cq_phase = !nvmeq->cq_phase;
 	}
-	return false;
 }
 
-static void nvme_process_cq(struct nvme_queue *nvmeq)
+static inline bool nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
+		u16 *end, int tag)
 {
-	struct nvme_completion cqe;
-	int consumed = 0;
+	bool found = false;
 
-	while (nvme_read_cqe(nvmeq, &cqe)) {
-		nvme_handle_cqe(nvmeq, &cqe);
-		consumed++;
+	*start = nvmeq->cq_head;
+	while (!found && nvme_cqe_pending(nvmeq)) {
+		if (nvmeq->cqes[nvmeq->cq_head].command_id == tag)
+			found = true;
+		nvme_update_cq_head(nvmeq);
 	}
+	*end = nvmeq->cq_head;
 
-	if (consumed)
+	if (*start != *end)
 		nvme_ring_cq_doorbell(nvmeq);
+	return found;
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
 {
-	irqreturn_t result;
 	struct nvme_queue *nvmeq = data;
+	u16 start, end;
+
 	spin_lock(&nvmeq->q_lock);
-	nvme_process_cq(nvmeq);
-	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
-	nvmeq->cqe_seen = 0;
+	nvme_process_cq(nvmeq, &start, &end, -1);
 	spin_unlock(&nvmeq->q_lock);
-	return result;
+
+	if (start == end)
+		return IRQ_NONE;
+	nvme_complete_cqes(nvmeq, start, end);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t nvme_irq_check(int irq, void *data)
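The core of the change is visible in the hunk above: under the queue lock, nvme_process_cq only scans pending entries and advances cq_head, recording the scanned region as a [start, end) window (and ringing the CQ doorbell when the window is non-empty); nvme_complete_cqes then walks that window and completes the requests after the lock has been dropped. A self-contained user-space sketch of the window and wrap-around mechanics, with invented names, a fake pending check in place of the phase-bit test, and no locking:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH 8

struct toy_cqe { uint16_t command_id; };

static struct toy_cqe cqes[Q_DEPTH];
static uint16_t cq_head;

/* Stand-in for the phase-bit test: pretend 'budget' entries are pending. */
static bool cqe_pending(int *budget)
{
	return (*budget)-- > 0;
}

static void update_cq_head(void)
{
	if (++cq_head == Q_DEPTH)
		cq_head = 0;		/* wrap, as cq_head/cq_phase do */
}

/* Would run under the queue lock: record the window, complete nothing yet. */
static bool process_cq(uint16_t *start, uint16_t *end, int tag, int budget)
{
	bool found = false;

	*start = cq_head;
	while (!found && cqe_pending(&budget)) {
		if (cqes[cq_head].command_id == tag)
			found = true;
		update_cq_head();
	}
	*end = cq_head;
	return found;
}

/* Would run after dropping the lock: walk the window and complete. */
static void complete_cqes(uint16_t start, uint16_t end)
{
	while (start != end) {
		printf("complete command_id %d\n", cqes[start].command_id);
		if (++start == Q_DEPTH)
			start = 0;
	}
}

int main(void)
{
	uint16_t start, end;

	for (int i = 0; i < Q_DEPTH; i++)
		cqes[i].command_id = i;

	cq_head = 6;				/* force a wrap across the ring end */
	process_cq(&start, &end, -1, 4);	/* tag -1: take everything pending */
	complete_cqes(start, end);		/* completes 6, 7, 0, 1 */
	return 0;
}

In the real driver, nvme_cqe_pending() tests the phase bit of the entry at cq_head, and nvme_complete_cqes runs only after the queue lock has been released, as the nvme_irq hunk above shows.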
@@ -1013,27 +1020,17 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 
 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 {
-	struct nvme_completion cqe;
-	int found = 0, consumed = 0;
+	u16 start, end;
+	bool found;
 
 	if (!nvme_cqe_pending(nvmeq))
 		return 0;
 
 	spin_lock_irq(&nvmeq->q_lock);
-	while (nvme_read_cqe(nvmeq, &cqe)) {
-		nvme_handle_cqe(nvmeq, &cqe);
-		consumed++;
-
-		if (tag == cqe.command_id) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (consumed)
-		nvme_ring_cq_doorbell(nvmeq);
+	found = nvme_process_cq(nvmeq, &start, &end, tag);
 	spin_unlock_irq(&nvmeq->q_lock);
 
+	nvme_complete_cqes(nvmeq, start, end);
 	return found;
 }
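For polled completions, the new tag argument lets the scan stop as soon as the awaited completion has been observed, while the interrupt and shutdown/deletion paths pass -1, which no 16-bit command_id can equal, so they drain everything pending. A small fragment reusing the toy definitions from the sketch above (hypothetical values):

	uint16_t start, end;
	bool found;

	cq_head = 0;				/* fresh scan of the toy ring */
	found = process_cq(&start, &end, 3, Q_DEPTH);
	complete_cqes(start, end);		/* completes ids 0..3, found == true */

Note that everything scanned up to and including the matching entry still gets completed; the window just ends early.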
@@ -1340,15 +1337,18 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = &dev->queues[0];
+	u16 start, end;
 
 	if (shutdown)
 		nvme_shutdown_ctrl(&dev->ctrl);
 	else
 		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
 
 	spin_lock_irq(&nvmeq->q_lock);
-	nvme_process_cq(nvmeq);
+	nvme_process_cq(nvmeq, &start, &end, -1);
 	spin_unlock_irq(&nvmeq->q_lock);
+
+	nvme_complete_cqes(nvmeq, start, end);
 }
 
 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
@@ -1995,6 +1995,7 @@ static void nvme_del_queue_end(struct request *req, blk_status_t error)
 static void nvme_del_cq_end(struct request *req, blk_status_t error)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
+	u16 start, end;
 
 	if (!error) {
 		unsigned long flags;
@@ -2006,8 +2007,10 @@ static void nvme_del_cq_end(struct request *req, blk_status_t error)
 		 */
 		spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
 					SINGLE_DEPTH_NESTING);
-		nvme_process_cq(nvmeq);
+		nvme_process_cq(nvmeq, &start, &end, -1);
 		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+
+		nvme_complete_cqes(nvmeq, start, end);
 	}
 
 	nvme_del_queue_end(req, error);