@@ -147,9 +147,10 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
 struct nvme_queue {
        struct device *q_dmadev;
        struct nvme_dev *dev;
-       spinlock_t q_lock;
+       spinlock_t sq_lock;
        struct nvme_command *sq_cmds;
        struct nvme_command __iomem *sq_cmds_io;
+       spinlock_t cq_lock ____cacheline_aligned_in_smp;
        volatile struct nvme_completion *cqes;
        struct blk_mq_tags **tags;
        dma_addr_t sq_dma_addr;
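
A rough sketch of the intent behind this split (not part of the patch): submitters now serialize on sq_lock while the completion side serializes on cq_lock, and the ____cacheline_aligned_in_smp on cq_lock starts the completion-side fields on their own cache line so the two paths do not bounce a shared line between CPUs. The struct and helper names below are made up for illustration; only the lock layout mirrors the hunk above.

/* Illustrative sketch only, not the driver's code. The spinlock layout
 * mirrors the hunk above; everything else is a stand-in.
 */
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_queue {
        /* submission side: only tasks posting commands take this */
        spinlock_t sq_lock;
        u16 sq_tail;
        /* completion side starts on its own cache line */
        spinlock_t cq_lock ____cacheline_aligned_in_smp;
        u16 cq_head;
};

/* submit path: touches only submission-side state, under sq_lock */
static void demo_submit(struct demo_queue *q)
{
        spin_lock_irq(&q->sq_lock);
        q->sq_tail++;           /* stand-in for writing an SQE and ringing the doorbell */
        spin_unlock_irq(&q->sq_lock);
}

/* completion path: touches only completion-side state, under cq_lock */
static void demo_reap(struct demo_queue *q)
{
        spin_lock(&q->cq_lock); /* assumed to run in hard-IRQ context in this sketch */
        q->cq_head++;           /* stand-in for consuming a CQE */
        spin_unlock(&q->cq_lock);
}
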
@@ -894,9 +895,9 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,

        blk_mq_start_request(req);

-       spin_lock_irq(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->sq_lock);
        __nvme_submit_cmd(nvmeq, &cmnd);
-       spin_unlock_irq(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->sq_lock);
        return BLK_STS_OK;
 out_cleanup_iod:
        nvme_free_iod(dev, req);
@@ -1000,9 +1001,9 @@ static irqreturn_t nvme_irq(int irq, void *data)
        struct nvme_queue *nvmeq = data;
        u16 start, end;

-       spin_lock(&nvmeq->q_lock);
+       spin_lock(&nvmeq->cq_lock);
        nvme_process_cq(nvmeq, &start, &end, -1);
-       spin_unlock(&nvmeq->q_lock);
+       spin_unlock(&nvmeq->cq_lock);

        if (start == end)
                return IRQ_NONE;
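
The IRQ and poll hunks follow the same pattern: record a window of completed entries while holding cq_lock, then drop the lock before doing the per-request completion work. Below is a sketch of that pattern, reusing the hypothetical demo_queue from the earlier sketch; the CQE scan and the completion call are condensed into placeholder comments.

/* Illustrative sketch of the collect-then-complete pattern used by
 * nvme_irq()/__nvme_poll() above. demo_queue comes from the earlier
 * sketch; the scan and completion steps are placeholders.
 */
#include <linux/interrupt.h>

static irqreturn_t demo_irq(int irq, void *data)
{
        struct demo_queue *q = data;
        u16 start, end;

        spin_lock(&q->cq_lock);
        start = q->cq_head;
        /* ...scan CQEs whose phase bit matches, advancing q->cq_head... */
        end = q->cq_head;
        spin_unlock(&q->cq_lock);

        if (start == end)
                return IRQ_NONE;

        /* per-request completion runs with cq_lock already dropped */
        /* demo_complete_window(q, start, end); */
        return IRQ_HANDLED;
}
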
@@ -1026,9 +1027,9 @@ static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
        if (!nvme_cqe_pending(nvmeq))
                return 0;

-       spin_lock_irq(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->cq_lock);
        found = nvme_process_cq(nvmeq, &start, &end, tag);
-       spin_unlock_irq(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->cq_lock);

        nvme_complete_cqes(nvmeq, start, end);
        return found;
@@ -1051,9 +1052,9 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
        c.common.opcode = nvme_admin_async_event;
        c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;

-       spin_lock_irq(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->sq_lock);
        __nvme_submit_cmd(nvmeq, &c);
-       spin_unlock_irq(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->sq_lock);
 }

 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1310,15 +1311,15 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
        int vector;

-       spin_lock_irq(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->cq_lock);
        if (nvmeq->cq_vector == -1) {
-               spin_unlock_irq(&nvmeq->q_lock);
+               spin_unlock_irq(&nvmeq->cq_lock);
                return 1;
        }
        vector = nvmeq->cq_vector;
        nvmeq->dev->online_queues--;
        nvmeq->cq_vector = -1;
-       spin_unlock_irq(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->cq_lock);

        /*
         * Ensure that nvme_queue_rq() sees it ->cq_vector == -1 without
@@ -1344,9 +1345,9 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
        else
                nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);

-       spin_lock_irq(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->cq_lock);
        nvme_process_cq(nvmeq, &start, &end, -1);
-       spin_unlock_irq(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->cq_lock);

        nvme_complete_cqes(nvmeq, start, end);
 }
@@ -1406,7 +1407,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)

        nvmeq->q_dmadev = dev->dev;
        nvmeq->dev = dev;
-       spin_lock_init(&nvmeq->q_lock);
+       spin_lock_init(&nvmeq->sq_lock);
+       spin_lock_init(&nvmeq->cq_lock);
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
@@ -1442,15 +1444,15 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 {
        struct nvme_dev *dev = nvmeq->dev;

-       spin_lock_irq(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->cq_lock);
        nvmeq->sq_tail = 0;
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
        nvme_dbbuf_init(dev, nvmeq, qid);
        dev->online_queues++;
-       spin_unlock_irq(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->cq_lock);
 }

 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -2001,14 +2003,14 @@ static void nvme_del_cq_end(struct request *req, blk_status_t error)
                unsigned long flags;

                /*
-                * We might be called with the AQ q_lock held
-                * and the I/O queue q_lock should always
+                * We might be called with the AQ cq_lock held
+                * and the I/O queue cq_lock should always
                 * nest inside the AQ one.
                 */
-               spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
+               spin_lock_irqsave_nested(&nvmeq->cq_lock, flags,
                                SINGLE_DEPTH_NESTING);
                nvme_process_cq(nvmeq, &start, &end, -1);
-               spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+               spin_unlock_irqrestore(&nvmeq->cq_lock, flags);

                nvme_complete_cqes(nvmeq, start, end);
        }
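
The SINGLE_DEPTH_NESTING in this last hunk is needed because the admin queue's cq_lock and an I/O queue's cq_lock are initialized at the same spin_lock_init() call site and so share a lockdep class; taking the I/O queue lock while the admin queue lock is held would otherwise look like recursive locking to lockdep. A condensed sketch of that nesting, again with the hypothetical demo_queue; the real caller only sometimes holds the admin-queue lock.

/* Illustrative sketch of the lock nesting the comment above describes:
 * the I/O queue's cq_lock is taken while the admin queue's cq_lock may
 * already be held, and both locks belong to the same lockdep class.
 */
#include <linux/lockdep.h>

static void demo_del_cq_end(struct demo_queue *adminq, struct demo_queue *ioq)
{
        unsigned long flags;

        spin_lock_irq(&adminq->cq_lock);                /* outer: admin queue */
        spin_lock_irqsave_nested(&ioq->cq_lock, flags,  /* inner: I/O queue */
                                 SINGLE_DEPTH_NESTING);
        /* ...reap the I/O queue's remaining completions... */
        spin_unlock_irqrestore(&ioq->cq_lock, flags);
        spin_unlock_irq(&adminq->cq_lock);
}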