@@ -75,7 +75,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
  * Represents an NVM Express device. Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
-	struct nvme_queue **queues;
+	struct nvme_queue *queues;
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
 	u32 __iomem *dbs;
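A minimal sketch, not part of the patch, of why every accessor changes from dev->queues[i] to &dev->queues[i] once the member is an embedded array rather than an array of pointers (all toy_* names are invented for illustration):

/* toy model of the layout change; compiles stand-alone */
struct toy_queue { int qid; };

struct toy_dev_old { struct toy_queue **queues; };  /* old: array of pointers */
struct toy_dev_new { struct toy_queue *queues; };   /* new: contiguous array  */

static struct toy_queue *toy_get_old(struct toy_dev_old *dev, int i)
{
	return dev->queues[i];   /* loads a pointer that may still be NULL */
}

static struct toy_queue *toy_get_new(struct toy_dev_new *dev, int i)
{
	return &dev->queues[i];  /* plain address arithmetic, never NULL */
}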
@@ -365,7 +365,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
 	WARN_ON(hctx_idx != 0);
 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -387,7 +387,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
 	if (!nvmeq->tags)
 		nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -403,7 +403,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
 	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
-	struct nvme_queue *nvmeq = dev->queues[queue_idx];
+	struct nvme_queue *nvmeq = &dev->queues[queue_idx];
 
 	BUG_ON(!nvmeq);
 	iod->nvmeq = nvmeq;
@@ -1046,7 +1046,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
@@ -1282,18 +1282,15 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
 	if (nvmeq->sq_cmds)
 		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-	kfree(nvmeq);
 }
 
 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 {
 	int i;
 
 	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
 		dev->ctrl.queue_count--;
-		dev->queues[i] = NULL;
-		nvme_free_queue(nvmeq);
+		nvme_free_queue(&dev->queues[i]);
 	}
 }
 
@@ -1325,10 +1322,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
-	if (!nvmeq)
-		return;
 	if (nvme_suspend_queue(nvmeq))
 		return;
 
@@ -1384,13 +1379,10 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 	return 0;
 }
 
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-					      int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+		int depth, int node)
 {
-	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
-						node);
-	if (!nvmeq)
-		return NULL;
+	struct nvme_queue *nvmeq = &dev->queues[qid];
 
 	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
					  &nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1409,17 +1401,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
-	dev->queues[qid] = nvmeq;
 	dev->ctrl.queue_count++;
 
-	return nvmeq;
+	return 0;
 
  free_cqdma:
 	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
  free_nvmeq:
-	kfree(nvmeq);
-	return NULL;
+	return -ENOMEM;
 }
 
 static int queue_request_irq(struct nvme_queue *nvmeq)
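With the storage embedded in nvme_dev, the reworked allocator above no longer hands back a pointer; it fills in the element in place and returns 0 or -ENOMEM, so callers in the hunks below test an integer result. A hedged userspace sketch of that convention, with malloc() standing in for the DMA allocation and all toy_* names invented:

#include <errno.h>
#include <stdlib.h>

struct toy_queue { void *cqes; int qid; };
struct toy_dev { struct toy_queue *queues; };

static int toy_alloc_queue(struct toy_dev *dev, int qid)
{
	struct toy_queue *q = &dev->queues[qid]; /* storage already exists */

	q->cqes = malloc(64);                    /* stand-in for the DMA allocation */
	if (!q->cqes)
		return -ENOMEM;
	q->qid = qid;
	return 0;                                /* caller checks: if (toy_alloc_queue(...)) ... */
}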
@@ -1592,14 +1582,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	nvmeq = dev->queues[0];
-	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
-					dev_to_node(dev->dev));
-		if (!nvmeq)
-			return -ENOMEM;
-	}
+	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+			dev_to_node(dev->dev));
+	if (result)
+		return result;
 
+	nvmeq = &dev->queues[0];
 	aqa = nvmeq->q_depth - 1;
 	aqa |= aqa << 16;
 
@@ -1629,7 +1617,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
 		/* vector == qid - 1, match nvme_create_queue */
-		if (!nvme_alloc_queue(dev, i, dev->q_depth,
+		if (nvme_alloc_queue(dev, i, dev->q_depth,
		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
 			ret = -ENOMEM;
 			break;
@@ -1638,7 +1626,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
 	for (i = dev->online_queues; i <= max; i++) {
-		ret = nvme_create_queue(dev->queues[i], i);
+		ret = nvme_create_queue(&dev->queues[i], i);
 		if (ret)
 			break;
 	}
@@ -1894,7 +1882,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	struct nvme_queue *adminq = dev->queues[0];
+	struct nvme_queue *adminq = &dev->queues[0];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, nr_io_queues;
 	unsigned long size;
@@ -2020,7 +2008,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 retry:
 	timeout = ADMIN_TIMEOUT;
 	for (; i > 0; i--, sent++)
-		if (nvme_delete_queue(dev->queues[i], opcode))
+		if (nvme_delete_queue(&dev->queues[i], opcode))
 			break;
 
 	while (sent--) {
@@ -2212,15 +2200,15 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 
 	queues = dev->online_queues - 1;
 	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
-		nvme_suspend_queue(dev->queues[i]);
+		nvme_suspend_queue(&dev->queues[i]);
 
 	if (dead) {
 		/* A device might become IO incapable very soon during
 		 * probe, before the admin queue is configured. Thus,
 		 * queue_count can be 0 here.
 		 */
 		if (dev->ctrl.queue_count)
-			nvme_suspend_queue(dev->queues[0]);
+			nvme_suspend_queue(&dev->queues[0]);
 	} else {
 		nvme_disable_io_queues(dev, queues);
 		nvme_disable_admin_queue(dev, shutdown);
@@ -2482,8 +2470,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
 	if (!dev)
 		return -ENOMEM;
-	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
-							GFP_KERNEL, node);
+
+	dev->queues = kcalloc_node(num_possible_cpus() + 1,
+			sizeof(struct nvme_queue), GFP_KERNEL, node);
 	if (!dev->queues)
 		goto free;
 
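The probe-time allocation above grows from an array of pointers to the full per-queue structures, zeroed and allocated in one contiguous, node-local block. A small userspace sketch of the sizing change, assuming calloc() in place of kcalloc_node() and a fixed count in place of num_possible_cpus() + 1:

#include <stdlib.h>

struct toy_queue { int qid; };

int main(void)
{
	int nqueues = 8 + 1;  /* stand-in for num_possible_cpus() + 1 */

	/* old layout: just the pointer slots; each queue allocated later */
	struct toy_queue **ptrs = calloc(nqueues, sizeof(*ptrs));

	/* new layout: storage for every queue allocated up front, zeroed */
	struct toy_queue *queues = calloc(nqueues, sizeof(*queues));

	free(ptrs);
	free(queues);
	return 0;
}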