@@ -77,7 +77,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
-	struct nvme_queue **queues;
+	struct nvme_queue *queues;
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
 	u32 __iomem *dbs;
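Note: the hunk above is the core of the change — struct nvme_dev now embeds a flat array of nvme_queue structs instead of an array of pointers, so every later access becomes &dev->queues[i] rather than dev->queues[i]. A minimal userspace sketch of the two layouts, assuming a hypothetical struct queue and plain calloc()/free() in place of the kernel allocators:

    #include <stdlib.h>

    struct queue { int qid; int depth; };   /* hypothetical stand-in for struct nvme_queue */

    /* Old layout: array of pointers, one allocation per queue plus the pointer array. */
    static struct queue **alloc_queue_ptrs(int n)
    {
    	struct queue **queues = calloc(n, sizeof(*queues));
    	if (!queues)
    		return NULL;
    	for (int i = 0; i < n; i++)
    		queues[i] = calloc(1, sizeof(**queues)); /* a failure here needs per-element cleanup (omitted in this sketch) */
    	return queues;
    }

    /* New layout: one contiguous allocation; element i is addressed as &queues[i]. */
    static struct queue *alloc_queue_array(int n)
    {
    	return calloc(n, sizeof(struct queue));
    }

One consequence visible in the later hunks: freeing a queue no longer implies kfree() of the element, since the device owns the whole array.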
@@ -348,7 +348,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 				unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
 	WARN_ON(hctx_idx != 0);
 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -370,7 +370,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
 	if (!nvmeq->tags)
 		nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -386,7 +386,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
 	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
-	struct nvme_queue *nvmeq = dev->queues[queue_idx];
+	struct nvme_queue *nvmeq = &dev->queues[queue_idx];
 
 	BUG_ON(!nvmeq);
 	iod->nvmeq = nvmeq;
@@ -900,7 +900,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
@@ -1146,18 +1146,15 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
 	if (nvmeq->sq_cmds)
 		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-	kfree(nvmeq);
 }
 
 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 {
 	int i;
 
 	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
 		dev->ctrl.queue_count--;
-		dev->queues[i] = NULL;
-		nvme_free_queue(nvmeq);
+		nvme_free_queue(&dev->queues[i]);
 	}
 }
 
@@ -1189,10 +1186,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
-	if (!nvmeq)
-		return;
 	if (nvme_suspend_queue(nvmeq))
 		return;
 
@@ -1246,13 +1241,10 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 	return 0;
 }
 
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-							int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+		int depth, int node)
 {
-	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
-							node);
-	if (!nvmeq)
-		return NULL;
+	struct nvme_queue *nvmeq = &dev->queues[qid];
 
 	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1271,17 +1263,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
-	dev->queues[qid] = nvmeq;
 	dev->ctrl.queue_count++;
 
-	return nvmeq;
+	return 0;
 
  free_cqdma:
 	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 							nvmeq->cq_dma_addr);
  free_nvmeq:
-	kfree(nvmeq);
-	return NULL;
+	return -ENOMEM;
 }
 
 static int queue_request_irq(struct nvme_queue *nvmeq)
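With the queue storage owned by the device, nvme_alloc_queue changes from returning a queue pointer (or NULL) to returning 0 or a negative errno, and its error path no longer frees the queue itself. A rough, self-contained sketch of that convention, using hypothetical stand-in types (struct queue, struct dev_like) and calloc() in place of the DMA allocators:

    #include <errno.h>
    #include <stdlib.h>

    struct queue { void *cqes; int qid; int depth; };            /* hypothetical */
    struct dev_like { struct queue *queues; int queue_count; };  /* hypothetical */

    static int alloc_queue(struct dev_like *dev, int qid, int depth)
    {
    	struct queue *q = &dev->queues[qid];    /* storage preallocated at probe time */

    	q->cqes = calloc(depth, 64);            /* stands in for dma_zalloc_coherent() */
    	if (!q->cqes)
    		return -ENOMEM;                 /* no free of q itself: dev owns the array */
    	q->qid = qid;
    	q->depth = depth;
    	dev->queue_count++;
    	return 0;
    }

This is also why the caller in nvme_create_io_queues flips from if (!nvme_alloc_queue(...)) to if (nvme_alloc_queue(...)): a non-zero return now signals failure.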
@@ -1468,14 +1458,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	nvmeq = dev->queues[0];
-	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
-					dev_to_node(dev->dev));
-		if (!nvmeq)
-			return -ENOMEM;
-	}
+	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+			dev_to_node(dev->dev));
+	if (result)
+		return result;
 
+	nvmeq = &dev->queues[0];
 	aqa = nvmeq->q_depth - 1;
 	aqa |= aqa << 16;
 
@@ -1505,7 +1493,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
 		/* vector == qid - 1, match nvme_create_queue */
-		if (!nvme_alloc_queue(dev, i, dev->q_depth,
+		if (nvme_alloc_queue(dev, i, dev->q_depth,
 		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
 			ret = -ENOMEM;
 			break;
@@ -1514,7 +1502,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
 	for (i = dev->online_queues; i <= max; i++) {
-		ret = nvme_create_queue(dev->queues[i], i);
+		ret = nvme_create_queue(&dev->queues[i], i);
 		if (ret)
 			break;
 	}
@@ -1770,7 +1758,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	struct nvme_queue *adminq = dev->queues[0];
+	struct nvme_queue *adminq = &dev->queues[0];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, nr_io_queues;
 	unsigned long size;
@@ -1896,7 +1884,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
  retry:
 	timeout = ADMIN_TIMEOUT;
 	for (; i > 0; i--, sent++)
-		if (nvme_delete_queue(dev->queues[i], opcode))
+		if (nvme_delete_queue(&dev->queues[i], opcode))
 			break;
 
 	while (sent--) {
@@ -2081,15 +2069,15 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 
 	queues = dev->online_queues - 1;
 	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
-		nvme_suspend_queue(dev->queues[i]);
+		nvme_suspend_queue(&dev->queues[i]);
 
 	if (dead) {
 		/* A device might become IO incapable very soon during
 		 * probe, before the admin queue is configured. Thus,
 		 * queue_count can be 0 here.
 		 */
 		if (dev->ctrl.queue_count)
-			nvme_suspend_queue(dev->queues[0]);
+			nvme_suspend_queue(&dev->queues[0]);
 	} else {
 		nvme_disable_io_queues(dev, queues);
 		nvme_disable_admin_queue(dev, shutdown);
@@ -2345,7 +2333,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
 	if (!dev)
 		return -ENOMEM;
-	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
+
+	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue),
 			GFP_KERNEL, node);
 	if (!dev->queues)
 		goto free;
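The probe hunk sizes the new array up front: one admin queue (index 0) plus one possible I/O queue per possible CPU, matching the hctx_idx + 1 indexing used earlier. A small userspace sketch of that sizing, assuming a hypothetical CPU count and calloc() in place of kzalloc_node():

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int qid; int depth; };   /* hypothetical stand-in */

    int main(void)
    {
    	int num_possible_cpus = 8;          /* assumed value for the example */
    	struct queue *queues = calloc(num_possible_cpus + 1, sizeof(*queues));

    	if (!queues)
    		return 1;
    	/* queues[0] is the admin queue; queues[1..num_possible_cpus] are I/O queues. */
    	printf("allocated %zu bytes for %d queues\n",
    	       (num_possible_cpus + 1) * sizeof(*queues), num_possible_cpus + 1);
    	free(queues);
    	return 0;
    }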