@@ -427,9 +427,9 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
 	return rc;
 }
 
-static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
-				  int pages, int type, u32 ring_mask,
-				  u32 map_index, u16 *fw_ring_id)
+static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
+				  struct bnxt_re_ring_attr *ring_attr,
+				  u16 *fw_ring_id)
 {
 	struct bnxt_en_dev *en_dev = rdev->en_dev;
 	struct hwrm_ring_alloc_input req = {0};
@@ -443,18 +443,18 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
 	memset(&fw_msg, 0, sizeof(fw_msg));
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
 	req.enables = 0;
-	req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
-	if (pages > 1) {
+	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
+	if (ring_attr->pages > 1) {
 		/* Page size is in log2 units */
 		req.page_size = BNXT_PAGE_SHIFT;
 		req.page_tbl_depth = 1;
 	}
 	req.fbo = 0;
 	/* Association of ring index with doorbell index and MSIX number */
-	req.logical_id = cpu_to_le16(map_index);
-	req.length = cpu_to_le32(ring_mask + 1);
-	req.ring_type = type;
-	req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+	req.logical_id = cpu_to_le16(ring_attr->lrid);
+	req.length = cpu_to_le32(ring_attr->depth + 1);
+	req.ring_type = ring_attr->type;
+	req.int_mode = ring_attr->mode;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
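The hunks above pass a new struct bnxt_re_ring_attr that this diff does not define (it lives in bnxt_re.h). A minimal sketch reconstructed from the fields assigned here — the exact field types and ordering are assumptions inferred from how each member is used:

/* Sketch of the ring attribute struct consumed by bnxt_re_net_ring_alloc().
 * Reconstructed from this diff; dma_arr feeds page_tbl_addr, pages gates the
 * multi-page PBL setup, depth + 1 becomes the ring length, lrid is the
 * logical (doorbell/MSIX) ring id, and mode selects the interrupt mode.
 */
struct bnxt_re_ring_attr {
	dma_addr_t	*dma_arr;
	int		pages;
	int		type;
	u32		depth;
	u32		lrid;
	u8		mode;
};

Folding the six former parameters into one attribute struct keeps the call signature stable as ring options grow; for instance, int_mode was hard-coded to RING_ALLOC_REQ_INT_MODE_MSIX before and is now selected by the caller via rattr.mode.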
@@ -1006,10 +1006,10 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
 
 static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 {
+	struct bnxt_re_ring_attr rattr = {};
+	struct bnxt_qplib_ctx *qplib_ctx;
 	int num_vec_created = 0;
-	dma_addr_t *pg_map;
 	int rc = 0, i;
-	int pages;
 	u8 type;
 
 	/* Configure and allocate resources for qplib */
@@ -1030,23 +1030,27 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 	if (rc)
 		goto dealloc_res;
 
+	qplib_ctx = &rdev->qplib_ctx;
 	for (i = 0; i < rdev->num_msix - 1; i++) {
-		rdev->nq[i].res = &rdev->qplib_res;
-		rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
-			BNXT_RE_MAX_SRQC_COUNT + 2;
+		struct bnxt_qplib_nq *nq;
+
+		nq = &rdev->nq[i];
+		nq->hwq.max_elements = (qplib_ctx->cq_count +
+					qplib_ctx->srqc_count + 2);
 		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
 		if (rc) {
 			dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
 				i, rc);
 			goto free_nq;
 		}
 		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
-		pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
-		pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
-		rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
-					    BNXT_QPLIB_NQE_MAX_CNT - 1,
-					    rdev->msix_entries[i + 1].ring_idx,
-					    &rdev->nq[i].ring_id);
+		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
+		rattr.type = type;
+		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
+		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
 		if (rc) {
 			dev_err(rdev_to_dev(rdev),
 				"Failed to allocate NQ fw id with rc = 0x%x",
@@ -1371,10 +1375,10 @@ static void bnxt_re_worker(struct work_struct *work)
 
 static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 {
-	dma_addr_t *pg_map;
-	u32 db_offt, ridx;
-	int pages, vid;
+	struct bnxt_re_ring_attr rattr;
+	u32 db_offt;
 	bool locked;
+	int vid;
 	u8 type;
 	int rc;
 
@@ -1383,6 +1387,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 	locked = true;
 
 	/* Registered a new RoCE device instance to netdev */
+	memset(&rattr, 0, sizeof(rattr));
 	rc = bnxt_re_register_netdev(rdev);
 	if (rc) {
 		rtnl_unlock();
@@ -1422,12 +1427,13 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 	}
 
 	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
-	pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
-	pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
-	ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
-	rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
-				    BNXT_QPLIB_CREQE_MAX_CNT - 1,
-				    ridx, &rdev->rcfw.creq_ring_id);
+	rattr.dma_arr = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
+	rattr.pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
+	rattr.type = type;
+	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
+	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &rdev->rcfw.creq_ring_id);
 	if (rc) {
 		pr_err("Failed to allocate CREQ: %#x\n", rc);
 		goto free_rcfw;