@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
 	port_attr->sm_sl = 0;
 	port_attr->subnet_timeout = 0;
 	port_attr->init_type_reply = 0;
-	/* call the underlying netdev's ethtool hooks to query speed settings
-	 * for which we acquire rtnl_lock _only_ if it's registered with
-	 * IB stack to avoid race in the NETDEV_UNREG path
-	 */
-	if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
-		if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
-				     &port_attr->active_width))
-			return -EINVAL;
+	port_attr->active_speed = rdev->active_speed;
+	port_attr->active_width = rdev->active_width;
+
 	return 0;
 }
 
@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+	struct bnxt_qplib_gid *gid_to_del;
 
 	/* Delete the entry from the hardware */
 	ctx = *context;
@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 	if (sgid_tbl && sgid_tbl->active) {
 		if (ctx->idx >= sgid_tbl->max)
 			return -EINVAL;
+		gid_to_del = &sgid_tbl->tbl[ctx->idx];
+		/* DEL_GID is called in WQ context(netdevice_event_work_handler)
+		 * or via the ib_unregister_device path. In the former case QP1
+		 * may not be destroyed yet, in which case just return as FW
+		 * needs that entry to be present and will fail it's deletion.
+		 * We could get invoked again after QP1 is destroyed OR get an
+		 * ADD_GID call with a different GID value for the same index
+		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
+		 */
+		if (ctx->idx == 0 &&
+		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
+		    ctx->refcnt == 1 && rdev->qp1_sqp) {
+			dev_dbg(rdev_to_dev(rdev),
+				"Trying to delete GID0 while QP1 is alive\n");
+			return -EFAULT;
+		}
 		ctx->refcnt--;
 		if (!ctx->refcnt) {
-			rc = bnxt_qplib_del_sgid(sgid_tbl,
-						 &sgid_tbl->tbl[ctx->idx],
-						 true);
+			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
 			if (rc) {
 				dev_err(rdev_to_dev(rdev),
 					"Failed to remove GID: %#x", rc);
@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 
 		kfree(rdev->sqp_ah);
 		kfree(rdev->qp1_sqp);
+		rdev->qp1_sqp = NULL;
+		rdev->sqp_ah = NULL;
 	}
 
 	if (!IS_ERR_OR_NULL(qp->rumem))
@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 		qp->qplib_qp.modify_flags |=
 			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
 		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
+		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
 	} else if (qp_attr->qp_state == IB_QPS_RTR) {
 		qp->qplib_qp.modify_flags |=
 			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
 		qp->qplib_qp.path_mtu =
 			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
+		qp->qplib_qp.mtu =
+			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
 	}
 
 	if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 {
 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
 	struct bnxt_re_dev *rdev = qp->rdev;
-	struct bnxt_qplib_qp qplib_qp;
+	struct bnxt_qplib_qp *qplib_qp;
 	int rc;
 
-	memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
-	qplib_qp.id = qp->qplib_qp.id;
-	qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
+	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
+	if (!qplib_qp)
+		return -ENOMEM;
+
+	qplib_qp->id = qp->qplib_qp.id;
+	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
 
-	rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
+	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
 	if (rc) {
 		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
-		return rc;
+		goto out;
 	}
-	qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
-	qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
-	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
-	qp_attr->pkey_index = qplib_qp.pkey_index;
-	qp_attr->qkey = qplib_qp.qkey;
+	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
+	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
+	qp_attr->pkey_index = qplib_qp->pkey_index;
+	qp_attr->qkey = qplib_qp->qkey;
 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
-	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
-			qplib_qp.ah.host_sgid_index,
-			qplib_qp.ah.hop_limit,
-			qplib_qp.ah.traffic_class);
-	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
-	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
-	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
-	qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
-	qp_attr->timeout = qplib_qp.timeout;
-	qp_attr->retry_cnt = qplib_qp.retry_cnt;
-	qp_attr->rnr_retry = qplib_qp.rnr_retry;
-	qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
-	qp_attr->rq_psn = qplib_qp.rq.psn;
-	qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
-	qp_attr->sq_psn = qplib_qp.sq.psn;
-	qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
-	qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
-							IB_SIGNAL_REQ_WR;
-	qp_attr->dest_qp_num = qplib_qp.dest_qpn;
+	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+			qplib_qp->ah.host_sgid_index,
+			qplib_qp->ah.hop_limit,
+			qplib_qp->ah.traffic_class);
+	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
+	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
+	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
+	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
+	qp_attr->timeout = qplib_qp->timeout;
+	qp_attr->retry_cnt = qplib_qp->retry_cnt;
+	qp_attr->rnr_retry = qplib_qp->rnr_retry;
+	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+	qp_attr->rq_psn = qplib_qp->rq.psn;
+	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
+	qp_attr->sq_psn = qplib_qp->sq.psn;
+	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
+	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
+							 IB_SIGNAL_REQ_WR;
+	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
 
 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
 	qp_init_attr->cap = qp_attr->cap;
 
-	return 0;
+out:
+	kfree(qplib_qp);
+	return rc;
 }
 
 /* Routine for sending QP1 packets for RoCE V1 an V2
@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
 	switch (wr->opcode) {
 	case IB_WR_ATOMIC_CMP_AND_SWP:
 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
+		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
 		break;
 	case IB_WR_ATOMIC_FETCH_AND_ADD:
@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
 		return rc;
 	}
 
-	if (mr->npages && mr->pages) {
+	if (mr->pages) {
 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
 							&mr->qplib_frpl);
 		kfree(mr->pages);