@@ -7670,6 +7670,71 @@ static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
 	return rc;
 }
 
+static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
+					       struct bnxt_ctx_mem_type *ctxm,
+					       bool last)
+{
+	struct hwrm_func_backing_store_cfg_v2_input *req;
+	u32 instance_bmap = ctxm->instance_bmap;
+	int i, j, rc = 0, n = 1;
+	__le32 *p;
+
+	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+		return 0;
+
+	if (instance_bmap)
+		n = hweight32(ctxm->instance_bmap);
+	else
+		instance_bmap = 1;
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+	if (rc)
+		return rc;
+	hwrm_req_hold(bp, req);
+	req->type = cpu_to_le16(ctxm->type);
+	req->entry_size = cpu_to_le16(ctxm->entry_size);
+	req->subtype_valid_cnt = ctxm->split_entry_cnt;
+	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+		p[i] = cpu_to_le32(ctxm->split[i]);
+	for (i = 0, j = 0; j < n && !rc; i++) {
+		struct bnxt_ctx_pg_info *ctx_pg;
+
+		if (!(instance_bmap & (1 << i)))
+			continue;
+		req->instance = cpu_to_le16(i);
+		ctx_pg = &ctxm->pg_info[j++];
+		if (!ctx_pg->entries)
+			continue;
+		req->num_entries = cpu_to_le32(ctx_pg->entries);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req->page_size_pbl_level,
+				      &req->page_dir);
+		if (last && j == n)
+			req->flags =
+				cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
+		rc = hwrm_req_send(bp, req);
+	}
+	hwrm_req_drop(bp, req);
+	return rc;
+}
+
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
+{
+	struct bnxt_ctx_mem_info *ctx = bp->ctx;
+	struct bnxt_ctx_mem_type *ctxm;
+	int rc = 0;
+	u16 type;
+
+	for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+		ctxm = &ctx->ctx_arr[type];
+
+		rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
 void bnxt_free_ctx_mem(struct bnxt *bp)
 {
 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
@@ -7804,7 +7869,11 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
-	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+
+	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+		rc = bnxt_backing_store_cfg_v2(bp);
+	else
+		rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
 	if (rc) {
 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
 			   rc);
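
The core of the patch is the loop in bnxt_hwrm_func_backing_store_cfg_v2() that walks the set bits of instance_bmap, sends one firmware request per backing-store instance, and raises the ALL_DONE flag only on the last instance of the last context type. The following is a minimal stand-alone C sketch of that pattern for illustration; it is user-space code, not driver code, and send_cfg() is a hypothetical stand-in for hwrm_req_send().

/*
 * Sketch of the set-bit walk used above: i scans bit positions,
 * j counts instances actually visited, and the "all done" flag is
 * raised only when configuring the final instance (j == n) of the
 * final context type (last == true).
 */
#include <stdio.h>
#include <stdbool.h>

static int send_cfg(unsigned int instance, bool all_done)
{
	/* Stand-in for a firmware request; always succeeds here. */
	printf("cfg instance %u%s\n", instance, all_done ? " (ALL_DONE)" : "");
	return 0;
}

int main(void)
{
	unsigned int instance_bmap = 0x15;		/* instances 0, 2, 4 */
	int n = __builtin_popcount(instance_bmap);	/* like hweight32() */
	bool last = true;				/* final context type */
	int i, j, rc = 0;

	for (i = 0, j = 0; j < n && !rc; i++) {
		if (!(instance_bmap & (1u << i)))
			continue;			/* skip cleared bits */
		j++;
		rc = send_cfg(i, last && j == n);
	}
	return rc;
}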