@@ -416,7 +416,100 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
-/* only call by PF to reserve resources for VF */
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_vf_resource_cfg_input req = {0};
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
+	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
+	struct bnxt_pf_info *pf = &bp->pf;
+	int i, rc = 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
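+	/* Pool of completion rings, stat contexts, RX/TX rings, ring groups
+	 * and VNICs left over after the PF's own usage; the VFs are
+	 * provisioned out of this pool.
+	 */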
+	vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+	vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+	else
+		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+	vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+	req.min_rsscos_ctx = cpu_to_le16(1);
+	req.max_rsscos_ctx = cpu_to_le16(1);
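+	/* The minimal strategy guarantees each VF only one of each resource
+	 * while leaving the maximums at the full pool; otherwise the pool is
+	 * split evenly and the per-VF share becomes both minimum and maximum.
+	 */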
+	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+		req.min_cmpl_rings = cpu_to_le16(1);
+		req.min_tx_rings = cpu_to_le16(1);
+		req.min_rx_rings = cpu_to_le16(1);
+		req.min_l2_ctxs = cpu_to_le16(1);
+		req.min_vnics = cpu_to_le16(1);
+		req.min_stat_ctx = cpu_to_le16(1);
+		req.min_hw_ring_grps = cpu_to_le16(1);
+	} else {
+		vf_cp_rings /= num_vfs;
+		vf_tx_rings /= num_vfs;
+		vf_rx_rings /= num_vfs;
+		vf_vnics /= num_vfs;
+		vf_stat_ctx /= num_vfs;
+		vf_ring_grps /= num_vfs;
+
+		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+		req.min_l2_ctxs = cpu_to_le16(4);
+		req.min_vnics = cpu_to_le16(vf_vnics);
+		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+	}
+	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+	req.max_l2_ctxs = cpu_to_le16(4);
+	req.max_vnics = cpu_to_le16(vf_vnics);
+	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+
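+	/* Send one HWRM_FUNC_VF_RESOURCE_CFG request per VF, stopping at the
+	 * first failure; pf->active_vfs tracks how many were configured.
+	 */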
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < num_vfs; i++) {
+		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc) {
+			rc = -ENOMEM;
+			break;
+		}
+		pf->active_vfs = i + 1;
+		pf->vf[i].fw_fid = pf->first_vf_id + i;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
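+	/* Deduct whatever was set aside for the VFs from the PF's view of
+	 * the available maximums and return the number of VFs configured.
+	 */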
+	if (pf->active_vfs) {
+		u16 n = 1;
+
+		if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+			n = pf->active_vfs;
+
+		hw_resc->max_tx_rings -= vf_tx_rings * n;
+		hw_resc->max_rx_rings -= vf_rx_rings * n;
+		hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+		hw_resc->max_cp_rings -= vf_cp_rings * n;
+		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+		hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+		hw_resc->max_vnics -= vf_vnics * n;
+
+		rc = pf->active_vfs;
+	}
+	return rc;
+}
+
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 {
 	u32 rc = 0, mtu, i;
@@ -489,18 +582,29 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 		total_vf_tx_rings += vf_tx_rsvd;
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
-	if (!rc) {
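+	/* Mirror the new API: deduct the VF resources and return the number
+	 * of VFs configured; -ENOMEM is returned only if none were.
+	 */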
+	if (rc)
+		rc = -ENOMEM;
+	if (pf->active_vfs) {
 		hw_resc->max_tx_rings -= total_vf_tx_rings;
 		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
 		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
 		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
 		hw_resc->max_rsscos_ctxs -= num_vfs;
 		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
 		hw_resc->max_vnics -= vf_vnics * num_vfs;
+		rc = pf->active_vfs;
 	}
 	return rc;
 }
 
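+/* Pick the reservation method: the new HWRM_FUNC_VF_RESOURCE_CFG API when
+ * the new resource manager is in use (BNXT_FLAG_NEW_RM), the legacy
+ * FUNC_CFG based reservation otherwise.
+ */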
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+{
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+	else
+		return bnxt_hwrm_func_cfg(bp, num_vfs);
+}
+
 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 {
 	int rc = 0, vfs_supported;
@@ -567,9 +671,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 		goto err_out1;
 
 	/* Reserve resources for VFs */
-	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
-	if (rc)
-		goto err_out2;
+	rc = bnxt_func_cfg(bp, *num_vfs);
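+	/* bnxt_func_cfg() returns the number of VFs actually provisioned;
+	 * treat a short count as success with fewer VFs.
+	 */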
+	if (rc != *num_vfs) {
+		if (rc <= 0) {
+			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+			*num_vfs = 0;
+			goto err_out2;
+		}
+		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
+		*num_vfs = rc;
+	}
 
 	/* Register buffers for VFs */
 	rc = bnxt_hwrm_func_buf_rgtr(bp);