@@ -492,6 +492,8 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 }
 
+#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
+
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 {
 	int err;
@@ -503,8 +505,19 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 	if (err)
 		return err;
 	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+	if (err)
+		return err;
+	return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+				   MLXSW_SP_SB_SIZE,
+				   MLXSW_SP_SB_POOL_COUNT,
+				   MLXSW_SP_SB_POOL_COUNT,
+				   MLXSW_SP_SB_TC_COUNT,
+				   MLXSW_SP_SB_TC_COUNT);
+}
 
-	return err;
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
 }
 
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -521,3 +534,175 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
 	return err;
 }
+
+static u8 pool_get(u16 pool_index)
+{
+	return pool_index % MLXSW_SP_SB_POOL_COUNT;
+}
+
+static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
+{
+	u16 pool_index;
+
+	pool_index = pool;
+	if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
+		pool_index += MLXSW_SP_SB_POOL_COUNT;
+	return pool_index;
+}
+
+static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
+{
+	return pool_index < MLXSW_SP_SB_POOL_COUNT ?
+	       MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
+}
+
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+			 unsigned int sb_index, u16 pool_index,
+			 struct devlink_sb_pool_info *pool_info)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+	pool_info->pool_type = dir;
+	pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+	pool_info->threshold_type = pr->mode;
+	return 0;
+}
+
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+			 unsigned int sb_index, u16 pool_index, u32 size,
+			 enum devlink_sb_threshold_type threshold_type)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	enum mlxsw_reg_sbpr_mode mode = threshold_type;
+	u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+
+	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
+}
+
+#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
+
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
+				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
+{
+	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
+		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+	return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+}
+
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
+				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
+				    u32 *p_max_buff)
+{
+	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
+		int val;
+
+		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
+		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
+			return -EINVAL;
+		*p_max_buff = val;
+	} else {
+		*p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+	}
+	return 0;
+}
+
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+			      unsigned int sb_index, u16 pool_index,
+			      u32 *p_threshold)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+						       pool, dir);
+
+	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
+						 pm->max_buff);
+	return 0;
+}
+
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+			      unsigned int sb_index, u16 pool_index,
+			      u32 threshold)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	u32 max_buff;
+	int err;
+
+	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+				       threshold, &max_buff);
+	if (err)
+		return err;
+
+	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
+				    0, max_buff);
+}
+
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+				 unsigned int sb_index, u16 tc_index,
+				 enum devlink_sb_pool_type pool_type,
+				 u16 *p_pool_index, u32 *p_threshold)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pg_buff = tc_index;
+	enum mlxsw_reg_sbxx_dir dir = pool_type;
+	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+						       pg_buff, dir);
+
+	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
+						 cm->max_buff);
+	*p_pool_index = pool_index_get(cm->pool, pool_type);
+	return 0;
+}
+
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+				 unsigned int sb_index, u16 tc_index,
+				 enum devlink_sb_pool_type pool_type,
+				 u16 pool_index, u32 threshold)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pg_buff = tc_index;
+	enum mlxsw_reg_sbxx_dir dir = pool_type;
+	u8 pool = pool_index;
+	u32 max_buff;
+	int err;
+
+	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+				       threshold, &max_buff);
+	if (err)
+		return err;
+
+	if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
+		if (pool < MLXSW_SP_SB_POOL_COUNT)
+			return -EINVAL;
+		pool -= MLXSW_SP_SB_POOL_COUNT;
+	} else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
+		return -EINVAL;
+	}
+	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
+				    0, max_buff, pool);
+}
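For reference, here is a minimal standalone sketch (not part of the patch) of the pool_index <-> (pool, direction) mapping that the pool_get(), dir_get() and pool_index_get() helpers above implement: devlink numbers the ingress pools first and the egress pools after them. SB_POOL_COUNT below is an assumed stand-in for MLXSW_SP_SB_POOL_COUNT, which is defined elsewhere in the driver.

/*
 * Standalone illustration of the devlink pool-index mapping.
 * SB_POOL_COUNT = 4 is assumed purely for illustration.
 */
#include <stdio.h>

#define SB_POOL_COUNT 4

enum dir { DIR_INGRESS, DIR_EGRESS };	/* stand-in for mlxsw_reg_sbxx_dir */

static unsigned int pool_get(unsigned int pool_index)
{
	return pool_index % SB_POOL_COUNT;
}

static enum dir dir_get(unsigned int pool_index)
{
	return pool_index < SB_POOL_COUNT ? DIR_INGRESS : DIR_EGRESS;
}

static unsigned int pool_index_get(unsigned int pool, enum dir dir)
{
	return dir == DIR_EGRESS ? pool + SB_POOL_COUNT : pool;
}

int main(void)
{
	unsigned int i;

	/* 2 * SB_POOL_COUNT pool indices are exposed to devlink in total */
	for (i = 0; i < 2 * SB_POOL_COUNT; i++) {
		unsigned int pool = pool_get(i);
		enum dir dir = dir_get(i);

		printf("pool_index %u -> pool %u %s (round-trips to %u)\n",
		       i, pool, dir == DIR_INGRESS ? "ingress" : "egress",
		       pool_index_get(pool, dir));
	}
	return 0;
}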
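Similarly, a standalone sketch (again not part of the patch) of the dynamic-threshold conversion performed by mlxsw_sp_sb_threshold_in()/_out(): devlink dynamic threshold values are offset by 2 from the hardware "alpha" encoding (hence the "3->1, 16->14" comment), and values outside the hardware range are rejected with -EINVAL. DYN_MAX_BUFF_MIN/MAX below are assumed stand-ins for the MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN/MAX register limits.

/*
 * Standalone illustration of the devlink threshold <-> hardware max_buff
 * conversion for dynamic pools. Bounds are assumed for illustration.
 */
#include <stdio.h>
#include <errno.h>

#define THRESHOLD_TO_ALPHA_OFFSET (-2)
#define DYN_MAX_BUFF_MIN 1
#define DYN_MAX_BUFF_MAX 14

/* devlink threshold -> hardware max_buff (cf. mlxsw_sp_sb_threshold_in) */
static int threshold_in(unsigned int threshold, unsigned int *p_max_buff)
{
	int val = (int)threshold + THRESHOLD_TO_ALPHA_OFFSET;

	if (val < DYN_MAX_BUFF_MIN || val > DYN_MAX_BUFF_MAX)
		return -EINVAL;
	*p_max_buff = val;
	return 0;
}

/* hardware max_buff -> devlink threshold (cf. mlxsw_sp_sb_threshold_out) */
static unsigned int threshold_out(unsigned int max_buff)
{
	return max_buff - THRESHOLD_TO_ALPHA_OFFSET;
}

int main(void)
{
	unsigned int t, max_buff;

	for (t = 2; t <= 17; t++) {
		if (threshold_in(t, &max_buff))
			printf("threshold %2u -> rejected\n", t);
		else
			printf("threshold %2u -> max_buff %2u -> back to %2u\n",
			       t, max_buff, threshold_out(max_buff));
	}
	return 0;
}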