@@ -1601,13 +1601,180 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1601
1601
mlx5_devcom_unregister_component (devcom , MLX5_DEVCOM_ESW_OFFLOADS );
1602
1602
}
1603
1603
1604
/* Configure the prio-tag ingress ACL on @vport.
 *
 * Installs a single flow-table entry that matches untagged packets
 * (cvlan_tag not set) and pushes a priority tag (802.1Q header with
 * VID 0, prio 0) before allowing them.  Traffic not hit by this rule
 * is allowed by the ACL table's default behavior.
 *
 * Returns 0 on success, -EOPNOTSUPP when the device lacks ingress ACL
 * flow-table support, or a negative errno on failure.  On failure any
 * partially installed ingress rules are cleaned up.
 */
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) Untagged packets - push prio tag VLAN, allow
	 * Unmatched traffic is allowed by default
	 */

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	/* Drop any stale ingress rules before (re)programming the ACL. */
	esw_vport_cleanup_ingress_rules(esw, vport);

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_no_mem;
	}

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	/* The pushed tag is a pure priority tag: VID 0, prio 0. */
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;
	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		/* NULL the cached rule so cleanup doesn't touch an ERR_PTR. */
		vport->ingress.allow_rule = NULL;
		goto out;
	}

out:
	kvfree(spec);
out_no_mem:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	return err;
}
1667
+
1668
+ static int esw_vport_egress_prio_tag_config (struct mlx5_eswitch * esw ,
1669
+ struct mlx5_vport * vport )
1670
+ {
1671
+ struct mlx5_flow_act flow_act = {0 };
1672
+ struct mlx5_flow_spec * spec ;
1673
+ int err = 0 ;
1674
+
1675
+ /* For prio tag mode, there is only 1 FTEs:
1676
+ * 1) prio tag packets - pop the prio tag VLAN, allow
1677
+ * Unmatched traffic is allowed by default
1678
+ */
1679
+
1680
+ esw_vport_cleanup_egress_rules (esw , vport );
1681
+
1682
+ err = esw_vport_enable_egress_acl (esw , vport );
1683
+ if (err ) {
1684
+ mlx5_core_warn (esw -> dev ,
1685
+ "failed to enable egress acl (%d) on vport[%d]\n" ,
1686
+ err , vport -> vport );
1687
+ return err ;
1688
+ }
1689
+
1690
+ esw_debug (esw -> dev ,
1691
+ "vport[%d] configure prio tag egress rules\n" , vport -> vport );
1692
+
1693
+ spec = kvzalloc (sizeof (* spec ), GFP_KERNEL );
1694
+ if (!spec ) {
1695
+ err = - ENOMEM ;
1696
+ goto out_no_mem ;
1697
+ }
1698
+
1699
+ /* prio tag vlan rule - pop it so VF receives untagged packets */
1700
+ MLX5_SET_TO_ONES (fte_match_param , spec -> match_criteria , outer_headers .cvlan_tag );
1701
+ MLX5_SET_TO_ONES (fte_match_param , spec -> match_value , outer_headers .cvlan_tag );
1702
+ MLX5_SET_TO_ONES (fte_match_param , spec -> match_criteria , outer_headers .first_vid );
1703
+ MLX5_SET (fte_match_param , spec -> match_value , outer_headers .first_vid , 0 );
1704
+
1705
+ spec -> match_criteria_enable = MLX5_MATCH_OUTER_HEADERS ;
1706
+ flow_act .action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1707
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW ;
1708
+ vport -> egress .allowed_vlan =
1709
+ mlx5_add_flow_rules (vport -> egress .acl , spec ,
1710
+ & flow_act , NULL , 0 );
1711
+ if (IS_ERR (vport -> egress .allowed_vlan )) {
1712
+ err = PTR_ERR (vport -> egress .allowed_vlan );
1713
+ esw_warn (esw -> dev ,
1714
+ "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n" ,
1715
+ vport -> vport , err );
1716
+ vport -> egress .allowed_vlan = NULL ;
1717
+ goto out ;
1718
+ }
1719
+
1720
+ out :
1721
+ kvfree (spec );
1722
+ out_no_mem :
1723
+ if (err )
1724
+ esw_vport_cleanup_egress_rules (esw , vport );
1725
+ return err ;
1726
+ }
1727
+
1728
/* Set up prio-tag ingress and egress ACLs on the first @nvports VF vports.
 *
 * For each VF vport the ingress ACL is configured first, then the egress
 * ACL.  On failure everything configured so far is torn down before
 * returning the error, so the caller never sees a half-configured state.
 *
 * Returns 0 on success or the first failing vport's negative errno.
 */
static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
{
	int i, j;
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, nvports) {
		err = esw_vport_ingress_prio_tag_config(esw, &esw->vports[i]);
		if (err)
			goto err_ingress;
		err = esw_vport_egress_prio_tag_config(esw, &esw->vports[i]);
		if (err)
			goto err_egress;
	}

	return 0;

err_egress:
	/* vport i already had its ingress ACL enabled when egress setup
	 * failed — disable it before unwinding the fully-configured vports.
	 */
	esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
err_ingress:
	/* Unwind vports 0..i-1 (both ACLs configured) in reverse order. */
	mlx5_esw_for_each_vf_vport_reverse(esw, j, i - 1) {
		esw_vport_disable_egress_acl(esw, &esw->vports[j]);
		esw_vport_disable_ingress_acl(esw, &esw->vports[j]);
	}

	return err;
}
1754
+
1755
+ static void esw_prio_tag_acls_cleanup (struct mlx5_eswitch * esw )
1756
+ {
1757
+ int i ;
1758
+
1759
+ mlx5_esw_for_each_vf_vport (esw , i , esw -> nvports ) {
1760
+ esw_vport_disable_egress_acl (esw , & esw -> vports [i ]);
1761
+ esw_vport_disable_ingress_acl (esw , & esw -> vports [i ]);
1762
+ }
1763
+ }
1764
+
1604
1765
static int esw_offloads_steering_init (struct mlx5_eswitch * esw , int nvports )
1605
1766
{
1606
1767
int err ;
1607
1768
1608
1769
memset (& esw -> fdb_table .offloads , 0 , sizeof (struct offloads_fdb ));
1609
1770
mutex_init (& esw -> fdb_table .offloads .fdb_prio_lock );
1610
1771
1772
+ if (MLX5_CAP_GEN (esw -> dev , prio_tag_required )) {
1773
+ err = esw_prio_tag_acls_config (esw , nvports );
1774
+ if (err )
1775
+ return err ;
1776
+ }
1777
+
1611
1778
err = esw_create_offloads_fdb_tables (esw , nvports );
1612
1779
if (err )
1613
1780
return err ;
@@ -1636,6 +1803,8 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
1636
1803
esw_destroy_vport_rx_group (esw );
1637
1804
esw_destroy_offloads_table (esw );
1638
1805
esw_destroy_offloads_fdb_tables (esw );
1806
+ if (MLX5_CAP_GEN (esw -> dev , prio_tag_required ))
1807
+ esw_prio_tag_acls_cleanup (esw );
1639
1808
}
1640
1809
1641
1810
static void esw_host_params_event_handler (struct work_struct * work )
0 commit comments