Commit 9b818a3

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue
Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-11-06 (ice)

This series contains updates to ice driver only.

Dave removes SR-IOV LAG attribute for only the interface being disabled
to allow for proper unwinding of all interfaces.

Michal Schmidt changes some LAG allocations from GFP_KERNEL to
GFP_ATOMIC due to non-allowed sleeping.

Aniruddha and Marcin fix redirection and drop rules for switchdev by
properly setting and marking egress/ingress type.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: Fix VF-VF direction matching in drop rule in switchdev
  ice: Fix VF-VF filter rules in switchdev mode
  ice: lag: in RCU, use atomic allocation
  ice: Fix SRIOV LAG disable on non-compliant aggregate
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents f1a3b28 + 68c51db commit 9b818a3
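Background for the GFP_ATOMIC hunks below: rcu_read_lock() opens a non-sleeping context, while GFP_KERNEL allocations are allowed to sleep during memory reclaim, so any kzalloc() issued inside the read-side critical section has to use GFP_ATOMIC (or be moved outside the lock). A minimal kernel-style sketch of the pattern, with illustrative names rather than the driver's own:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct member_entry {
        struct list_head node;
        struct net_device *netdev;
};

static void collect_bond_members(struct net_device *bond, struct list_head *head)
{
        struct net_device *tmp_nd;
        struct member_entry *entry;

        rcu_read_lock();        /* sleeping is forbidden from here on */
        for_each_netdev_in_bond_rcu(bond, tmp_nd) {
                /* GFP_KERNEL may sleep during reclaim and would trigger a
                 * "sleeping function called from invalid context" warning
                 * here; GFP_ATOMIC never sleeps.
                 */
                entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
                if (!entry)
                        break;

                entry->netdev = tmp_nd;
                list_add(&entry->node, head);
        }
        rcu_read_unlock();
}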

File tree

2 files changed: +91, -41 lines


drivers/net/ethernet/intel/ice/ice_lag.c

Lines changed: 6 additions & 12 deletions
@@ -628,7 +628,7 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
 	INIT_LIST_HEAD(&ndlist.node);
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-		nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
 		if (!nl)
 			break;
 
@@ -1555,18 +1555,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
  */
 static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
 {
-	struct ice_lag_netdev_list *entry;
 	struct ice_netdev_priv *np;
-	struct net_device *netdev;
 	struct ice_pf *pf;
 
-	list_for_each_entry(entry, lag->netdev_head, node) {
-		netdev = entry->netdev;
-		np = netdev_priv(netdev);
-		pf = np->vsi->back;
-
-		ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
-	}
+	np = netdev_priv(lag->netdev);
+	pf = np->vsi->back;
+	ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
 }
 
 /**
@@ -1698,7 +1692,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
 
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
-		nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
+		nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
 		if (!nd_list)
 			break;
 
@@ -2075,7 +2069,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
 	INIT_LIST_HEAD(&ndlist.node);
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-		nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
 		if (!nl)
 			break;
 
drivers/net/ethernet/intel/ice/ice_tc_lib.c

Lines changed: 85 additions & 29 deletions
@@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
 	return ice_tc_tun_get_type(dev) != TNL_LAST;
 }
 
-static int
-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
-			    struct flow_action_entry *act)
+static bool ice_tc_is_dev_uplink(struct net_device *dev)
+{
+	return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
+}
+
+static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
+					struct ice_tc_flower_fltr *fltr,
+					struct net_device *target_dev)
 {
 	struct ice_repr *repr;
 
+	fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+	if (ice_is_port_repr_netdev(filter_dev) &&
+	    ice_is_port_repr_netdev(target_dev)) {
+		repr = ice_netdev_to_repr(target_dev);
+
+		fltr->dest_vsi = repr->src_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_is_port_repr_netdev(filter_dev) &&
+		   ice_tc_is_dev_uplink(target_dev)) {
+		repr = ice_netdev_to_repr(filter_dev);
+
+		fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_tc_is_dev_uplink(filter_dev) &&
+		   ice_is_port_repr_netdev(target_dev)) {
+		repr = ice_netdev_to_repr(target_dev);
+
+		fltr->dest_vsi = repr->src_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+	} else {
+		NL_SET_ERR_MSG_MOD(fltr->extack,
+				   "Unsupported netdevice in switchdev mode");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_tc_setup_drop_action(struct net_device *filter_dev,
+			 struct ice_tc_flower_fltr *fltr)
+{
+	fltr->action.fltr_act = ICE_DROP_PACKET;
+
+	if (ice_is_port_repr_netdev(filter_dev)) {
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_tc_is_dev_uplink(filter_dev)) {
+		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+	} else {
+		NL_SET_ERR_MSG_MOD(fltr->extack,
+				   "Unsupported netdevice in switchdev mode");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
+				       struct ice_tc_flower_fltr *fltr,
+				       struct flow_action_entry *act)
+{
+	int err;
+
 	switch (act->id) {
 	case FLOW_ACTION_DROP:
-		fltr->action.fltr_act = ICE_DROP_PACKET;
+		err = ice_tc_setup_drop_action(filter_dev, fltr);
+		if (err)
+			return err;
+
 		break;
 
 	case FLOW_ACTION_REDIRECT:
-		fltr->action.fltr_act = ICE_FWD_TO_VSI;
-
-		if (ice_is_port_repr_netdev(act->dev)) {
-			repr = ice_netdev_to_repr(act->dev);
-
-			fltr->dest_vsi = repr->src_vsi;
-			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-		} else if (netif_is_ice(act->dev) ||
-			   ice_is_tunnel_supported(act->dev)) {
-			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-		} else {
-			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
-			return -EINVAL;
-		}
+		err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+		if (err)
+			return err;
 
 		break;
 
@@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 		goto exit;
 	}
 
-	/* egress traffic is always redirect to uplink */
-	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
-		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
-
 	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
 	if (fltr->action.fltr_act != ICE_DROP_PACKET)
 		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 	rule_info.flags_info.act_valid = true;
 
 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+		/* Uplink to VF */
 		rule_info.sw_act.flag |= ICE_FLTR_RX;
 		rule_info.sw_act.src = hw->pf_id;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
-	} else {
+	} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+		   fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+		/* VF to Uplink */
 		rule_info.sw_act.flag |= ICE_FLTR_TX;
 		rule_info.sw_act.src = vsi->idx;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+	} else {
+		/* VF to VF */
+		rule_info.sw_act.flag |= ICE_FLTR_TX;
+		rule_info.sw_act.src = vsi->idx;
+		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
 	}
 
 	/* specify the cookie as filter_rule_id */
@@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
 
 /**
  * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @filter_dev: Pointer to device on which filter is being added
  * @vsi: Pointer to VSI
  * @cls_flower: Pointer to TC flower offload structure
  * @fltr: Pointer to TC flower filter structure
  *
  * Parse the actions for a TC filter
  */
-static int
-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
-			    struct flow_cls_offload *cls_flower,
-			    struct ice_tc_flower_fltr *fltr)
+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
+				       struct ice_vsi *vsi,
+				       struct flow_cls_offload *cls_flower,
+				       struct ice_tc_flower_fltr *fltr)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
 	struct flow_action *flow_action = &rule->action;
@@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
 
 	flow_action_for_each(i, act, flow_action) {
 		if (ice_is_eswitch_mode_switchdev(vsi->back))
-			err = ice_eswitch_tc_parse_action(fltr, act);
+			err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
 		else
 			err = ice_tc_parse_action(vsi, fltr, act);
 		if (err)
@@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
 	if (err < 0)
 		goto err;
 
-	err = ice_parse_tc_flower_actions(vsi, f, fltr);
+	err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
 	if (err < 0)
 		goto err;
 
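To read the ice_tc_lib.c hunks above at a glance: a redirect or drop rule is now classified by both the device the filter is attached to (filter_dev) and the redirect target, instead of assuming that every egress rule is destined for the uplink. Below is a stand-alone model of that decision table, assuming only VF representors and the uplink are involved; the enum, struct, and function names are hypothetical simplifications, not the driver's code.

#include <stdio.h>

enum dev_kind { VF_REPR, UPLINK, OTHER };
enum direction { INGRESS, EGRESS, UNSUPPORTED };

struct decision {
        enum direction dir;
        const char *dest;       /* VSI the packet is forwarded to */
};

/* Models ice_tc_setup_redirect_action(): VF-to-VF traffic stays an egress
 * rule forwarded to the peer VF's VSI rather than being rewritten to the
 * uplink, which is the behaviour this series fixes.
 */
static struct decision classify_redirect(enum dev_kind filter_dev,
                                         enum dev_kind target_dev)
{
        if (filter_dev == VF_REPR && target_dev == VF_REPR)
                return (struct decision){ EGRESS, "target VF's VSI" };
        if (filter_dev == VF_REPR && target_dev == UPLINK)
                return (struct decision){ EGRESS, "uplink VSI" };
        if (filter_dev == UPLINK && target_dev == VF_REPR)
                return (struct decision){ INGRESS, "target VF's VSI" };
        return (struct decision){ UNSUPPORTED, "none" };
}

/* Models ice_tc_setup_drop_action(): drop direction follows the device the
 * filter is attached to.
 */
static enum direction classify_drop(enum dev_kind filter_dev)
{
        if (filter_dev == VF_REPR)
                return EGRESS;
        if (filter_dev == UPLINK)
                return INGRESS;
        return UNSUPPORTED;
}

int main(void)
{
        struct decision d = classify_redirect(VF_REPR, VF_REPR);

        printf("VF->VF redirect: dir=%d dest=%s\n", d.dir, d.dest);
        printf("VF repr drop:    dir=%d\n", classify_drop(VF_REPR));
        return 0;
}

In the driver itself, the egress VF-to-VF case is additionally marked with ICE_FLTR_TX and ICE_SINGLE_ACT_LB_ENABLE, as shown in the third hunk above.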