Skip to content

Commit c7b9e63

Browse files
committed
Merge branch 'mlx5-sriov-vlan-push-pop'
Saeed Mahameed says: ==================== Mellanox 100G SRIOV offloads vlan push/pop From Or Gerlitz: This series further enhances the SRIOV TC offloads of mlx5 to handle the TC vlan push and pop actions. This serves a common use-case in virtualization systems where the virtual switch adds (push) vlan tags to packets sent from VMs and removes (pop) vlan tags before the packet is received by the VM. We use the new E-Switch switchdev mode and the TC vlan action to achieve that also in SW defined SRIOV environments by offloading TC rules that contain this action along with forwarding (TC mirred/redirect action) the packet. In the first patch we add some helpers to access the TC vlan action info by offloading drivers. The next five patches don't add any new functionality; they do some refactoring and cleanups in the current code to be used next. The seventh patch deals with supporting vlans by the mlx5 e-switch in switchdev mode. The eighth patch does the vlan action offload from TC and the last patch adds matching for vlans as typically required by TC flows that involve vlan pop action. The series was applied on top of commit 524605e "cxgb4: Convert to use simple_open()" ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents fefa569 + 095b6cf commit c7b9e63

File tree

8 files changed

+446
-83
lines changed

8 files changed

+446
-83
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -869,6 +869,7 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
869869
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
870870
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
871871
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
872+
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
872873

873874
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
874875
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

Lines changed: 23 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -446,6 +446,16 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
446446
kfree(rq->mpwqe.info);
447447
}
448448

449+
/* Return true when this netdev is a VF vport representor.
 * The uplink representor is registered with rep->vport == FDB_UPLINK_VPORT
 * (see mlx5e_nic_enable in this series), so any other registered rep vport
 * is a VF representor.  NOTE(review): ppriv is presumably NULL for
 * non-representor netdevs -- confirm against the profile init code.
 */
static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
450+
{
451+
struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
452+
453+
/* FDB_UPLINK_VPORT marks the uplink (PF) representor, not a VF */
if (rep && rep->vport != FDB_UPLINK_VPORT)
454+
return true;
455+
456+
return false;
457+
}
458+
449459
static int mlx5e_create_rq(struct mlx5e_channel *c,
450460
struct mlx5e_rq_param *param,
451461
struct mlx5e_rq *rq)
@@ -487,6 +497,11 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
487497

488498
switch (priv->params.rq_wq_type) {
489499
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
500+
if (mlx5e_is_vf_vport_rep(priv)) {
501+
err = -EINVAL;
502+
goto err_rq_wq_destroy;
503+
}
504+
490505
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
491506
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
492507
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
@@ -512,7 +527,11 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
512527
goto err_rq_wq_destroy;
513528
}
514529

515-
rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
530+
if (mlx5e_is_vf_vport_rep(priv))
531+
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
532+
else
533+
rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
534+
516535
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
517536
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
518537

@@ -3726,9 +3745,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
37263745
mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
37273746
rep.load = mlx5e_nic_rep_load;
37283747
rep.unload = mlx5e_nic_rep_unload;
3729-
rep.vport = 0;
3748+
rep.vport = FDB_UPLINK_VPORT;
37303749
rep.priv_data = priv;
3731-
mlx5_eswitch_register_vport_rep(esw, &rep);
3750+
mlx5_eswitch_register_vport_rep(esw, 0, &rep);
37323751
}
37333752
}
37343753

@@ -3867,7 +3886,7 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
38673886
rep.unload = mlx5e_vport_rep_unload;
38683887
rep.vport = vport;
38693888
ether_addr_copy(rep.hw_id, mac);
3870-
mlx5_eswitch_register_vport_rep(esw, &rep);
3889+
mlx5_eswitch_register_vport_rep(esw, vport, &rep);
38713890
}
38723891
}
38733892

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 61 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
#include <net/busy_poll.h>
3737
#include "en.h"
3838
#include "en_tc.h"
39+
#include "eswitch.h"
3940

4041
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
4142
{
@@ -629,7 +630,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
629630
rq->stats.packets++;
630631
rq->stats.bytes += cqe_bcnt;
631632
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
632-
napi_gro_receive(rq->cq.napi, skb);
633633
}
634634

635635
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
@@ -733,20 +733,15 @@ static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
733733
}
734734
}
735735

736-
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
736+
static inline
737+
struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
738+
u16 wqe_counter, u32 cqe_bcnt)
737739
{
738740
struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
739741
struct mlx5e_dma_info *di;
740-
struct mlx5e_rx_wqe *wqe;
741-
__be16 wqe_counter_be;
742742
struct sk_buff *skb;
743-
u16 wqe_counter;
744743
void *va, *data;
745-
u32 cqe_bcnt;
746744

747-
wqe_counter_be = cqe->wqe_counter;
748-
wqe_counter = be16_to_cpu(wqe_counter_be);
749-
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
750745
di = &rq->dma_info[wqe_counter];
751746
va = page_address(di->page);
752747
data = va + MLX5_RX_HEADROOM;
@@ -757,22 +752,21 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
757752
rq->buff.wqe_sz,
758753
DMA_FROM_DEVICE);
759754
prefetch(data);
760-
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
761755

762756
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
763757
rq->stats.wqe_err++;
764758
mlx5e_page_release(rq, di, true);
765-
goto wq_ll_pop;
759+
return NULL;
766760
}
767761

768762
if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
769-
goto wq_ll_pop; /* page/packet was consumed by XDP */
763+
return NULL; /* page/packet was consumed by XDP */
770764

771765
skb = build_skb(va, RQ_PAGE_SIZE(rq));
772766
if (unlikely(!skb)) {
773767
rq->stats.buff_alloc_err++;
774768
mlx5e_page_release(rq, di, true);
775-
goto wq_ll_pop;
769+
return NULL;
776770
}
777771

778772
/* queue up for recycling ..*/
@@ -782,7 +776,60 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
782776
skb_reserve(skb, MLX5_RX_HEADROOM);
783777
skb_put(skb, cqe_bcnt);
784778

779+
return skb;
780+
}
781+
782+
/* Handle one RX completion on a linked-list (non-striding) RQ: build an
 * skb from the WQE's buffer via skb_from_cqe(), complete it and hand it
 * to the stack with GRO, then pop the WQE back onto the free list.
 * skb_from_cqe() returns NULL when the packet was dropped (CQE error,
 * skb alloc failure) or consumed by XDP; in that case only the WQE pop
 * is performed.
 */
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
783+
{
784+
struct mlx5e_rx_wqe *wqe;
785+
__be16 wqe_counter_be;
786+
struct sk_buff *skb;
787+
u16 wqe_counter;
788+
u32 cqe_bcnt;
789+
790+
/* CQE fields are big-endian on the wire; keep the raw __be16 around
 * because mlx5_wq_ll_pop() takes it in that form.
 */
wqe_counter_be = cqe->wqe_counter;
791+
wqe_counter = be16_to_cpu(wqe_counter_be);
792+
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
793+
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
794+
795+
/* NULL skb: packet dropped or consumed by XDP -- still pop the WQE */
skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
796+
if (!skb)
797+
goto wq_ll_pop;
798+
785799
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
800+
napi_gro_receive(rq->cq.napi, skb);
801+
802+
/* success path falls through: the WQE is popped in every case */
wq_ll_pop:
803+
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
804+
&wqe->next.next_wqe_index);
805+
}
806+
807+
/* RX completion handler for e-switch vport representor netdevs
 * (installed by mlx5e_create_rq when mlx5e_is_vf_vport_rep()).
 * Identical to mlx5e_handle_rx_cqe() except that, when the rep has a
 * vlan configured, the vlan header is stripped before the skb is passed
 * up.  NOTE(review): rep->vlan is presumably set when an offloaded TC
 * vlan push/pop rule is in place -- confirm against the eswitch code.
 */
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
808+
{
809+
struct net_device *netdev = rq->netdev;
810+
struct mlx5e_priv *priv = netdev_priv(netdev);
811+
/* for representors, ppriv holds the eswitch rep (see en_main.c) */
struct mlx5_eswitch_rep *rep = priv->ppriv;
812+
struct mlx5e_rx_wqe *wqe;
813+
struct sk_buff *skb;
814+
__be16 wqe_counter_be;
815+
u16 wqe_counter;
816+
u32 cqe_bcnt;
817+
818+
wqe_counter_be = cqe->wqe_counter;
819+
wqe_counter = be16_to_cpu(wqe_counter_be);
820+
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
821+
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
822+
823+
/* NULL skb: packet dropped or consumed by XDP -- still pop the WQE */
skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
824+
if (!skb)
825+
goto wq_ll_pop;
826+
827+
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
828+
829+
/* undo the e-switch vlan push so the VM-facing side never sees it */
if (rep->vlan && skb_vlan_tag_present(skb))
830+
skb_vlan_pop(skb);
831+
832+
napi_gro_receive(rq->cq.napi, skb);
786833

787834
wq_ll_pop:
788835
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
@@ -861,6 +908,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
861908

862909
mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
863910
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
911+
napi_gro_receive(rq->cq.napi, skb);
864912

865913
mpwrq_cqe_out:
866914
if (likely(wi->consumed_strides < rq->mpwqe_num_strides))

0 commit comments

Comments
 (0)