Commit 39a7f4a

praveenkaligineedi authored and davem330 committed
gve: Add XDP REDIRECT support for GQI-QPL format
This patch contains the following changes:

1) Support for the XDP REDIRECT action on rx
2) ndo_xdp_xmit callback support

In the GQI-QPL queue format, the driver must allocate a fixed-size memory region, with the size specified by the vNIC device, for RX/TX, and register this memory as a bounce buffer with the vNIC device when a queue is created. The number of pages in the bounce buffer is limited, and the pages must be made available to the vNIC again by copying the RX data out of them, to prevent head-of-line blocking. XDP_REDIRECT packets are therefore immediately copied to a newly allocated page.

Signed-off-by: Praveen Kaligineedi <[email protected]>
Reviewed-by: Jeroen de Borst <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 75eaae1 commit 39a7f4a
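For context, the REDIRECT support added below is exercised by any XDP program that returns an XDP_REDIRECT verdict. A minimal illustrative program (not part of this commit; the target ifindex 3 is a hypothetical placeholder):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int redirect_all(struct xdp_md *ctx)
{
	/* bpf_redirect() yields an XDP_REDIRECT verdict; with this patch
	 * the gve driver copies the frame out of its QPL bounce buffer
	 * before handing it to xdp_do_redirect(). */
	return bpf_redirect(3, 0);
}

char _license[] SEC("license") = "GPL";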

5 files changed, 138 insertions(+), 17 deletions(-)

drivers/net/ethernet/google/gve/gve.h (13 additions, 2 deletions)

@@ -236,6 +236,7 @@ struct gve_rx_ring {
 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
 	u64 xdp_tx_errors;
 	u64 xdp_redirect_errors;
+	u64 xdp_alloc_fails;
 	u64 xdp_actions[GVE_XDP_ACTIONS];
 	u32 q_num; /* queue index */
 	u32 ntfy_id; /* notification block index */
@@ -247,6 +248,7 @@ struct gve_rx_ring {
 
 	/* XDP stuff */
 	struct xdp_rxq_info xdp_rxq;
+	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
 };
 
 /* A TX desc ring entry */
@@ -267,7 +269,10 @@ struct gve_tx_iovec {
  * ring entry but only used for a pkt_desc not a seg_desc
  */
 struct gve_tx_buffer_state {
-	struct sk_buff *skb; /* skb for this pkt */
+	union {
+		struct sk_buff *skb; /* skb for this pkt */
+		struct xdp_frame *xdp_frame; /* xdp_frame */
+	};
 	struct {
 		u16 size; /* size of xmitted xdp pkt */
 	} xdp;
@@ -385,6 +390,8 @@ struct gve_tx_ring {
 	struct {
 		/* Spinlock for when cleanup in progress */
 		spinlock_t clean_lock;
+		/* Spinlock for XDP tx traffic */
+		spinlock_t xdp_lock;
 	};
 
 	/* DQO fields. */
@@ -462,6 +469,8 @@ struct gve_tx_ring {
 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
+	u64 xdp_xmit;
+	u64 xdp_xmit_errors;
 } ____cacheline_aligned;
 
 /* Wraps the info for one irq including the napi struct and the queues
@@ -919,8 +928,10 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
 /* tx handling */
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
+int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		 u32 flags);
 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
-		     void *data, int len);
+		     void *data, int len, void *frame_p);
 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 bool gve_xdp_poll(struct gve_notify_block *block, int budget);

drivers/net/ethernet/google/gve/gve_ethtool.c (18 additions, 8 deletions)

@@ -56,13 +56,14 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
 	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
 	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
 	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
-	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]",
+	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
 };
 
 static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
 	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
 	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
 	"tx_dma_mapping_error[%u]",
+	"tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
 };
 
 static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
@@ -313,9 +314,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
 				data[i + j] = rx->xdp_actions[j];
 				data[i + j++] = rx->xdp_tx_errors;
 				data[i + j++] = rx->xdp_redirect_errors;
+				data[i + j++] = rx->xdp_alloc_fails;
 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
-			i += GVE_XDP_ACTIONS + 2; /* XDP rx counters */
+			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
 		}
 	} else {
 		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
@@ -371,13 +373,21 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			if (skip_nic_stats) {
 				/* skip NIC tx stats */
 				i += NIC_TX_STATS_REPORT_NUM;
-				continue;
-			}
-			for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
-				u64 value =
-					be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
-				data[i++] = value;
+			} else {
+				stats_idx = tx_qid_to_stats_idx[ring];
+				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
+					u64 value =
+						be64_to_cpu(report_stats[stats_idx + j].value);
+					data[i++] = value;
+				}
 			}
+			do {
+				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
+				data[i] = tx->xdp_xmit;
+				data[i + 1] = tx->xdp_xmit_errors;
+			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
+			i += 2; /* XDP tx counters */
 		}
 	} else {
 		i += num_tx_queues * NUM_GVE_TX_CNTS;
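With these changes, the new per-queue counters surface through the standard ethtool stats interface. A hedged usage sketch (device name, queue indices, and values are illustrative; output abridged):

  $ ethtool -S eth0 | grep xdp
       rx_xdp_alloc_fails[0]: 0
       tx_xdp_xmit[4]: 128
       tx_xdp_xmit_errors[4]: 0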

drivers/net/ethernet/google/gve/gve_main.c (19 additions, 0 deletions)

@@ -1230,6 +1230,21 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
 	}
 }
 
+static void gve_drain_page_cache(struct gve_priv *priv)
+{
+	struct page_frag_cache *nc;
+	int i;
+
+	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+		nc = &priv->rx[i].page_cache;
+		if (nc->va) {
+			__page_frag_cache_drain(virt_to_page(nc->va),
+						nc->pagecnt_bias);
+			nc->va = NULL;
+		}
+	}
+}
+
 static int gve_open(struct net_device *dev)
 {
 	struct gve_priv *priv = netdev_priv(dev);
@@ -1313,6 +1328,7 @@ static int gve_close(struct net_device *dev)
 	netif_carrier_off(dev);
 	if (gve_get_device_rings_ok(priv)) {
 		gve_turndown(priv);
+		gve_drain_page_cache(priv);
 		err = gve_destroy_rings(priv);
 		if (err)
 			goto err;
@@ -1696,6 +1712,7 @@ static const struct net_device_ops gve_netdev_ops = {
 	.ndo_tx_timeout		= gve_tx_timeout,
 	.ndo_set_features	= gve_set_features,
 	.ndo_bpf		= gve_xdp,
+	.ndo_xdp_xmit		= gve_xdp_xmit,
 };
 
 static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -1819,6 +1836,8 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
 {
 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
 		priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
+		priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+		priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
 	} else {
 		priv->dev->xdp_features = 0;
 	}

drivers/net/ethernet/google/gve/gve_rx.c (43 additions, 4 deletions)

@@ -593,6 +593,35 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
 	return skb;
 }
 
+static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
+			    struct xdp_buff *orig, struct bpf_prog *xdp_prog)
+{
+	int total_len, len = orig->data_end - orig->data;
+	int headroom = XDP_PACKET_HEADROOM;
+	struct xdp_buff new;
+	void *frame;
+	int err;
+
+	total_len = headroom + SKB_DATA_ALIGN(len) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
+	if (!frame) {
+		u64_stats_update_begin(&rx->statss);
+		rx->xdp_alloc_fails++;
+		u64_stats_update_end(&rx->statss);
+		return -ENOMEM;
+	}
+	xdp_init_buff(&new, total_len, &rx->xdp_rxq);
+	xdp_prepare_buff(&new, frame, headroom, len, false);
+	memcpy(new.data, orig->data, len);
+
+	err = xdp_do_redirect(dev, &new, xdp_prog);
+	if (err)
+		page_frag_free(frame);
+
+	return err;
+}
+
 static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct xdp_buff *xdp, struct bpf_prog *xprog,
			 int xdp_act)
@@ -609,8 +638,10 @@ static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
 	case XDP_TX:
 		tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
 		tx = &priv->tx[tx_qid];
+		spin_lock(&tx->xdp_lock);
 		err = gve_xdp_xmit_one(priv, tx, xdp->data,
-				       xdp->data_end - xdp->data);
+				       xdp->data_end - xdp->data, NULL);
+		spin_unlock(&tx->xdp_lock);
 
 		if (unlikely(err)) {
 			u64_stats_update_begin(&rx->statss);
@@ -619,9 +650,13 @@ static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
 		}
 		break;
 	case XDP_REDIRECT:
-		u64_stats_update_begin(&rx->statss);
-		rx->xdp_redirect_errors++;
-		u64_stats_update_end(&rx->statss);
+		err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);
+
+		if (unlikely(err)) {
+			u64_stats_update_begin(&rx->statss);
+			rx->xdp_redirect_errors++;
+			u64_stats_update_end(&rx->statss);
+		}
 		break;
 	}
 	u64_stats_update_begin(&rx->statss);
@@ -841,6 +876,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
 static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			     netdev_features_t feat)
 {
+	u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
 	u64 xdp_txs = rx->xdp_actions[XDP_TX];
 	struct gve_rx_ctx *ctx = &rx->ctx;
 	struct gve_priv *priv = rx->gve;
@@ -892,6 +928,9 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
 	if (xdp_txs != rx->xdp_actions[XDP_TX])
 		gve_xdp_tx_flush(priv, rx->q_num);
 
+	if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+		xdp_do_flush();
+
 	/* restock ring slots */
 	if (!rx->data.raw_addressing) {
 		/* In QPL mode buffs are refilled as the desc are processed */
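For a sense of scale in gve_xdp_redirect() (illustrative arithmetic, not from the commit): with 64-byte cache lines, a 1500-byte frame needs total_len = 256 (XDP_PACKET_HEADROOM) + 1536 (SKB_DATA_ALIGN(1500)) + roughly 320 (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), config-dependent), about 2112 bytes, so page_frag_alloc() serves roughly one MTU-sized redirect copy per 4 KB page fragment before refilling.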

drivers/net/ethernet/google/gve/gve_tx.c (45 additions, 3 deletions)

@@ -173,6 +173,10 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
 		pkts++;
 
 		info->xdp.size = 0;
+		if (info->xdp_frame) {
+			xdp_return_frame(info->xdp_frame);
+			info->xdp_frame = NULL;
+		}
 		space_freed += gve_tx_clear_buffer_state(info);
 	}
 
@@ -233,6 +237,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
 	/* Make sure everything is zeroed to start */
 	memset(tx, 0, sizeof(*tx));
 	spin_lock_init(&tx->clean_lock);
+	spin_lock_init(&tx->xdp_lock);
 	tx->q_num = idx;
 
 	tx->mask = slots - 1;
@@ -715,7 +720,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
 }
 
 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
-			   void *data, int len)
+			   void *data, int len, void *frame_p)
 {
 	int pad, nfrags, ndescs, iovi, offset;
 	struct gve_tx_buffer_state *info;
@@ -725,6 +730,7 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
 	if (pad >= GVE_TX_MAX_HEADER_SIZE)
 		pad = 0;
 	info = &tx->info[reqi & tx->mask];
+	info->xdp_frame = frame_p;
 	info->xdp.size = len;
 
 	nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
@@ -759,15 +765,51 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
 	return ndescs;
 }
 
+int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		 u32 flags)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+	struct gve_tx_ring *tx;
+	int i, err = 0, qid;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	qid = gve_xdp_tx_queue_id(priv,
+				  smp_processor_id() % priv->num_xdp_queues);
+
+	tx = &priv->tx[qid];
+
+	spin_lock(&tx->xdp_lock);
+	for (i = 0; i < n; i++) {
+		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
+				       frames[i]->len, frames[i]);
+		if (err)
+			break;
+	}
+
+	if (flags & XDP_XMIT_FLUSH)
+		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+
+	spin_unlock(&tx->xdp_lock);
+
+	u64_stats_update_begin(&tx->statss);
+	tx->xdp_xmit += n;
+	tx->xdp_xmit_errors += n - i;
+	u64_stats_update_end(&tx->statss);
+
+	return i ? i : err;
+}
+
 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
-		     void *data, int len)
+		     void *data, int len, void *frame_p)
 {
 	int nsegs;
 
 	if (!gve_can_tx(tx, len + GVE_TX_MAX_HEADER_SIZE - 1))
 		return -EBUSY;
 
-	nsegs = gve_tx_fill_xdp(priv, tx, data, len);
+	nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p);
 	tx->req += nsegs;
 
 	return 0;
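For reference, the return-value contract gve_xdp_xmit() implements: the driver consumes the first i frames, and the core frees whatever was not accepted. A simplified caller sketch, loosely modeled on the kernel's devmap bulk-flush path (illustrative, not actual core code):

/* Illustrative sketch of the ndo_xdp_xmit contract. The callee owns
 * the first `sent` frames; the caller must free the rest. */
static void example_xdp_flush(struct net_device *dev,
			      struct xdp_frame **frames, int n)
{
	/* ndo_xdp_xmit() returns how many frames the driver queued,
	 * or a negative errno if none were. */
	int i, sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames,
						    XDP_XMIT_FLUSH);

	if (sent < 0)
		sent = 0;
	for (i = sent; i < n; i++)
		xdp_return_frame(frames[i]); /* caller frees the rest */
}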
