@@ -47,6 +47,10 @@
 
 #define GVE_RX_BUFFER_SIZE_DQO 2048
 
+#define GVE_XDP_ACTIONS 5
+
+#define GVE_TX_MAX_HEADER_SIZE 182
+
 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
 struct gve_rx_desc_queue {
 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
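Note: GVE_XDP_ACTIONS is 5, which matches the five verdicts of the uapi enum xdp_action (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT), so the per-ring xdp_actions[] counter added below can be indexed directly by a program's return code. A compile-time guard making that dependency explicit would look like this (a sketch, not part of this patch; needs linux/build_bug.h and the uapi bpf header):

    /* Sketch only: pin GVE_XDP_ACTIONS to the last uapi XDP verdict. */
    static_assert(GVE_XDP_ACTIONS == XDP_REDIRECT + 1);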
@@ -230,14 +234,19 @@ struct gve_rx_ring {
 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
-
+	u64 xdp_tx_errors;
+	u64 xdp_redirect_errors;
+	u64 xdp_actions[GVE_XDP_ACTIONS];
 	u32 q_num; /* queue index */
 	u32 ntfy_id; /* notification block index */
 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
 
 	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
+
+	/* XDP stuff */
+	struct xdp_rxq_info xdp_rxq;
 };
 
 /* A TX desc ring entry */
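The new u64 counters sit next to the existing free-running rx_frag_* stats and can use the same statss sequence counter for 32-bit readers. A hypothetical update helper, assuming the action code comes straight from bpf_prog_run_xdp():

    /* Sketch only: bump the per-ring verdict counter under statss. */
    static void gve_rx_count_xdp_act(struct gve_rx_ring *rx, u32 xdp_act)
    {
            u64_stats_update_begin(&rx->statss);
            if (xdp_act < GVE_XDP_ACTIONS)
                    rx->xdp_actions[xdp_act]++;
            u64_stats_update_end(&rx->statss);
    }

The struct xdp_rxq_info member (net/xdp.h) is the per-RX-queue registration handle the XDP core requires before frames from this ring can be passed to a program or redirected.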
@@ -259,6 +268,9 @@ struct gve_tx_iovec {
  */
 struct gve_tx_buffer_state {
 	struct sk_buff *skb; /* skb for this pkt */
+	struct {
+		u16 size; /* size of xmitted xdp pkt */
+	} xdp;
 	union {
 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
 		struct {
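Packets transmitted by XDP have no skb, so the completion path needs another way to account bytes; recording the frame length at xmit time in the buffer state is the apparent purpose of xdp.size. A sketch of the assumed completion-side use, with tx->info[] as a hypothetical per-slot array of gve_tx_buffer_state:

    /* Sketch only: bytes for an XDP slot come from xdp.size, not an skb. */
    bytes += tx->info[idx].xdp.size;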
@@ -526,9 +538,11 @@ struct gve_priv {
 	u16 rx_data_slot_cnt; /* rx buffer length */
 	u64 max_registered_pages;
 	u64 num_registered_pages; /* num pages registered with NIC */
+	struct bpf_prog *xdp_prog; /* XDP BPF program */
 	u32 rx_copybreak; /* copy packets smaller than this */
 	u16 default_num_queues; /* default num queues to set up */
 
+	u16 num_xdp_queues;
 	struct gve_queue_config tx_cfg;
 	struct gve_queue_config rx_cfg;
 	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
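xdp_prog is the pointer the RX hot path will test on every packet; the usual pattern (an assumption here, the RX path is not in this header) reads it once and runs the program only when one is attached:

    /* Sketch only: xdp is a struct xdp_buff built from the RX buffer. */
    struct bpf_prog *xprog = READ_ONCE(priv->xdp_prog);
    u32 xdp_act = XDP_PASS;

    if (xprog)
            xdp_act = bpf_prog_run_xdp(xprog, &xdp);

num_xdp_queues feeds the queue and QPL accounting helpers changed below.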
@@ -785,7 +799,17 @@ static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
 		return 0;
 
-	return priv->tx_cfg.num_queues;
+	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+}
+
+/* Returns the number of XDP tx queue page lists
+ */
+static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
+{
+	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+		return 0;
+
+	return priv->num_xdp_queues;
+}
 
 /* Returns the number of rx queue page lists
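In the GQI-QPL queue format every queue owns a queue page list registered with the device, so the XDP TX queues consume QPL IDs of their own and gve_num_tx_qpls() now covers both the stack's TX queues and the XDP ones. A plausible total, assuming gve_num_rx_qpls() exists as the truncated comment above suggests:

    /* Sketch only: total QPL IDs = TX (stack + XDP) plus RX. */
    static inline u32 gve_num_qpls(struct gve_priv *priv)
    {
            return gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
    }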
@@ -874,7 +898,17 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
 
 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
 {
-	return priv->tx_cfg.num_queues;
+	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+}
+
+static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
+{
+	return priv->tx_cfg.num_queues + queue_id;
+}
+
+static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
+{
+	return gve_xdp_tx_queue_id(priv, 0);
 }
 
 /* buffers */
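These helpers imply a flat TX queue numbering: IDs [0, tx_cfg.num_queues) belong to the stack and IDs [tx_cfg.num_queues, tx_cfg.num_queues + num_xdp_queues) to XDP. With four queues of each kind, for example, RX queue 2 would map to XDP TX queue gve_xdp_tx_queue_id(priv, 2) == 6. A sketch of the assumed per-RX-queue pairing (priv->tx[] as the TX ring array is an assumption, not shown in this hunk):

    /* Sketch only: give each RX queue a private XDP TX ring so XDP_TX
     * never contends with the stack's own TX queues.
     */
    static inline struct gve_tx_ring *
    gve_rx_to_xdp_tx_ring(struct gve_priv *priv, struct gve_rx_ring *rx)
    {
            return &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
    }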
@@ -885,7 +919,11 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 		   enum dma_data_direction);
 /* tx handling */
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
+int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
+		     void *data, int len);
+void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
+bool gve_xdp_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
 void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
 u32 gve_tx_load_event_counter(struct gve_priv *priv,
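The new prototypes split XDP transmit into a per-packet enqueue (gve_xdp_xmit_one) and a doorbell-style flush (gve_xdp_tx_flush), with gve_xdp_poll cleaning completions on the XDP rings. A hypothetical XDP_TX caller, with the ring lookup via priv->tx[] and any locking assumed rather than taken from this header:

    /* Sketch only: enqueue one frame, then kick the ring. */
    static int gve_xdp_tx_one(struct gve_priv *priv, struct gve_rx_ring *rx,
                              struct xdp_buff *xdp)
    {
            u32 qid = gve_xdp_tx_queue_id(priv, rx->q_num);
            int err;

            err = gve_xdp_xmit_one(priv, &priv->tx[qid], xdp->data,
                                   xdp->data_end - xdp->data);
            if (err)
                    return err;

            gve_xdp_tx_flush(priv, qid);
            return 0;
    }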