Skip to content

Commit cc5b114

Browse files
borkmann authored and Jeff Kirsher committed
bpf, i40e: add meta data support
Add support for XDP meta data when using build skb variant of the i40e driver. Implementation is analogous to the existing ixgbe and ixgbevf support for meta data from 366a88f ("bpf, ixgbe: add meta data support") and be83333 ("ixgbevf: Add support for meta data"). With the build skb variant we get 192 bytes of extra headroom which can be used for encaps or meta data. Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: John Fastabend <[email protected]> Tested-by: John Fastabend <[email protected]> Signed-off-by: Jeff Kirsher <[email protected]>
1 parent e9c7218 commit cc5b114

File tree

1 file changed

+31
-8
lines changed

1 file changed

+31
-8
lines changed

drivers/net/ethernet/intel/i40e/i40e_txrx.c

Lines changed: 31 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2032,6 +2032,21 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
20322032
#if L1_CACHE_BYTES < 128
20332033
prefetch(xdp->data + L1_CACHE_BYTES);
20342034
#endif
2035+
/* Note, we get here by enabling legacy-rx via:
2036+
*
2037+
* ethtool --set-priv-flags <dev> legacy-rx on
2038+
*
2039+
* In this mode, we currently get 0 extra XDP headroom as
2040+
* opposed to having legacy-rx off, where we process XDP
2041+
* packets going to stack via i40e_build_skb(). The latter
2042+
* provides us currently with 192 bytes of headroom.
2043+
*
2044+
* For i40e_construct_skb() mode it means that the
2045+
* xdp->data_meta will always point to xdp->data, since
2046+
* the helper cannot expand the head. Should this ever
2047+
* change in future for legacy-rx mode on, then lets also
2048+
* add xdp->data_meta handling here.
2049+
*/
20352050

20362051
/* allocate a skb to store the frags */
20372052
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
@@ -2083,28 +2098,36 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
20832098
struct i40e_rx_buffer *rx_buffer,
20842099
struct xdp_buff *xdp)
20852100
{
2086-
unsigned int size = xdp->data_end - xdp->data;
2101+
unsigned int metasize = xdp->data - xdp->data_meta;
20872102
#if (PAGE_SIZE < 8192)
20882103
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
20892104
#else
20902105
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2091-
SKB_DATA_ALIGN(I40E_SKB_PAD + size);
2106+
SKB_DATA_ALIGN(I40E_SKB_PAD +
2107+
(xdp->data_end -
2108+
xdp->data_hard_start));
20922109
#endif
20932110
struct sk_buff *skb;
20942111

2095-
/* prefetch first cache line of first page */
2096-
prefetch(xdp->data);
2112+
/* Prefetch first cache line of first page. If xdp->data_meta
2113+
* is unused, this points exactly as xdp->data, otherwise we
2114+
* likely have a consumer accessing first few bytes of meta
2115+
* data, and then actual data.
2116+
*/
2117+
prefetch(xdp->data_meta);
20972118
#if L1_CACHE_BYTES < 128
2098-
prefetch(xdp->data + L1_CACHE_BYTES);
2119+
prefetch(xdp->data_meta + L1_CACHE_BYTES);
20992120
#endif
21002121
/* build an skb around the page buffer */
21012122
skb = build_skb(xdp->data_hard_start, truesize);
21022123
if (unlikely(!skb))
21032124
return NULL;
21042125

21052126
/* update pointers within the skb to store the data */
2106-
skb_reserve(skb, I40E_SKB_PAD);
2107-
__skb_put(skb, size);
2127+
skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
2128+
__skb_put(skb, xdp->data_end - xdp->data);
2129+
if (metasize)
2130+
skb_metadata_set(skb, metasize);
21082131

21092132
/* buffer is used by skb, update page_offset */
21102133
#if (PAGE_SIZE < 8192)
@@ -2341,7 +2364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
23412364
if (!skb) {
23422365
xdp.data = page_address(rx_buffer->page) +
23432366
rx_buffer->page_offset;
2344-
xdp_set_data_meta_invalid(&xdp);
2367+
xdp.data_meta = xdp.data;
23452368
xdp.data_hard_start = xdp.data -
23462369
i40e_rx_offset(rx_ring);
23472370
xdp.data_end = xdp.data + size;

0 commit comments

Comments
 (0)