@@ -17,6 +17,7 @@
 #include <net/rtnetlink.h>
 #include <net/dst.h>
 #include <net/xfrm.h>
+#include <net/xdp.h>
 #include <linux/veth.h>
 #include <linux/module.h>
 #include <linux/bpf.h>
@@ -125,6 +126,11 @@ static void *veth_ptr_to_xdp(void *ptr)
 	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
 }
 
+static void *veth_xdp_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+}
+
 static void veth_ptr_free(void *ptr)
 {
 	if (veth_is_xdp_frame(ptr))
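Note on the helper: veth_xdp_to_ptr() added here is the inverse of veth_ptr_to_xdp() above. It tags an xdp_frame pointer with a flag bit (VETH_XDP_FLAG) so the consumer side of the shared ptr_ring can tell XDP frames apart from sk_buff pointers. Below is a minimal, compilable user-space sketch of the same low-bit tagging idiom; TAG_FLAG and all names are illustrative, not taken from the driver.

/* Low-bit pointer tagging: works because malloc'd (and kmalloc'd)
 * objects are at least 2-byte aligned, so bit 0 of a valid pointer
 * is always zero and can carry a type flag.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_FLAG 0x1UL	/* plays the role of VETH_XDP_FLAG */

static void *tag_ptr(void *p)
{
	return (void *)((uintptr_t)p | TAG_FLAG);
}

static int is_tagged(void *p)
{
	return (uintptr_t)p & TAG_FLAG;
}

static void *untag_ptr(void *p)
{
	return (void *)((uintptr_t)p & ~TAG_FLAG);
}

int main(void)
{
	long *frame = malloc(sizeof(*frame));
	void *ptr = tag_ptr(frame);	/* what would go into the ring */

	assert(is_tagged(ptr));			/* consumer: is it a frame? */
	assert(untag_ptr(ptr) == (void *)frame);	/* recover the pointer */
	printf("round-trip ok\n");
	free(frame);
	return 0;
}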
@@ -267,6 +273,50 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
 	return skb;
 }
 
+static int veth_xdp_xmit(struct net_device *dev, int n,
+			 struct xdp_frame **frames, u32 flags)
+{
+	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+	struct net_device *rcv;
+	unsigned int max_len;
+	int i, drops = 0;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	rcv = rcu_dereference(priv->peer);
+	if (unlikely(!rcv))
+		return -ENXIO;
+
+	rcv_priv = netdev_priv(rcv);
+	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
+	 * side. This means an XDP program is loaded on the peer and the peer
+	 * device is up.
+	 */
+	if (!rcu_access_pointer(rcv_priv->xdp_prog))
+		return -ENXIO;
+
+	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
+
+	spin_lock(&rcv_priv->xdp_ring.producer_lock);
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *frame = frames[i];
+		void *ptr = veth_xdp_to_ptr(frame);
+
+		if (unlikely(frame->len > max_len ||
+			     __ptr_ring_produce(&rcv_priv->xdp_ring, ptr))) {
+			xdp_return_frame_rx_napi(frame);
+			drops++;
+		}
+	}
+	spin_unlock(&rcv_priv->xdp_ring.producer_lock);
+
+	if (flags & XDP_XMIT_FLUSH)
+		__veth_xdp_flush(rcv_priv);
+
+	return n - drops;
+}
+
 static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv,
 					struct xdp_frame *frame)
 {
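For context, veth_xdp_xmit() is not called from within the driver itself; core code such as the XDP redirect path reaches it through the ndo_xdp_xmit hook registered below. The sketch that follows shows a plausible caller under the assumptions stated in the comments; the function name example_flush_batch is hypothetical, and the real caller is the kernel's redirect machinery.

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical caller sketch. Assumes it runs under rcu_read_lock(),
 * as NAPI/softirq context does; that is what makes the
 * rcu_dereference(priv->peer) in veth_xdp_xmit() above safe.
 */
static int example_flush_batch(struct net_device *dev,
			       struct xdp_frame **frames, int n)
{
	int sent;

	/* XDP_XMIT_FLUSH asks the driver to kick the peer's NAPI now
	 * instead of deferring the wakeup to a later flush call.
	 */
	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
	if (sent < 0)
		return sent;	/* -EINVAL or -ENXIO: nothing was queued */

	/* On success the return value is n - drops. Oversized frames and
	 * frames that hit a full ring were already freed through
	 * xdp_return_frame_rx_napi(), so the caller must not reuse them.
	 */
	return sent;
}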
@@ -769,6 +819,7 @@ static const struct net_device_ops veth_netdev_ops = {
 	.ndo_features_check	= passthru_features_check,
 	.ndo_set_rx_headroom	= veth_set_rx_headroom,
 	.ndo_bpf		= veth_xdp,
+	.ndo_xdp_xmit		= veth_xdp_xmit,
 };
 
 #define VETH_FEATURES	(NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
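With .ndo_xdp_xmit registered, a veth device becomes a valid target for XDP_REDIRECT. A minimal XDP program that would exercise the new path might look like the sketch below; the ifindex, the section name, and the helper header path are illustrative assumptions, not part of this commit.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* assumed libbpf header providing SEC() */

SEC("xdp")
int redirect_to_veth(struct xdp_md *ctx)
{
	/* 5 is a placeholder ifindex for the veth device; on success the
	 * core redirect code ends up calling veth_xdp_xmit() on it.
	 */
	return bpf_redirect(5, 0);
}

char _license[] SEC("license") = "GPL";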