|
3 | 3 | #include <linux/netdevice.h>
|
4 | 4 | #include <net/netdev_queues.h>
|
5 | 5 | #include <net/netdev_rx_queue.h>
|
| 6 | +#include <net/page_pool/memory_provider.h> |
6 | 7 |
|
7 | 8 | #include "page_pool_priv.h"
|
8 | 9 |
|
@@ -80,3 +81,71 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
|
80 | 81 | return err;
|
81 | 82 | }
|
82 | 83 | EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
|
| 84 | + |
| 85 | +static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, |
| 86 | + struct pp_memory_provider_params *p) |
| 87 | +{ |
| 88 | + struct netdev_rx_queue *rxq; |
| 89 | + int ret; |
| 90 | + |
| 91 | + if (ifq_idx >= dev->real_num_rx_queues) |
| 92 | + return -EINVAL; |
| 93 | + ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues); |
| 94 | + |
| 95 | + rxq = __netif_get_rx_queue(dev, ifq_idx); |
| 96 | + if (rxq->mp_params.mp_ops) |
| 97 | + return -EEXIST; |
| 98 | + |
| 99 | + rxq->mp_params = *p; |
| 100 | + ret = netdev_rx_queue_restart(dev, ifq_idx); |
| 101 | + if (ret) { |
| 102 | + rxq->mp_params.mp_ops = NULL; |
| 103 | + rxq->mp_params.mp_priv = NULL; |
| 104 | + } |
| 105 | + return ret; |
| 106 | +} |
| 107 | + |
/* Bind a memory provider to an rx queue, taking rtnl_lock for the caller.
 * Thin locked wrapper around __net_mp_open_rxq(); see it for semantics.
 */
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
		    struct pp_memory_provider_params *p)
{
	int err;

	rtnl_lock();
	err = __net_mp_open_rxq(dev, ifq_idx, p);
	rtnl_unlock();

	return err;
}
| 118 | + |
/* Unbind a memory provider from an rx queue and restart the queue.
 *
 * Caller must hold rtnl_lock (see net_mp_close_rxq()).  @old_p carries
 * the provider the caller believes is installed; the unbind is refused
 * (with a one-time warning) if the queue's current binding disagrees.
 */
static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
			       struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went thru shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	/* Never tear down a provider the caller does not own. */
	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	/* Clear the binding first so the restart brings the queue back
	 * up without the provider.  A restart failure here is only
	 * reported, not recovered from — hence the WARN.
	 */
	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
}
| 144 | + |
/* Unbind a memory provider from an rx queue, taking rtnl_lock for the
 * caller.  Thin locked wrapper around __net_mp_close_rxq().
 */
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	rtnl_lock();
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	rtnl_unlock();
}
0 commit comments