Commit 5d053f9

netoptimizer authored and Alexei Starovoitov committed
bpf: devmap prepare xdp frames for bulking
Like cpumap, create a queue for xdp frames that will be bulked. For now, this patch simply invokes ndo_xdp_xmit for each frame. This happens either when the map flush operation is invoked, or when the DEV_MAP_BULK_SIZE limit is reached.

V5: Avoid memleak on error path in dev_map_update_elem()

Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
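The mechanism is easiest to see stripped of the kernel plumbing: each destination keeps a small array of pending frame pointers, enqueue appends until the array is full, and a flush drains everything that has accumulated. Below is a minimal, userspace-only sketch of that bulking pattern in plain C; the frame struct, transmit() callback and the 16-entry size are illustrative stand-ins for struct xdp_frame, ndo_xdp_xmit and DEV_MAP_BULK_SIZE, not kernel API.

#include <stdio.h>

#define BULK_SIZE 16

struct frame { int id; };              /* stand-in for struct xdp_frame */

struct bulk_queue {
	struct frame *q[BULK_SIZE];
	unsigned int count;
};

/* Stand-in for ndo_xdp_xmit: send one frame. */
static void transmit(struct frame *f)
{
	printf("xmit frame %d\n", f->id);
}

/* Drain all queued frames, mirroring bq_xmit_all(). */
static void bq_flush(struct bulk_queue *bq)
{
	for (unsigned int i = 0; i < bq->count; i++)
		transmit(bq->q[i]);
	bq->count = 0;
}

/* Queue one frame, flushing first if the queue is full, like bq_enqueue(). */
static void bq_enqueue(struct bulk_queue *bq, struct frame *f)
{
	if (bq->count == BULK_SIZE)
		bq_flush(bq);
	bq->q[bq->count++] = f;
}

int main(void)
{
	struct bulk_queue bq = { .count = 0 };
	struct frame frames[40];

	for (int i = 0; i < 40; i++) {
		frames[i].id = i;
		bq_enqueue(&bq, &frames[i]);   /* auto-flushes every 16 frames */
	}
	bq_flush(&bq);                         /* final flush, like __dev_map_flush() */
	return 0;
}

In the actual patch the queue is additionally per-CPU (allocated with __alloc_percpu_gfp and reached via this_cpu_ptr) and is only touched under RCU and NAPI/softirq protection, which is what makes the unlocked per-CPU access in bq_enqueue() safe.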
1 parent 67f29e0 commit 5d053f9


kernel/bpf/devmap.c

Lines changed: 70 additions & 4 deletions
@@ -55,10 +55,17 @@
 #define DEV_CREATE_FLAG_MASK \
 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 
+#define DEV_MAP_BULK_SIZE 16
+struct xdp_bulk_queue {
+	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
+	unsigned int count;
+};
+
 struct bpf_dtab_netdev {
 	struct net_device *dev; /* must be first member, due to tracepoint */
 	struct bpf_dtab *dtab;
 	unsigned int bit;
+	struct xdp_bulk_queue __percpu *bulkq;
 	struct rcu_head rcu;
 };
 
@@ -208,6 +215,34 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 	__set_bit(bit, bitmap);
 }
 
+static int bq_xmit_all(struct bpf_dtab_netdev *obj,
+		       struct xdp_bulk_queue *bq)
+{
+	struct net_device *dev = obj->dev;
+	int i;
+
+	if (unlikely(!bq->count))
+		return 0;
+
+	for (i = 0; i < bq->count; i++) {
+		struct xdp_frame *xdpf = bq->q[i];
+
+		prefetch(xdpf);
+	}
+
+	for (i = 0; i < bq->count; i++) {
+		struct xdp_frame *xdpf = bq->q[i];
+		int err;
+
+		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
+		if (err)
+			xdp_return_frame(xdpf);
+	}
+	bq->count = 0;
+
+	return 0;
+}
+
 /* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
  * from the driver before returning from its napi->poll() routine. The poll()
  * routine is called either from busy_poll context or net_rx_action signaled
@@ -223,6 +258,7 @@ void __dev_map_flush(struct bpf_map *map)
 
 	for_each_set_bit(bit, bitmap, map->max_entries) {
 		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
+		struct xdp_bulk_queue *bq;
 		struct net_device *netdev;
 
 		/* This is possible if the dev entry is removed by user space
@@ -232,6 +268,9 @@ void __dev_map_flush(struct bpf_map *map)
 			continue;
 
 		__clear_bit(bit, bitmap);
+
+		bq = this_cpu_ptr(dev->bulkq);
+		bq_xmit_all(dev, bq);
 		netdev = dev->dev;
 		if (likely(netdev->netdev_ops->ndo_xdp_flush))
 			netdev->netdev_ops->ndo_xdp_flush(netdev);
@@ -254,6 +293,20 @@ struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 	return obj;
 }
 
+/* Runs under RCU-read-side, plus in softirq under NAPI protection.
+ * Thus, safe percpu variable access.
+ */
+static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
+{
+	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
+
+	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
+		bq_xmit_all(obj, bq);
+
+	bq->q[bq->count++] = xdpf;
+	return 0;
+}
+
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
 {
 	struct net_device *dev = dst->dev;
@@ -266,8 +319,7 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp)
 	if (unlikely(!xdpf))
 		return -EOVERFLOW;
 
-	/* TODO: implement a bulking/enqueue step later */
-	return dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
+	return bq_enqueue(dst, xdpf);
 }
 
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
@@ -282,13 +334,18 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 {
 	if (dev->dev->netdev_ops->ndo_xdp_flush) {
 		struct net_device *fl = dev->dev;
+		struct xdp_bulk_queue *bq;
 		unsigned long *bitmap;
+
 		int cpu;
 
 		for_each_online_cpu(cpu) {
 			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
 			__clear_bit(dev->bit, bitmap);
 
+			bq = per_cpu_ptr(dev->bulkq, cpu);
+			bq_xmit_all(dev, bq);
+
 			fl->netdev_ops->ndo_xdp_flush(dev->dev);
 		}
 	}
@@ -300,6 +357,7 @@ static void __dev_map_entry_free(struct rcu_head *rcu)
 
 	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
 	dev_map_flush_old(dev);
+	free_percpu(dev->bulkq);
 	dev_put(dev->dev);
 	kfree(dev);
 }
@@ -332,6 +390,7 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	struct net *net = current->nsproxy->net_ns;
+	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
 	struct bpf_dtab_netdev *dev, *old_dev;
 	u32 i = *(u32 *)key;
 	u32 ifindex = *(u32 *)value;
@@ -346,13 +405,20 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 	if (!ifindex) {
 		dev = NULL;
 	} else {
-		dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
-				   map->numa_node);
+		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
 		if (!dev)
 			return -ENOMEM;
 
+		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
+						sizeof(void *), gfp);
+		if (!dev->bulkq) {
+			kfree(dev);
+			return -ENOMEM;
+		}
+
 		dev->dev = dev_get_by_index(net, ifindex);
 		if (!dev->dev) {
+			free_percpu(dev->bulkq);
 			kfree(dev);
 			return -EINVAL;
 		}
