
Commit 0536b85

Björn Töpel authored and Alexei Starovoitov committed
xdp: Simplify devmap cleanup
After the RCU flavor consolidation [1], call_rcu() and synchronize_rcu() also wait for preempt-disable regions (NAPI) in addition to the read-side critical sections. As a result, the cleanup code in devmap can be simplified:

* There is no longer a need to flush in __dev_map_entry_free(), since we know that the flush has been done by the time the call_rcu() callback is triggered.

* When freeing the map, there is no need to explicitly wait for a flush. It is guaranteed to be done after the synchronize_rcu() call in dev_map_free(). The rcu_barrier() is still needed, so that the map is not freed prior to the elements.

[1] https://lwn.net/Articles/777036/

Signed-off-by: Björn Töpel <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Toke Høiland-Jørgensen <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
1 parent 5bf2fc1 commit 0536b85
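
The simplification rests on the consolidated-RCU guarantee that a call_rcu() callback runs only after every RCU read-side critical section and every preempt-disabled (NAPI) region that could still reference the element has finished. A minimal sketch of that pattern follows; it is illustrative only, not code from this patch, and the elem, scratch, elem_free_rcu, and elem_delete names are invented for the example:

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative only: an element whose per-CPU state may still be in
 * use from a preempt-disabled (NAPI) context when it is deleted.
 */
struct elem {
	int __percpu *scratch;
	struct rcu_head rcu;
};

static void elem_free_rcu(struct rcu_head *rcu)
{
	struct elem *e = container_of(rcu, struct elem, rcu);

	/* With consolidated RCU, no NAPI poll can still be touching
	 * e->scratch here, so it can be freed without a prior flush.
	 */
	free_percpu(e->scratch);
	kfree(e);
}

static void elem_delete(struct elem *e)
{
	/* Caller has already unpublished e (e.g. xchg()'d it out of
	 * the map), so only in-flight readers remain.
	 */
	call_rcu(&e->rcu, elem_free_rcu);
}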


kernel/bpf/devmap.c

Lines changed: 5 additions & 38 deletions
@@ -201,7 +201,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 static void dev_map_free(struct bpf_map *map)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	int i, cpu;
+	int i;
 
 	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
 	 * so the programs (can be more than one that used this map) were
@@ -221,18 +221,6 @@ static void dev_map_free(struct bpf_map *map)
 	/* Make sure prior __dev_map_entry_free() have completed. */
 	rcu_barrier();
 
-	/* To ensure all pending flush operations have completed wait for flush
-	 * list to empty on _all_ cpus.
-	 * Because the above synchronize_rcu() ensures the map is disconnected
-	 * from the program we can assume no new items will be added.
-	 */
-	for_each_online_cpu(cpu) {
-		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);
-
-		while (!list_empty(flush_list))
-			cond_resched();
-	}
-
 	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 		for (i = 0; i < dtab->n_buckets; i++) {
 			struct bpf_dtab_netdev *dev;
@@ -345,8 +333,7 @@ static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
 	return -ENOENT;
 }
 
-static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
-		       bool in_napi_ctx)
+static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags)
 {
 	struct bpf_dtab_netdev *obj = bq->obj;
 	struct net_device *dev = obj->dev;
@@ -384,11 +371,7 @@
 	for (i = 0; i < bq->count; i++) {
 		struct xdp_frame *xdpf = bq->q[i];
 
-		/* RX path under NAPI protection, can return frames faster */
-		if (likely(in_napi_ctx))
-			xdp_return_frame_rx_napi(xdpf);
-		else
-			xdp_return_frame(xdpf);
+		xdp_return_frame_rx_napi(xdpf);
 		drops++;
 	}
 	goto out;
@@ -409,7 +392,7 @@ void __dev_map_flush(struct bpf_map *map)
 
 	rcu_read_lock();
 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
-		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
+		bq_xmit_all(bq, XDP_XMIT_FLUSH);
 	rcu_read_unlock();
 }
 
@@ -440,7 +423,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(bq, 0, true);
+		bq_xmit_all(bq, 0);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -509,27 +492,11 @@ static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
 	return dev ? &dev->ifindex : NULL;
 }
 
-static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
-{
-	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
-		struct xdp_bulk_queue *bq;
-		int cpu;
-
-		rcu_read_lock();
-		for_each_online_cpu(cpu) {
-			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
-		}
-		rcu_read_unlock();
-	}
-}
-
 static void __dev_map_entry_free(struct rcu_head *rcu)
 {
 	struct bpf_dtab_netdev *dev;
 
 	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
-	dev_map_flush_old(dev);
 	free_percpu(dev->bulkq);
 	dev_put(dev->dev);
 	kfree(dev);