
Commit 5fe6e56

LorenzoBianconi authored and davem330 committed
veth: rely on peer veth_rq for ndo_xdp_xmit accounting
Rely on the 'remote' veth_rq to account ndo_xdp_xmit ethtool counters.
Move XDP_TX accounting to the veth_xdp_flush_bq routine.
Remove the 'rx' prefix from the rx XDP ethtool counters.

Signed-off-by: Lorenzo Bianconi <[email protected]>
Acked-by: Toshiaki Makita <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent bd32aa1 commit 5fe6e56

File tree: 1 file changed, +82 -47 lines

drivers/net/veth.c

Lines changed: 82 additions & 47 deletions
@@ -45,8 +45,8 @@ struct veth_stats {
 	u64			xdp_drops;
 	u64			xdp_tx;
 	u64			xdp_tx_err;
-	u64			xdp_xmit;
-	u64			xdp_xmit_err;
+	u64			peer_tq_xdp_xmit;
+	u64			peer_tq_xdp_xmit_err;
 };
 
 struct veth_rq_stats {
@@ -92,17 +92,22 @@ struct veth_q_stat_desc {
 static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
 	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
 	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
-	{ "rx_drops",		VETH_RQ_STAT(rx_drops) },
-	{ "rx_xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
-	{ "rx_xdp_drops",	VETH_RQ_STAT(xdp_drops) },
-	{ "rx_xdp_tx",		VETH_RQ_STAT(xdp_tx) },
-	{ "rx_xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
-	{ "tx_xdp_xmit",	VETH_RQ_STAT(xdp_xmit) },
-	{ "tx_xdp_xmit_errors",	VETH_RQ_STAT(xdp_xmit_err) },
+	{ "drops",		VETH_RQ_STAT(rx_drops) },
+	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
+	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
+	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
+	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
 };
 
 #define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)
 
+static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
+	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
+	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
+};
+
+#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)
+
 static struct {
 	const char string[ETH_GSTRING_LEN];
 } ethtool_stats_keys[] = {
@@ -142,6 +147,14 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 				p += ETH_GSTRING_LEN;
 			}
 		}
+		for (i = 0; i < dev->real_num_tx_queues; i++) {
+			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 "tx_queue_%u_%.18s",
+					 i, veth_tq_stats_desc[j].desc);
+				p += ETH_GSTRING_LEN;
+			}
+		}
 		break;
 	}
 }
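
The strings added above give each tx queue its own pair of xdp_xmit counters. A minimal stand-alone sketch of the resulting naming, assuming a device with two tx queues (the queue count is an illustrative value, not taken from the patch):

#include <stdio.h>

/* Stand-alone sketch, not part of the patch: mirrors the "tx_queue_%u_%.18s"
 * naming built in veth_get_strings() above for the new per-queue counters.
 * The queue count of 2 is an assumed example value.
 */
static const char * const tq_stats_desc[] = { "xdp_xmit", "xdp_xmit_errors" };

int main(void)
{
	unsigned int real_num_tx_queues = 2;	/* hypothetical */
	unsigned int i, j;

	for (i = 0; i < real_num_tx_queues; i++)
		for (j = 0; j < sizeof(tq_stats_desc) / sizeof(tq_stats_desc[0]); j++)
			printf("tx_queue_%u_%.18s\n", i, tq_stats_desc[j]);
	return 0;
}

This prints tx_queue_0_xdp_xmit, tx_queue_0_xdp_xmit_errors, tx_queue_1_xdp_xmit and tx_queue_1_xdp_xmit_errors, which is the per-queue layout ethtool -S would list for these counters.
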
@@ -151,7 +164,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
 	switch (sset) {
 	case ETH_SS_STATS:
 		return ARRAY_SIZE(ethtool_stats_keys) +
-		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
+		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
+		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -160,7 +174,7 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
 static void veth_get_ethtool_stats(struct net_device *dev,
 				   struct ethtool_stats *stats, u64 *data)
 {
-	struct veth_priv *priv = netdev_priv(dev);
+	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
 	struct net_device *peer = rtnl_dereference(priv->peer);
 	int i, j, idx;
 
@@ -181,6 +195,26 @@ static void veth_get_ethtool_stats(struct net_device *dev,
 		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
 		idx += VETH_RQ_STATS_LEN;
 	}
+
+	if (!peer)
+		return;
+
+	rcv_priv = netdev_priv(peer);
+	for (i = 0; i < peer->real_num_rx_queues; i++) {
+		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
+		const void *base = (void *)&rq_stats->vs;
+		unsigned int start, tx_idx = idx;
+		size_t offset;
+
+		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
+		do {
+			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+				offset = veth_tq_stats_desc[j].offset;
+				data[tx_idx + j] += *(u64 *)(base + offset);
+			}
+		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+	}
 }
 
 static const struct ethtool_ops veth_ethtool_ops = {
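
The tx_idx computation above is the core of the accounting change: the ndo_xdp_xmit counters reported for this device are read from the peer's per-rx-queue stats and folded into the local tx_queue_* columns. A minimal stand-alone sketch of the folding, assuming four peer rx queues and two local tx queues (both counts are illustrative values):

#include <stdio.h>

/* Stand-alone sketch, not part of the patch: illustrates the
 * (i % dev->real_num_tx_queues) folding used in veth_get_ethtool_stats()
 * above. The queue counts are assumed example values.
 */
int main(void)
{
	unsigned int peer_rx_queues = 4;	/* hypothetical */
	unsigned int tx_queues = 2;		/* hypothetical */
	unsigned int i;

	for (i = 0; i < peer_rx_queues; i++)
		printf("peer rq[%u] accumulates into tx_queue_%u\n",
		       i, i % tx_queues);
	return 0;
}

With these counts, peer rq 0 and rq 2 add into tx_queue_0 while peer rq 1 and rq 3 add into tx_queue_1, which is why the inner loop above uses += rather than = when filling data[].
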
@@ -301,25 +335,25 @@ static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
 	struct veth_priv *priv = netdev_priv(dev);
 	int i;
 
-	result->xdp_xmit_err = 0;
+	result->peer_tq_xdp_xmit_err = 0;
 	result->xdp_packets = 0;
 	result->xdp_tx_err = 0;
 	result->xdp_bytes = 0;
 	result->rx_drops = 0;
 	for (i = 0; i < dev->num_rx_queues; i++) {
-		u64 packets, bytes, drops, xdp_tx_err, xdp_xmit_err;
+		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
 		struct veth_rq_stats *stats = &priv->rq[i].stats;
 		unsigned int start;
 
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
-			xdp_xmit_err = stats->vs.xdp_xmit_err;
+			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
 			xdp_tx_err = stats->vs.xdp_tx_err;
 			packets = stats->vs.xdp_packets;
 			bytes = stats->vs.xdp_bytes;
 			drops = stats->vs.rx_drops;
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-		result->xdp_xmit_err += xdp_xmit_err;
+		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
 		result->xdp_tx_err += xdp_tx_err;
 		result->xdp_packets += packets;
 		result->xdp_bytes += bytes;
@@ -340,8 +374,8 @@ static void veth_get_stats64(struct net_device *dev,
 	tot->tx_packets = packets;
 
 	veth_stats_rx(&rx, dev);
-	tot->tx_dropped += rx.xdp_xmit_err + rx.xdp_tx_err;
-	tot->rx_dropped = rx.rx_drops;
+	tot->tx_dropped += rx.xdp_tx_err;
+	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
 	tot->rx_bytes = rx.xdp_bytes;
 	tot->rx_packets = rx.xdp_packets;
 
@@ -353,7 +387,8 @@ static void veth_get_stats64(struct net_device *dev,
 		tot->rx_packets += packets;
 
 		veth_stats_rx(&rx, peer);
-		tot->rx_dropped += rx.xdp_xmit_err + rx.xdp_tx_err;
+		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
+		tot->rx_dropped += rx.xdp_tx_err;
 		tot->tx_bytes += rx.xdp_bytes;
 		tot->tx_packets += rx.xdp_packets;
 	}
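
The two hunks above split the dropped-frame accounting by direction: locally recorded XDP_TX failures count as this device's tx_dropped, frames the peer failed to inject via ndo_xdp_xmit (peer_tq_xdp_xmit_err, recorded on the receiving rq) count as this device's rx_dropped, and the mirror-image rule applies when the peer's stats are aggregated. A small worked sketch with made-up counter values:

#include <stdio.h>

/* Stand-alone sketch, not part of the patch: worked example of the
 * veth_get_stats64() drop accounting above, using made-up counter
 * values read from the peer via veth_stats_rx().
 */
int main(void)
{
	unsigned long long peer_tq_xdp_xmit_err = 3;	/* hypothetical */
	unsigned long long peer_xdp_tx_err = 2;		/* hypothetical */
	unsigned long long tx_dropped = 0, rx_dropped = 0;

	/* frames this device handed to the peer but the peer dropped */
	tx_dropped += peer_tq_xdp_xmit_err;
	/* frames the peer tried to XDP_TX toward us but lost */
	rx_dropped += peer_xdp_tx_err;
	printf("tx_dropped=%llu rx_dropped=%llu\n", tx_dropped, rx_dropped);
	return 0;
}
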
@@ -394,38 +429,28 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 			 u32 flags, bool ndo_xmit)
 {
 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
-	unsigned int qidx, max_len;
+	int i, ret = -ENXIO, drops = 0;
 	struct net_device *rcv;
-	int i, ret, drops = n;
+	unsigned int max_len;
 	struct veth_rq *rq;
 
-	rcu_read_lock();
-	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
-		rcu_read_unlock();
-		atomic64_add(drops, &priv->dropped);
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
-	}
 
+	rcu_read_lock();
 	rcv = rcu_dereference(priv->peer);
-	if (unlikely(!rcv)) {
-		rcu_read_unlock();
-		atomic64_add(drops, &priv->dropped);
-		return -ENXIO;
-	}
+	if (unlikely(!rcv))
+		goto out;
 
 	rcv_priv = netdev_priv(rcv);
-	qidx = veth_select_rxq(rcv);
-	rq = &rcv_priv->rq[qidx];
+	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
 	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
 	 * side. This means an XDP program is loaded on the peer and the peer
 	 * device is up.
 	 */
-	if (!rcu_access_pointer(rq->xdp_prog)) {
-		ret = -ENXIO;
-		goto drop;
-	}
+	if (!rcu_access_pointer(rq->xdp_prog))
+		goto out;
 
-	drops = 0;
 	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
 
 	spin_lock(&rq->xdp_ring.producer_lock);
@@ -445,18 +470,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 		__veth_xdp_flush(rq);
 
 	ret = n - drops;
-drop:
-	rq = &priv->rq[qidx];
-	u64_stats_update_begin(&rq->stats.syncp);
 	if (ndo_xmit) {
-		rq->stats.vs.xdp_xmit += n - drops;
-		rq->stats.vs.xdp_xmit_err += drops;
-	} else {
-		rq->stats.vs.xdp_tx += n - drops;
-		rq->stats.vs.xdp_tx_err += drops;
+		u64_stats_update_begin(&rq->stats.syncp);
+		rq->stats.vs.peer_tq_xdp_xmit += n - drops;
+		rq->stats.vs.peer_tq_xdp_xmit_err += drops;
+		u64_stats_update_end(&rq->stats.syncp);
 	}
-	u64_stats_update_end(&rq->stats.syncp);
 
+out:
 	rcu_read_unlock();
 
 	return ret;
@@ -465,7 +486,16 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
 			     struct xdp_frame **frames, u32 flags)
 {
-	return veth_xdp_xmit(dev, n, frames, flags, true);
+	int err;
+
+	err = veth_xdp_xmit(dev, n, frames, flags, true);
+	if (err < 0) {
+		struct veth_priv *priv = netdev_priv(dev);
+
+		atomic64_add(n, &priv->dropped);
+	}
+
+	return err;
 }
 
 static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
@@ -481,6 +511,11 @@ static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
 	}
 	trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
 
+	u64_stats_update_begin(&rq->stats.syncp);
+	rq->stats.vs.xdp_tx += sent;
+	rq->stats.vs.xdp_tx_err += bq->count - sent;
+	u64_stats_update_end(&rq->stats.syncp);
+
 	bq->count = 0;
 }
 