Commit 1a7f512

Eran Ben Elisha authored and Saeed Mahameed committed
net/mlx5e: Split SW group counters update function
SW group counter update function aggregates SW stats out of the many mlx5e_*_stats structs residing in a given mlx5e_channel_stats struct. Split the function into a few helper functions. This will be used later in the series to calculate specific mlx5e_*_stats which are not defined inside mlx5e_channel_stats.

Signed-off-by: Eran Ben Elisha <[email protected]>
Reviewed-by: Tariq Toukan <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
1 parent 0b676aa commit 1a7f512
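For context before reading the diff: the shape of this refactor is a common one. A single monolithic per-channel accumulation loop becomes a set of per-source-struct helpers that the loop then calls, so each helper can later be reused for stats instances that live outside mlx5e_channel_stats. Below is a minimal, self-contained C sketch of that pattern; the struct layouts, field names, and function names (rq_stats, sw_update_rq, etc.) are simplified hypothetical stand-ins for illustration, not the actual mlx5e definitions, which appear in the diff that follows.

#include <stdint.h>

/* Hypothetical, simplified stand-ins for the mlx5e stats structs. */
struct rq_stats { uint64_t packets, bytes; };
struct ch_stats { uint64_t events, poll; };
struct channel_stats { struct rq_stats rq; struct ch_stats ch; };
struct sw_stats { uint64_t rx_packets, rx_bytes, ch_events, ch_poll; };

/* One helper per source struct, mirroring the commit's split. */
static void sw_update_rq(struct sw_stats *s, const struct rq_stats *rq)
{
	s->rx_packets += rq->packets;
	s->rx_bytes += rq->bytes;
}

static void sw_update_ch(struct sw_stats *s, const struct ch_stats *ch)
{
	s->ch_events += ch->events;
	s->ch_poll += ch->poll;
}

/* The aggregation loop shrinks to a sequence of helper calls; each helper
 * can also be invoked on a stats struct that is not part of
 * struct channel_stats, which is what the rest of the series needs. */
static void sw_update(struct sw_stats *s,
		      const struct channel_stats *chs, int nch)
{
	int i;

	for (i = 0; i < nch; i++) {
		sw_update_rq(s, &chs[i].rq);
		sw_update_ch(s, &chs[i].ch);
	}
}

int main(void)
{
	struct channel_stats chs[2] = {
		{ .rq = { .packets = 3, .bytes = 300 }, .ch = { .events = 1 } },
		{ .rq = { .packets = 2, .bytes = 200 }, .ch = { .poll = 4 } },
	};
	struct sw_stats s = { 0 };

	sw_update(&s, chs, 2);
	/* Totals now: rx_packets = 5, rx_bytes = 500, ch_events = 1, ch_poll = 4. */
	return 0;
}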

File tree: 1 file changed (+161, -127 lines)


drivers/net/ethernet/mellanox/mlx5/core/en_stats.c

Lines changed: 161 additions & 127 deletions
@@ -248,6 +248,160 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
 	return idx;
 }
 
+static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
+						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
+{
+	s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
+	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
+	s->tx_xdp_nops += xdpsq_red_stats->nops;
+	s->tx_xdp_full += xdpsq_red_stats->full;
+	s->tx_xdp_err += xdpsq_red_stats->err;
+	s->tx_xdp_cqes += xdpsq_red_stats->cqes;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
+						  struct mlx5e_xdpsq_stats *xdpsq_stats)
+{
+	s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
+	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
+	s->rx_xdp_tx_nops += xdpsq_stats->nops;
+	s->rx_xdp_tx_full += xdpsq_stats->full;
+	s->rx_xdp_tx_err += xdpsq_stats->err;
+	s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
+						  struct mlx5e_xdpsq_stats *xsksq_stats)
+{
+	s->tx_xsk_xmit += xsksq_stats->xmit;
+	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
+	s->tx_xsk_inlnw += xsksq_stats->inlnw;
+	s->tx_xsk_full += xsksq_stats->full;
+	s->tx_xsk_err += xsksq_stats->err;
+	s->tx_xsk_cqes += xsksq_stats->cqes;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
+						  struct mlx5e_rq_stats *xskrq_stats)
+{
+	s->rx_xsk_packets += xskrq_stats->packets;
+	s->rx_xsk_bytes += xskrq_stats->bytes;
+	s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
+	s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
+	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
+	s->rx_xsk_csum_none += xskrq_stats->csum_none;
+	s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
+	s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
+	s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
+	s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
+	s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
+	s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
+	s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
+	s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
+	s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
+	s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
+	s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
+	s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
+	s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
+						     struct mlx5e_rq_stats *rq_stats)
+{
+	s->rx_packets += rq_stats->packets;
+	s->rx_bytes += rq_stats->bytes;
+	s->rx_lro_packets += rq_stats->lro_packets;
+	s->rx_lro_bytes += rq_stats->lro_bytes;
+	s->rx_ecn_mark += rq_stats->ecn_mark;
+	s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
+	s->rx_csum_none += rq_stats->csum_none;
+	s->rx_csum_complete += rq_stats->csum_complete;
+	s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
+	s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
+	s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+	s->rx_xdp_drop += rq_stats->xdp_drop;
+	s->rx_xdp_redirect += rq_stats->xdp_redirect;
+	s->rx_wqe_err += rq_stats->wqe_err;
+	s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
+	s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+	s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
+	s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+	s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+	s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+	s->rx_cache_reuse += rq_stats->cache_reuse;
+	s->rx_cache_full += rq_stats->cache_full;
+	s->rx_cache_empty += rq_stats->cache_empty;
+	s->rx_cache_busy += rq_stats->cache_busy;
+	s->rx_cache_waive += rq_stats->cache_waive;
+	s->rx_congst_umr += rq_stats->congst_umr;
+	s->rx_arfs_err += rq_stats->arfs_err;
+	s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_MLX5_EN_TLS
+	s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
+	s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
+	s->rx_tls_ctx += rq_stats->tls_ctx;
+	s->rx_tls_del += rq_stats->tls_del;
+	s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
+	s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
+	s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
+	s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
+	s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
+	s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
+	s->rx_tls_err += rq_stats->tls_err;
+#endif
+}
+
+static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
+						     struct mlx5e_ch_stats *ch_stats)
+{
+	s->ch_events += ch_stats->events;
+	s->ch_poll += ch_stats->poll;
+	s->ch_arm += ch_stats->arm;
+	s->ch_aff_change += ch_stats->aff_change;
+	s->ch_force_irq += ch_stats->force_irq;
+	s->ch_eq_rearm += ch_stats->eq_rearm;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
+					       struct mlx5e_sq_stats *sq_stats)
+{
+	s->tx_packets += sq_stats->packets;
+	s->tx_bytes += sq_stats->bytes;
+	s->tx_tso_packets += sq_stats->tso_packets;
+	s->tx_tso_bytes += sq_stats->tso_bytes;
+	s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+	s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+	s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+	s->tx_nop += sq_stats->nop;
+	s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
+	s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
+	s->tx_queue_stopped += sq_stats->stopped;
+	s->tx_queue_wake += sq_stats->wake;
+	s->tx_queue_dropped += sq_stats->dropped;
+	s->tx_cqe_err += sq_stats->cqe_err;
+	s->tx_recover += sq_stats->recover;
+	s->tx_xmit_more += sq_stats->xmit_more;
+	s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+	s->tx_csum_none += sq_stats->csum_none;
+	s->tx_csum_partial += sq_stats->csum_partial;
+#ifdef CONFIG_MLX5_EN_TLS
+	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
+	s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
+	s->tx_tls_ctx += sq_stats->tls_ctx;
+	s->tx_tls_ooo += sq_stats->tls_ooo;
+	s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
+	s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
+	s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
+	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
+	s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
+#endif
+	s->tx_cqes += sq_stats->cqes;
+}
+
 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 {
 	struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -258,139 +412,19 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 	for (i = 0; i < priv->max_nch; i++) {
 		struct mlx5e_channel_stats *channel_stats =
 			&priv->channel_stats[i];
-		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
-		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
-		struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
-		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
-		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
-		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
 		int j;
 
-		s->rx_packets += rq_stats->packets;
-		s->rx_bytes += rq_stats->bytes;
-		s->rx_lro_packets += rq_stats->lro_packets;
-		s->rx_lro_bytes += rq_stats->lro_bytes;
-		s->rx_ecn_mark += rq_stats->ecn_mark;
-		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
-		s->rx_csum_none += rq_stats->csum_none;
-		s->rx_csum_complete += rq_stats->csum_complete;
-		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
-		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
-		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
-		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
-		s->rx_xdp_drop += rq_stats->xdp_drop;
-		s->rx_xdp_redirect += rq_stats->xdp_redirect;
-		s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
-		s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
-		s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
-		s->rx_xdp_tx_nops += xdpsq_stats->nops;
-		s->rx_xdp_tx_full += xdpsq_stats->full;
-		s->rx_xdp_tx_err += xdpsq_stats->err;
-		s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
-		s->rx_wqe_err += rq_stats->wqe_err;
-		s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
-		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
-		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
-		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
-		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
-		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
-		s->rx_cache_reuse += rq_stats->cache_reuse;
-		s->rx_cache_full += rq_stats->cache_full;
-		s->rx_cache_empty += rq_stats->cache_empty;
-		s->rx_cache_busy += rq_stats->cache_busy;
-		s->rx_cache_waive += rq_stats->cache_waive;
-		s->rx_congst_umr += rq_stats->congst_umr;
-		s->rx_arfs_err += rq_stats->arfs_err;
-		s->rx_recover += rq_stats->recover;
-#ifdef CONFIG_MLX5_EN_TLS
-		s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
-		s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
-		s->rx_tls_ctx += rq_stats->tls_ctx;
-		s->rx_tls_del += rq_stats->tls_del;
-		s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
-		s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
-		s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
-		s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
-		s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
-		s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
-		s->rx_tls_err += rq_stats->tls_err;
-#endif
-		s->ch_events += ch_stats->events;
-		s->ch_poll += ch_stats->poll;
-		s->ch_arm += ch_stats->arm;
-		s->ch_aff_change += ch_stats->aff_change;
-		s->ch_force_irq += ch_stats->force_irq;
-		s->ch_eq_rearm += ch_stats->eq_rearm;
+		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
+		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
+		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
 		/* xdp redirect */
-		s->tx_xdp_xmit += xdpsq_red_stats->xmit;
-		s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
-		s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
-		s->tx_xdp_nops += xdpsq_red_stats->nops;
-		s->tx_xdp_full += xdpsq_red_stats->full;
-		s->tx_xdp_err += xdpsq_red_stats->err;
-		s->tx_xdp_cqes += xdpsq_red_stats->cqes;
+		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
 		/* AF_XDP zero-copy */
-		s->rx_xsk_packets += xskrq_stats->packets;
-		s->rx_xsk_bytes += xskrq_stats->bytes;
-		s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
-		s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
-		s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
-		s->rx_xsk_csum_none += xskrq_stats->csum_none;
-		s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
-		s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
-		s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
-		s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
-		s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
-		s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
-		s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
-		s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
-		s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
-		s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
-		s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
-		s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
-		s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
-		s->tx_xsk_xmit += xsksq_stats->xmit;
-		s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
-		s->tx_xsk_inlnw += xsksq_stats->inlnw;
-		s->tx_xsk_full += xsksq_stats->full;
-		s->tx_xsk_err += xsksq_stats->err;
-		s->tx_xsk_cqes += xsksq_stats->cqes;
+		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
+		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);
 
 		for (j = 0; j < priv->max_opened_tc; j++) {
-			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
-
-			s->tx_packets += sq_stats->packets;
-			s->tx_bytes += sq_stats->bytes;
-			s->tx_tso_packets += sq_stats->tso_packets;
-			s->tx_tso_bytes += sq_stats->tso_bytes;
-			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
-			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
-			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
-			s->tx_nop += sq_stats->nop;
-			s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
-			s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
-			s->tx_queue_stopped += sq_stats->stopped;
-			s->tx_queue_wake += sq_stats->wake;
-			s->tx_queue_dropped += sq_stats->dropped;
-			s->tx_cqe_err += sq_stats->cqe_err;
-			s->tx_recover += sq_stats->recover;
-			s->tx_xmit_more += sq_stats->xmit_more;
-			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
-			s->tx_csum_none += sq_stats->csum_none;
-			s->tx_csum_partial += sq_stats->csum_partial;
-#ifdef CONFIG_MLX5_EN_TLS
-			s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
-			s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
-			s->tx_tls_ctx += sq_stats->tls_ctx;
-			s->tx_tls_ooo += sq_stats->tls_ooo;
-			s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
-			s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
-			s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
-			s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
-			s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
-			s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
-#endif
-			s->tx_cqes += sq_stats->cqes;
+			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);
 
 			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
 			barrier();
