Commit c49fa01

Merge branch 'netdev_tracing'
Ben Hutchings says:

====================
Improve tracing at the driver/core boundary

These patches add static tracepoints at the driver/core boundary which record
various skb fields likely to be useful for datapath debugging. On the TX side
the boundary is where the core calls ndo_start_xmit, and on the RX side it is
where any of the various exported receive functions is called.

The set of skb fields is mostly based on what I thought would be interesting
for sfc.

These patches are basically the same as what I sent as an RFC in November, but
rebased. They now depend on 'net: core: explicitly select a txq before doing
l2 forwarding', so please merge net into net-next before trying to apply them.
The first patch fixes a code formatting error left behind after that fix.
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents 0a379e2 + ae78dbf commit c49fa01
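For readers of this merge, a brief illustration (not part of the commit) of how the new events can be consumed: a tracepoint defined with TRACE_EVENT(), such as net_dev_start_xmit below, can be hooked from kernel code by registering a probe whose signature matches its TP_PROTO() with an extra leading private-data pointer. The following sketch is a hypothetical module; it assumes the event is visible to modules via <trace/events/net.h> and that the generated register_trace_net_dev_start_xmit()/unregister_trace_net_dev_start_xmit() helpers are usable from module context.

/*
 * Hypothetical probe module (illustration only, not part of this commit).
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <trace/events/net.h>

/* Probe signature: a private-data pointer followed by the TP_PROTO() args. */
static void xmit_entry_probe(void *data, const struct sk_buff *skb,
			     const struct net_device *dev)
{
	pr_info("start_xmit on %s: len=%u data_len=%u\n",
		dev->name, skb->len, skb->data_len);
}

static int __init xmit_trace_init(void)
{
	/* Attach the probe to the net_dev_start_xmit tracepoint. */
	return register_trace_net_dev_start_xmit(xmit_entry_probe, NULL);
}

static void __exit xmit_trace_exit(void)
{
	unregister_trace_net_dev_start_xmit(xmit_entry_probe, NULL);
	/* Wait for in-flight probe calls to finish before unloading. */
	tracepoint_synchronize_unregister();
}

module_init(xmit_trace_init);
module_exit(xmit_trace_exit);
MODULE_LICENSE("GPL");

In everyday use these events are more commonly enabled through the tracing filesystem (under events/net/) or perf rather than a custom module; the module form is shown only to make the probe signature and recorded field set concrete.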

File tree (2 files changed: +222 / -41 lines)

  include/trace/events/net.h
  net/core/dev.c

include/trace/events/net.h

Lines changed: 158 additions & 0 deletions
@@ -6,9 +6,67 @@
 
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/tracepoint.h>
 
+TRACE_EVENT(net_dev_start_xmit,
+
+	TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),
+
+	TP_ARGS(skb, dev),
+
+	TP_STRUCT__entry(
+		__string(	name,			dev->name	)
+		__field(	u16,			queue_mapping	)
+		__field(	const void *,		skbaddr		)
+		__field(	bool,			vlan_tagged	)
+		__field(	u16,			vlan_proto	)
+		__field(	u16,			vlan_tci	)
+		__field(	u16,			protocol	)
+		__field(	u8,			ip_summed	)
+		__field(	unsigned int,		len		)
+		__field(	unsigned int,		data_len	)
+		__field(	int,			network_offset	)
+		__field(	bool,			transport_offset_valid)
+		__field(	int,			transport_offset)
+		__field(	u8,			tx_flags	)
+		__field(	u16,			gso_size	)
+		__field(	u16,			gso_segs	)
+		__field(	u16,			gso_type	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev->name);
+		__entry->queue_mapping = skb->queue_mapping;
+		__entry->skbaddr = skb;
+		__entry->vlan_tagged = vlan_tx_tag_present(skb);
+		__entry->vlan_proto = ntohs(skb->vlan_proto);
+		__entry->vlan_tci = vlan_tx_tag_get(skb);
+		__entry->protocol = ntohs(skb->protocol);
+		__entry->ip_summed = skb->ip_summed;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->network_offset = skb_network_offset(skb);
+		__entry->transport_offset_valid =
+			skb_transport_header_was_set(skb);
+		__entry->transport_offset = skb_transport_offset(skb);
+		__entry->tx_flags = skb_shinfo(skb)->tx_flags;
+		__entry->gso_size = skb_shinfo(skb)->gso_size;
+		__entry->gso_segs = skb_shinfo(skb)->gso_segs;
+		__entry->gso_type = skb_shinfo(skb)->gso_type;
+	),
+
+	TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x",
+		  __get_str(name), __entry->queue_mapping, __entry->skbaddr,
+		  __entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
+		  __entry->protocol, __entry->ip_summed, __entry->len,
+		  __entry->data_len,
+		  __entry->network_offset, __entry->transport_offset_valid,
+		  __entry->transport_offset, __entry->tx_flags,
+		  __entry->gso_size, __entry->gso_segs, __entry->gso_type)
+);
+
 TRACE_EVENT(net_dev_xmit,
 
 	TP_PROTO(struct sk_buff *skb,
@@ -78,6 +136,106 @@ DEFINE_EVENT(net_dev_template, netif_rx,
 
 	TP_ARGS(skb)
 );
+
+DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb),
+
+	TP_STRUCT__entry(
+		__string(	name,			skb->dev->name	)
+		__field(	unsigned int,		napi_id		)
+		__field(	u16,			queue_mapping	)
+		__field(	const void *,		skbaddr		)
+		__field(	bool,			vlan_tagged	)
+		__field(	u16,			vlan_proto	)
+		__field(	u16,			vlan_tci	)
+		__field(	u16,			protocol	)
+		__field(	u8,			ip_summed	)
+		__field(	u32,			rxhash		)
+		__field(	bool,			l4_rxhash	)
+		__field(	unsigned int,		len		)
+		__field(	unsigned int,		data_len	)
+		__field(	unsigned int,		truesize	)
+		__field(	bool,			mac_header_valid)
+		__field(	int,			mac_header	)
+		__field(	unsigned char,		nr_frags	)
+		__field(	u16,			gso_size	)
+		__field(	u16,			gso_type	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, skb->dev->name);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		__entry->napi_id = skb->napi_id;
+#else
+		__entry->napi_id = 0;
+#endif
+		__entry->queue_mapping = skb->queue_mapping;
+		__entry->skbaddr = skb;
+		__entry->vlan_tagged = vlan_tx_tag_present(skb);
+		__entry->vlan_proto = ntohs(skb->vlan_proto);
+		__entry->vlan_tci = vlan_tx_tag_get(skb);
+		__entry->protocol = ntohs(skb->protocol);
+		__entry->ip_summed = skb->ip_summed;
+		__entry->rxhash = skb->rxhash;
+		__entry->l4_rxhash = skb->l4_rxhash;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->truesize = skb->truesize;
+		__entry->mac_header_valid = skb_mac_header_was_set(skb);
+		__entry->mac_header = skb_mac_header(skb) - skb->data;
+		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
+		__entry->gso_size = skb_shinfo(skb)->gso_size;
+		__entry->gso_type = skb_shinfo(skb)->gso_type;
+	),
+
+	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d rxhash=0x%08x l4_rxhash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
+		  __get_str(name), __entry->napi_id, __entry->queue_mapping,
+		  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
+		  __entry->vlan_tci, __entry->protocol, __entry->ip_summed,
+		  __entry->rxhash, __entry->l4_rxhash, __entry->len,
+		  __entry->data_len, __entry->truesize,
+		  __entry->mac_header_valid, __entry->mac_header,
+		  __entry->nr_frags, __entry->gso_size, __entry->gso_type)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
 #endif /* _TRACE_NET_H */
 
 /* This part must be outside protection */

net/core/dev.c

Lines changed: 64 additions & 41 deletions
@@ -147,6 +147,8 @@ struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
 
+static int netif_rx_internal(struct sk_buff *skb);
+
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1698,7 +1700,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	skb_scrub_packet(skb, true);
 	skb->protocol = eth_type_trans(skb, dev);
 
-	return netif_rx(skb);
+	return netif_rx_internal(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
 
@@ -2596,8 +2598,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			dev_queue_xmit_nit(skb, dev);
 
 		skb_len = skb->len;
-		rc = ops->ndo_start_xmit(skb, dev);
-
+		trace_net_dev_start_xmit(skb, dev);
+		rc = ops->ndo_start_xmit(skb, dev);
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
 		if (rc == NETDEV_TX_OK)
 			txq_trans_update(txq);
@@ -2615,6 +2617,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
+		trace_net_dev_start_xmit(nskb, dev);
 		rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
@@ -3218,22 +3221,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	return NET_RX_DROP;
 }
 
-/**
- * netif_rx - post buffer to the network code
- * @skb: buffer to post
- *
- * This function receives a packet from a device driver and queues it for
- * the upper (protocol) levels to process. It always succeeds. The buffer
- * may be dropped during processing for congestion control or by the
- * protocol layers.
- *
- * return values:
- * NET_RX_SUCCESS (no congestion)
- * NET_RX_DROP (packet was dropped)
- *
- */
-
-int netif_rx(struct sk_buff *skb)
+static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
 
@@ -3269,14 +3257,38 @@ int netif_rx(struct sk_buff *skb)
 	}
 	return ret;
 }
+
+/**
+ * netif_rx - post buffer to the network code
+ * @skb: buffer to post
+ *
+ * This function receives a packet from a device driver and queues it for
+ * the upper (protocol) levels to process. It always succeeds. The buffer
+ * may be dropped during processing for congestion control or by the
+ * protocol layers.
+ *
+ * return values:
+ * NET_RX_SUCCESS (no congestion)
+ * NET_RX_DROP (packet was dropped)
+ *
+ */
+
+int netif_rx(struct sk_buff *skb)
+{
+	trace_netif_rx_entry(skb);
+
+	return netif_rx_internal(skb);
+}
 EXPORT_SYMBOL(netif_rx);
 
 int netif_rx_ni(struct sk_buff *skb)
 {
 	int err;
 
+	trace_netif_rx_ni_entry(skb);
+
 	preempt_disable();
-	err = netif_rx(skb);
+	err = netif_rx_internal(skb);
 	if (local_softirq_pending())
 		do_softirq();
 	preempt_enable();
@@ -3661,22 +3673,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	return ret;
 }
 
-/**
- * netif_receive_skb - process receive buffer from network
- * @skb: buffer to process
- *
- * netif_receive_skb() is the main receive data processing function.
- * It always succeeds. The buffer may be dropped during processing
- * for congestion control or by the protocol layers.
- *
- * This function may only be called from softirq context and interrupts
- * should be enabled.
- *
- * Return values (usually ignored):
- * NET_RX_SUCCESS: no congestion
- * NET_RX_DROP: packet was dropped
- */
-int netif_receive_skb(struct sk_buff *skb)
+static int netif_receive_skb_internal(struct sk_buff *skb)
 {
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
@@ -3702,6 +3699,28 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 	return __netif_receive_skb(skb);
 }
+
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+	trace_netif_receive_skb_entry(skb);
+
+	return netif_receive_skb_internal(skb);
+}
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending
@@ -3763,7 +3782,7 @@ static int napi_gro_complete(struct sk_buff *skb)
 	}
 
 out:
-	return netif_receive_skb(skb);
+	return netif_receive_skb_internal(skb);
 }
 
 /* napi->gro_list contains packets ordered by age.
@@ -3971,7 +3990,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
 	switch (ret) {
 	case GRO_NORMAL:
-		if (netif_receive_skb(skb))
+		if (netif_receive_skb_internal(skb))
 			ret = GRO_DROP;
 		break;
 
@@ -3996,6 +4015,8 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+	trace_napi_gro_receive_entry(skb);
+
 	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
 }
 EXPORT_SYMBOL(napi_gro_receive);
@@ -4029,7 +4050,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 {
 	switch (ret) {
 	case GRO_NORMAL:
-		if (netif_receive_skb(skb))
+		if (netif_receive_skb_internal(skb))
 			ret = GRO_DROP;
 		break;
 
@@ -4068,6 +4089,8 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
 	if (!skb)
 		return GRO_DROP;
 
+	trace_napi_gro_frags_entry(skb);
+
 	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
 }
 EXPORT_SYMBOL(napi_gro_frags);
@@ -6620,11 +6643,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-		netif_rx(skb);
+		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
-		netif_rx(skb);
+		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
 
