Skip to content

Commit ae78dbf

Browse files
Ben Hutchings authored and davem330 (David S. Miller) committed
net: Add trace events for all receive entry points, exposing more skb fields
The existing net/netif_rx and net/netif_receive_skb trace events provide little information about the skb, nor do they indicate how it entered the stack. Add trace events at entry of each of the exported functions, including most fields that are likely to be interesting for debugging driver datapath behaviour. Split netif_rx() and netif_receive_skb() so that internal calls are not traced. Signed-off-by: Ben Hutchings <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent d87d04a commit ae78dbf

File tree

2 files changed

+161
-39
lines changed

2 files changed

+161
-39
lines changed

include/trace/events/net.h

Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -136,6 +136,106 @@ DEFINE_EVENT(net_dev_template, netif_rx,
136136

137137
TP_ARGS(skb)
138138
);
139+
140+
DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
141+
142+
TP_PROTO(const struct sk_buff *skb),
143+
144+
TP_ARGS(skb),
145+
146+
TP_STRUCT__entry(
147+
__string( name, skb->dev->name )
148+
__field( unsigned int, napi_id )
149+
__field( u16, queue_mapping )
150+
__field( const void *, skbaddr )
151+
__field( bool, vlan_tagged )
152+
__field( u16, vlan_proto )
153+
__field( u16, vlan_tci )
154+
__field( u16, protocol )
155+
__field( u8, ip_summed )
156+
__field( u32, rxhash )
157+
__field( bool, l4_rxhash )
158+
__field( unsigned int, len )
159+
__field( unsigned int, data_len )
160+
__field( unsigned int, truesize )
161+
__field( bool, mac_header_valid)
162+
__field( int, mac_header )
163+
__field( unsigned char, nr_frags )
164+
__field( u16, gso_size )
165+
__field( u16, gso_type )
166+
),
167+
168+
TP_fast_assign(
169+
__assign_str(name, skb->dev->name);
170+
#ifdef CONFIG_NET_RX_BUSY_POLL
171+
__entry->napi_id = skb->napi_id;
172+
#else
173+
__entry->napi_id = 0;
174+
#endif
175+
__entry->queue_mapping = skb->queue_mapping;
176+
__entry->skbaddr = skb;
177+
__entry->vlan_tagged = vlan_tx_tag_present(skb);
178+
__entry->vlan_proto = ntohs(skb->vlan_proto);
179+
__entry->vlan_tci = vlan_tx_tag_get(skb);
180+
__entry->protocol = ntohs(skb->protocol);
181+
__entry->ip_summed = skb->ip_summed;
182+
__entry->rxhash = skb->rxhash;
183+
__entry->l4_rxhash = skb->l4_rxhash;
184+
__entry->len = skb->len;
185+
__entry->data_len = skb->data_len;
186+
__entry->truesize = skb->truesize;
187+
__entry->mac_header_valid = skb_mac_header_was_set(skb);
188+
__entry->mac_header = skb_mac_header(skb) - skb->data;
189+
__entry->nr_frags = skb_shinfo(skb)->nr_frags;
190+
__entry->gso_size = skb_shinfo(skb)->gso_size;
191+
__entry->gso_type = skb_shinfo(skb)->gso_type;
192+
),
193+
194+
TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d rxhash=0x%08x l4_rxhash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
195+
__get_str(name), __entry->napi_id, __entry->queue_mapping,
196+
__entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
197+
__entry->vlan_tci, __entry->protocol, __entry->ip_summed,
198+
__entry->rxhash, __entry->l4_rxhash, __entry->len,
199+
__entry->data_len, __entry->truesize,
200+
__entry->mac_header_valid, __entry->mac_header,
201+
__entry->nr_frags, __entry->gso_size, __entry->gso_type)
202+
);
203+
204+
DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,
205+
206+
TP_PROTO(const struct sk_buff *skb),
207+
208+
TP_ARGS(skb)
209+
);
210+
211+
DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry,
212+
213+
TP_PROTO(const struct sk_buff *skb),
214+
215+
TP_ARGS(skb)
216+
);
217+
218+
DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
219+
220+
TP_PROTO(const struct sk_buff *skb),
221+
222+
TP_ARGS(skb)
223+
);
224+
225+
DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
226+
227+
TP_PROTO(const struct sk_buff *skb),
228+
229+
TP_ARGS(skb)
230+
);
231+
232+
DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
233+
234+
TP_PROTO(const struct sk_buff *skb),
235+
236+
TP_ARGS(skb)
237+
);
238+
139239
#endif /* _TRACE_NET_H */
140240

141241
/* This part must be outside protection */

net/core/dev.c

Lines changed: 61 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,8 @@ struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
147147
struct list_head ptype_all __read_mostly; /* Taps */
148148
static struct list_head offload_base __read_mostly;
149149

150+
static int netif_rx_internal(struct sk_buff *skb);
151+
150152
/*
151153
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
152154
* semaphore.
@@ -1698,7 +1700,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
16981700
skb_scrub_packet(skb, true);
16991701
skb->protocol = eth_type_trans(skb, dev);
17001702

1701-
return netif_rx(skb);
1703+
return netif_rx_internal(skb);
17021704
}
17031705
EXPORT_SYMBOL_GPL(dev_forward_skb);
17041706

@@ -3219,22 +3221,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
32193221
return NET_RX_DROP;
32203222
}
32213223

3222-
/**
3223-
* netif_rx - post buffer to the network code
3224-
* @skb: buffer to post
3225-
*
3226-
* This function receives a packet from a device driver and queues it for
3227-
* the upper (protocol) levels to process. It always succeeds. The buffer
3228-
* may be dropped during processing for congestion control or by the
3229-
* protocol layers.
3230-
*
3231-
* return values:
3232-
* NET_RX_SUCCESS (no congestion)
3233-
* NET_RX_DROP (packet was dropped)
3234-
*
3235-
*/
3236-
3237-
int netif_rx(struct sk_buff *skb)
3224+
static int netif_rx_internal(struct sk_buff *skb)
32383225
{
32393226
int ret;
32403227

@@ -3270,14 +3257,38 @@ int netif_rx(struct sk_buff *skb)
32703257
}
32713258
return ret;
32723259
}
3260+
3261+
/**
3262+
* netif_rx - post buffer to the network code
3263+
* @skb: buffer to post
3264+
*
3265+
* This function receives a packet from a device driver and queues it for
3266+
* the upper (protocol) levels to process. It always succeeds. The buffer
3267+
* may be dropped during processing for congestion control or by the
3268+
* protocol layers.
3269+
*
3270+
* return values:
3271+
* NET_RX_SUCCESS (no congestion)
3272+
* NET_RX_DROP (packet was dropped)
3273+
*
3274+
*/
3275+
3276+
int netif_rx(struct sk_buff *skb)
3277+
{
3278+
trace_netif_rx_entry(skb);
3279+
3280+
return netif_rx_internal(skb);
3281+
}
32733282
EXPORT_SYMBOL(netif_rx);
32743283

32753284
int netif_rx_ni(struct sk_buff *skb)
32763285
{
32773286
int err;
32783287

3288+
trace_netif_rx_ni_entry(skb);
3289+
32793290
preempt_disable();
3280-
err = netif_rx(skb);
3291+
err = netif_rx_internal(skb);
32813292
if (local_softirq_pending())
32823293
do_softirq();
32833294
preempt_enable();
@@ -3662,22 +3673,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
36623673
return ret;
36633674
}
36643675

3665-
/**
3666-
* netif_receive_skb - process receive buffer from network
3667-
* @skb: buffer to process
3668-
*
3669-
* netif_receive_skb() is the main receive data processing function.
3670-
* It always succeeds. The buffer may be dropped during processing
3671-
* for congestion control or by the protocol layers.
3672-
*
3673-
* This function may only be called from softirq context and interrupts
3674-
* should be enabled.
3675-
*
3676-
* Return values (usually ignored):
3677-
* NET_RX_SUCCESS: no congestion
3678-
* NET_RX_DROP: packet was dropped
3679-
*/
3680-
int netif_receive_skb(struct sk_buff *skb)
3676+
static int netif_receive_skb_internal(struct sk_buff *skb)
36813677
{
36823678
net_timestamp_check(netdev_tstamp_prequeue, skb);
36833679

@@ -3703,6 +3699,28 @@ int netif_receive_skb(struct sk_buff *skb)
37033699
#endif
37043700
return __netif_receive_skb(skb);
37053701
}
3702+
3703+
/**
3704+
* netif_receive_skb - process receive buffer from network
3705+
* @skb: buffer to process
3706+
*
3707+
* netif_receive_skb() is the main receive data processing function.
3708+
* It always succeeds. The buffer may be dropped during processing
3709+
* for congestion control or by the protocol layers.
3710+
*
3711+
* This function may only be called from softirq context and interrupts
3712+
* should be enabled.
3713+
*
3714+
* Return values (usually ignored):
3715+
* NET_RX_SUCCESS: no congestion
3716+
* NET_RX_DROP: packet was dropped
3717+
*/
3718+
int netif_receive_skb(struct sk_buff *skb)
3719+
{
3720+
trace_netif_receive_skb_entry(skb);
3721+
3722+
return netif_receive_skb_internal(skb);
3723+
}
37063724
EXPORT_SYMBOL(netif_receive_skb);
37073725

37083726
/* Network device is going away, flush any packets still pending
@@ -3764,7 +3782,7 @@ static int napi_gro_complete(struct sk_buff *skb)
37643782
}
37653783

37663784
out:
3767-
return netif_receive_skb(skb);
3785+
return netif_receive_skb_internal(skb);
37683786
}
37693787

37703788
/* napi->gro_list contains packets ordered by age.
@@ -3972,7 +3990,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
39723990
{
39733991
switch (ret) {
39743992
case GRO_NORMAL:
3975-
if (netif_receive_skb(skb))
3993+
if (netif_receive_skb_internal(skb))
39763994
ret = GRO_DROP;
39773995
break;
39783996

@@ -3997,6 +4015,8 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
39974015

39984016
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
39994017
{
4018+
trace_napi_gro_receive_entry(skb);
4019+
40004020
return napi_skb_finish(dev_gro_receive(napi, skb), skb);
40014021
}
40024022
EXPORT_SYMBOL(napi_gro_receive);
@@ -4030,7 +4050,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
40304050
{
40314051
switch (ret) {
40324052
case GRO_NORMAL:
4033-
if (netif_receive_skb(skb))
4053+
if (netif_receive_skb_internal(skb))
40344054
ret = GRO_DROP;
40354055
break;
40364056

@@ -4069,6 +4089,8 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
40694089
if (!skb)
40704090
return GRO_DROP;
40714091

4092+
trace_napi_gro_frags_entry(skb);
4093+
40724094
return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
40734095
}
40744096
EXPORT_SYMBOL(napi_gro_frags);
@@ -6621,11 +6643,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
66216643

66226644
/* Process offline CPU's input_pkt_queue */
66236645
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6624-
netif_rx(skb);
6646+
netif_rx_internal(skb);
66256647
input_queue_head_incr(oldsd);
66266648
}
66276649
while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6628-
netif_rx(skb);
6650+
netif_rx_internal(skb);
66296651
input_queue_head_incr(oldsd);
66306652
}
66316653

0 commit comments

Comments (0)