 #include <linux/timer.h>
 #include <linux/bitops.h>
 #include <net/genetlink.h>
+#include <net/netevent.h>

 #include <trace/skb.h>
+#include <trace/napi.h>

 #include <asm/unaligned.h>

@@ -38,7 +40,8 @@ static void send_dm_alert(struct work_struct *unused);
  * and the work handle that will send up
  * netlink alerts
  */
-struct sock *dm_sock;
+static int trace_state = TRACE_OFF;
+static spinlock_t trace_state_lock = SPIN_LOCK_UNLOCKED;

 struct per_cpu_dm_data {
 	struct work_struct dm_alert_work;
@@ -47,6 +50,13 @@ struct per_cpu_dm_data {
 	struct timer_list send_timer;
 };

+struct dm_hw_stat_delta {
+	struct net_device *dev;
+	struct list_head list;
+	struct rcu_head rcu;
+	unsigned long last_drop_val;
+};
+
 static struct genl_family net_drop_monitor_family = {
 	.id = GENL_ID_GENERATE,
 	.hdrsize = 0,
@@ -59,7 +69,8 @@ static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);

 static int dm_hit_limit = 64;
 static int dm_delay = 1;
-
+static unsigned long dm_hw_check_delta = 2 * HZ;
+static LIST_HEAD(hw_stats_list);

 static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
@@ -115,7 +126,7 @@ static void sched_send_work(unsigned long unused)
 	schedule_work(&data->dm_alert_work);
 }

-static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+static void trace_drop_common(struct sk_buff *skb, void *location)
 {
 	struct net_dm_alert_msg *msg;
 	struct nlmsghdr *nlh;
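
For reference, the alert that trace_drop_common() assembles is the netlink payload declared in include/linux/net_dropmon.h. Its layout is roughly the following (paraphrased; the header is authoritative):

struct net_dm_drop_point {
	__u8 pc[8];	/* location passed to kfree_skb(); zero for hardware drops */
	__u32 count;	/* number of drops recorded at this location */
};

struct net_dm_alert_msg {
	__u32 entries;				/* number of drop points that follow */
	struct net_dm_drop_point points[0];	/* variable-length array of drop points */
};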
@@ -159,24 +170,80 @@ static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
 	return;
 }

+static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+{
+	trace_drop_common(skb, location);
+}
+
+static void trace_napi_poll_hit(struct napi_struct *napi)
+{
+	struct dm_hw_stat_delta *new_stat;
+
+	/*
+	 * Ratelimit our check time to dm_hw_check_delta jiffies
+	 */
+	if (!time_after(jiffies, napi->dev->last_rx + dm_hw_check_delta))
+		return;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
+		if ((new_stat->dev == napi->dev) &&
+		    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
+			trace_drop_common(NULL, NULL);
+			new_stat->last_drop_val = napi->dev->stats.rx_dropped;
+			break;
+		}
+	}
+	rcu_read_unlock();
+}
+
+
+static void free_dm_hw_stat(struct rcu_head *head)
+{
+	struct dm_hw_stat_delta *n;
+	n = container_of(head, struct dm_hw_stat_delta, rcu);
+	kfree(n);
+}
+
 static int set_all_monitor_traces(int state)
 {
 	int rc = 0;
+	struct dm_hw_stat_delta *new_stat = NULL;
+	struct dm_hw_stat_delta *temp;
+
+	spin_lock(&trace_state_lock);

 	switch (state) {
 	case TRACE_ON:
 		rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
+		rc |= register_trace_napi_poll(trace_napi_poll_hit);
 		break;
 	case TRACE_OFF:
 		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
+		rc |= unregister_trace_napi_poll(trace_napi_poll_hit);

 		tracepoint_synchronize_unregister();
+
+		/*
+		 * Clean the device list
+		 */
+		list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
+			if (new_stat->dev == NULL) {
+				list_del_rcu(&new_stat->list);
+				call_rcu(&new_stat->rcu, free_dm_hw_stat);
+			}
+		}
 		break;
 	default:
 		rc = 1;
 		break;
 	}

+	if (!rc)
+		trace_state = state;
+
+	spin_unlock(&trace_state_lock);
+
 	if (rc)
 		return -EINPROGRESS;
 	return rc;
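
Since hardware drops are reported through trace_drop_common(NULL, NULL), they reach listeners as a drop point whose recorded program counter is all zeroes, while software drops carry the kfree_skb() call site. A consumer could separate the two cases with something like the sketch below; is_hw_drop() is a hypothetical helper, not part of this patch:

#include <string.h>
#include <linux/net_dropmon.h>

/* Hypothetical helper: hardware drops are queued with a NULL location,
 * so the pc bytes stored in the drop point are all zero. */
static int is_hw_drop(const struct net_dm_drop_point *point)
{
	static const unsigned char zero_pc[sizeof(point->pc)];

	return memcmp(point->pc, zero_pc, sizeof(zero_pc)) == 0;
}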
@@ -204,6 +271,44 @@ static int net_dm_cmd_trace(struct sk_buff *skb,
 	return -ENOTSUPP;
 }

+static int dropmon_net_event(struct notifier_block *ev_block,
+			unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct dm_hw_stat_delta *new_stat = NULL;
+	struct dm_hw_stat_delta *tmp;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);
+
+		if (!new_stat)
+			goto out;
+
+		new_stat->dev = dev;
+		INIT_RCU_HEAD(&new_stat->rcu);
+		spin_lock(&trace_state_lock);
+		list_add_rcu(&new_stat->list, &hw_stats_list);
+		spin_unlock(&trace_state_lock);
+		break;
+	case NETDEV_UNREGISTER:
+		spin_lock(&trace_state_lock);
+		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
+			if (new_stat->dev == dev) {
+				new_stat->dev = NULL;
+				if (trace_state == TRACE_OFF) {
+					list_del_rcu(&new_stat->list);
+					call_rcu(&new_stat->rcu, free_dm_hw_stat);
+					break;
+				}
+			}
+		}
+		spin_unlock(&trace_state_lock);
+		break;
+	}
+out:
+	return NOTIFY_DONE;
+}

 static struct genl_ops dropmon_ops[] = {
 	{
@@ -220,6 +325,10 @@ static struct genl_ops dropmon_ops[] = {
 	},
 };

+static struct notifier_block dropmon_net_notifier = {
+	.notifier_call = dropmon_net_event
+};
+
 static int __init init_net_drop_monitor(void)
 {
 	int cpu;
@@ -243,12 +352,18 @@ static int __init init_net_drop_monitor(void)
 		ret = genl_register_ops(&net_drop_monitor_family,
 					&dropmon_ops[i]);
 		if (ret) {
-			printk(KERN_CRIT "failed to register operation %d\n",
+			printk(KERN_CRIT "Failed to register operation %d\n",
 				dropmon_ops[i].cmd);
 			goto out_unreg;
 		}
 	}

+	rc = register_netdevice_notifier(&dropmon_net_notifier);
+	if (rc < 0) {
+		printk(KERN_CRIT "Failed to register netdevice notifier\n");
+		goto out_unreg;
+	}
+
 	rc = 0;

 	for_each_present_cpu(cpu) {
@@ -259,6 +374,7 @@ static int __init init_net_drop_monitor(void)
 		data->send_timer.data = cpu;
 		data->send_timer.function = sched_send_work;
 	}
+
 	goto out;

 out_unreg:
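
For completeness, tracing is switched on and off from userspace over the NET_DM generic netlink family (the dropwatch utility is the usual consumer). A rough sketch using libnl-3, assuming only the family name and NET_DM_CMD_START from include/linux/net_dropmon.h and omitting all error handling:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/net_dropmon.h>

/* Rough sketch: ask the kernel to start emitting drop alerts.
 * The request lands in net_dm_cmd_trace(), which calls
 * set_all_monitor_traces(TRACE_ON). */
int start_drop_monitor(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	genl_connect(sk);				/* bind to NETLINK_GENERIC */
	family = genl_ctrl_resolve(sk, "NET_DM");	/* look up the family id */

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_REQUEST, NET_DM_CMD_START, 1 /* version, illustrative */);
	nl_send_auto(sk, msg);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}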