@@ -58,9 +58,26 @@ struct net_dm_stats {
 	struct u64_stats_sync syncp;
 };
 
+#define NET_DM_MAX_HW_TRAP_NAME_LEN 40
+
+struct net_dm_hw_entry {
+	char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN];
+	u32 count;
+};
+
+struct net_dm_hw_entries {
+	u32 num_entries;
+	struct net_dm_hw_entry entries[0];
+};
+
 struct per_cpu_dm_data {
-	spinlock_t		lock;	/* Protects 'skb' and 'send_timer' */
-	struct sk_buff		*skb;
+	spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
+					 * 'send_timer'
+					 */
+	union {
+		struct sk_buff			*skb;
+		struct net_dm_hw_entries	*hw_entries;
+	};
 	struct sk_buff_head	drop_queue;
 	struct work_struct	dm_alert_work;
 	struct timer_list	send_timer;
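The entries[0] member is a flexible array, so each per-CPU summary buffer is a single allocation sized for at most dm_hit_limit distinct traps. As an illustration only (not part of the patch), the struct_size() call in net_dm_hw_reset_per_cpu_data() below works out to roughly this, with the real macro additionally saturating on overflow:

	size_t size = sizeof(struct net_dm_hw_entries) +
		      dm_hit_limit * sizeof(struct net_dm_hw_entry);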
@@ -275,16 +292,189 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
 	rcu_read_unlock();
 }
 
+static struct net_dm_hw_entries *
+net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
+{
+	struct net_dm_hw_entries *hw_entries;
+	unsigned long flags;
+
+	hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
+			     GFP_KERNEL);
+	if (!hw_entries) {
+		/* If the memory allocation failed, we try to perform another
+		 * allocation in 1/10 second. Otherwise, the probe function
+		 * will constantly bail out.
+		 */
+		mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
+	}
+
+	spin_lock_irqsave(&hw_data->lock, flags);
+	swap(hw_data->hw_entries, hw_entries);
+	spin_unlock_irqrestore(&hw_data->lock, flags);
+
+	return hw_entries;
+}
+
+static int net_dm_hw_entry_put(struct sk_buff *msg,
+			       const struct net_dm_hw_entry *hw_entry)
+{
+	struct nlattr *attr;
+
+	attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY);
+	if (!attr)
+		return -EMSGSIZE;
+
+	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name))
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, attr);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(msg, attr);
+	return -EMSGSIZE;
+}
+
+static int net_dm_hw_entries_put(struct sk_buff *msg,
+				 const struct net_dm_hw_entries *hw_entries)
+{
+	struct nlattr *attr;
+	int i;
+
+	attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES);
+	if (!attr)
+		return -EMSGSIZE;
+
+	for (i = 0; i < hw_entries->num_entries; i++) {
+		int rc;
+
+		rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);
+		if (rc)
+			goto nla_put_failure;
+	}
+
+	nla_nest_end(msg, attr);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(msg, attr);
+	return -EMSGSIZE;
+}
+
+static int
+net_dm_hw_summary_report_fill(struct sk_buff *msg,
+			      const struct net_dm_hw_entries *hw_entries)
+{
+	struct net_dm_alert_msg anc_hdr = { 0 };
+	void *hdr;
+	int rc;
+
+	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
+			  NET_DM_CMD_ALERT);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	/* We need to put the ancillary header in order not to break user
+	 * space.
+	 */
+	if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr))
+		goto nla_put_failure;
+
+	rc = net_dm_hw_entries_put(msg, hw_entries);
+	if (rc)
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
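The resulting ALERT message therefore carries the zeroed legacy header followed by one nested entry per distinct trap. A sketch of the layout it produces:

/* genlmsg, cmd = NET_DM_CMD_ALERT
 *   NLA_UNSPEC: struct net_dm_alert_msg (zeroed; keeps legacy parsers working)
 *   NET_DM_ATTR_HW_ENTRIES (nested)
 *     NET_DM_ATTR_HW_ENTRY (nested)
 *       NET_DM_ATTR_HW_TRAP_NAME (string)
 *       NET_DM_ATTR_HW_TRAP_COUNT (u32)
 *     ... one NET_DM_ATTR_HW_ENTRY nest per distinct trap ...
 */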
+
+static void net_dm_hw_summary_work(struct work_struct *work)
+{
+	struct net_dm_hw_entries *hw_entries;
+	struct per_cpu_dm_data *hw_data;
+	struct sk_buff *msg;
+	int rc;
+
+	hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
+
+	hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
+	if (!hw_entries)
+		return;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		goto out;
+
+	rc = net_dm_hw_summary_report_fill(msg, hw_entries);
+	if (rc) {
+		nlmsg_free(msg);
+		goto out;
+	}
+
+	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);
+
+out:
+	kfree(hw_entries);
+}
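For orientation: the send timer armed by the probe below ends up scheduling the per-CPU work item, which the alert ops route to net_dm_hw_summary_work() via .hw_work_item_func. The timer handler already exists elsewhere in this file; paraphrased (not part of this diff), it is roughly:

static void sched_send_work(struct timer_list *t)
{
	struct per_cpu_dm_data *data = from_timer(data, t, send_timer);

	schedule_work(&data->dm_alert_work);
}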
+
 static void
 net_dm_hw_summary_probe(struct sk_buff *skb,
 			const struct net_dm_hw_metadata *hw_metadata)
 {
+	struct net_dm_hw_entries *hw_entries;
+	struct net_dm_hw_entry *hw_entry;
+	struct per_cpu_dm_data *hw_data;
+	unsigned long flags;
+	int i;
+
+	hw_data = this_cpu_ptr(&dm_hw_cpu_data);
+	spin_lock_irqsave(&hw_data->lock, flags);
+	hw_entries = hw_data->hw_entries;
+
+	if (!hw_entries)
+		goto out;
+
+	for (i = 0; i < hw_entries->num_entries; i++) {
+		hw_entry = &hw_entries->entries[i];
+		if (!strncmp(hw_entry->trap_name, hw_metadata->trap_name,
+			     NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) {
+			hw_entry->count++;
+			goto out;
+		}
+	}
+	if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit))
+		goto out;
+
+	hw_entry = &hw_entries->entries[hw_entries->num_entries];
+	strlcpy(hw_entry->trap_name, hw_metadata->trap_name,
+		NET_DM_MAX_HW_TRAP_NAME_LEN - 1);
+	hw_entry->count = 1;
+	hw_entries->num_entries++;
+
+	if (!timer_pending(&hw_data->send_timer)) {
+		hw_data->send_timer.expires = jiffies + dm_delay * HZ;
+		add_timer(&hw_data->send_timer);
+	}
+
+out:
+	spin_unlock_irqrestore(&hw_data->lock, flags);
 }
 
 static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
 	.kfree_skb_probe	= trace_kfree_skb_hit,
 	.napi_poll_probe	= trace_napi_poll_hit,
 	.work_item_func		= send_dm_alert,
+	.hw_work_item_func	= net_dm_hw_summary_work,
 	.hw_probe		= net_dm_hw_summary_probe,
 };
 
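A hypothetical userspace consumer could walk the nest like this (libnl-3 style sketch, not part of this patch; the attribute constants come from the net_dropmon.h uapi header and print_hw_entries is an illustrative name):

#include <stdio.h>
#include <netlink/attr.h>
#include <linux/net_dropmon.h>

/* Walk NET_DM_ATTR_HW_ENTRIES and print one line per trap. */
static void print_hw_entries(struct nlattr *entries)
{
	struct nlattr *entry;
	int rem;

	nla_for_each_nested(entry, entries, rem) {
		struct nlattr *tb[NET_DM_ATTR_MAX + 1];

		if (nla_parse_nested(tb, NET_DM_ATTR_MAX, entry, NULL) < 0)
			continue;
		if (!tb[NET_DM_ATTR_HW_TRAP_NAME] ||
		    !tb[NET_DM_ATTR_HW_TRAP_COUNT])
			continue;
		printf("%s: %u\n",
		       nla_get_string(tb[NET_DM_ATTR_HW_TRAP_NAME]),
		       nla_get_u32(tb[NET_DM_ATTR_HW_TRAP_COUNT]));
	}
}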
@@ -1309,6 +1499,7 @@ static void net_dm_hw_cpu_data_fini(int cpu)
 	struct per_cpu_dm_data *hw_data;
 
 	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
+	kfree(hw_data->hw_entries);
 	__net_dm_cpu_data_fini(hw_data);
 }