 #define CAN_BCM_VERSION CAN_VERSION
 static __initdata const char banner[] = KERN_INFO
-        "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
+        "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
 
 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -90,6 +90,7 @@ struct bcm_op {
         unsigned long frames_abs, frames_filtered;
         struct timeval ival1, ival2;
         struct hrtimer timer, thrtimer;
+        struct tasklet_struct tsklet, thrtsklet;
         ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
         int rx_ifindex;
         int count;
@@ -341,6 +342,23 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
         }
 }
 
+static void bcm_tx_timeout_tsklet(unsigned long data)
+{
+        struct bcm_op *op = (struct bcm_op *)data;
+        struct bcm_msg_head msg_head;
+
+        /* create notification to user */
+        msg_head.opcode = TX_EXPIRED;
+        msg_head.flags = op->flags;
+        msg_head.count = op->count;
+        msg_head.ival1 = op->ival1;
+        msg_head.ival2 = op->ival2;
+        msg_head.can_id = op->can_id;
+        msg_head.nframes = 0;
+
+        bcm_send_to_user(op, &msg_head, NULL, 0);
+}
+
 /*
  * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
  */
@@ -352,20 +370,8 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
         if (op->kt_ival1.tv64 && (op->count > 0)) {
 
                 op->count--;
-                if (!op->count && (op->flags & TX_COUNTEVT)) {
-                        struct bcm_msg_head msg_head;
-
-                        /* create notification to user */
-                        msg_head.opcode = TX_EXPIRED;
-                        msg_head.flags = op->flags;
-                        msg_head.count = op->count;
-                        msg_head.ival1 = op->ival1;
-                        msg_head.ival2 = op->ival2;
-                        msg_head.can_id = op->can_id;
-                        msg_head.nframes = 0;
-
-                        bcm_send_to_user(op, &msg_head, NULL, 0);
-                }
+                if (!op->count && (op->flags & TX_COUNTEVT))
+                        tasklet_schedule(&op->tsklet);
         }
 
         if (op->kt_ival1.tv64 && (op->count > 0)) {
@@ -402,6 +408,9 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
         if (op->frames_filtered > ULONG_MAX/100)
                 op->frames_filtered = op->frames_abs = 0;
 
+        /* this element is not throttled anymore */
+        data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
+
         head.opcode = RX_CHANGED;
         head.flags = op->flags;
         head.count = op->count;
@@ -420,45 +429,41 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
  */
 static void bcm_rx_update_and_send(struct bcm_op *op,
                                    struct can_frame *lastdata,
-                                   struct can_frame *rxdata)
+                                   const struct can_frame *rxdata)
 {
         memcpy(lastdata, rxdata, CFSIZ);
 
-        /* mark as used */
-        lastdata->can_dlc |= RX_RECV;
+        /* mark as used and throttled by default */
+        lastdata->can_dlc |= (RX_RECV|RX_THR);
 
-        /* throtteling mode inactive OR data update already on the run ? */
-        if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) {
+        /* throtteling mode inactive ? */
+        if (!op->kt_ival2.tv64) {
                 /* send RX_CHANGED to the user immediately */
-                bcm_rx_changed(op, rxdata);
+                bcm_rx_changed(op, lastdata);
                 return;
         }
 
-        if (hrtimer_active(&op->thrtimer)) {
-                /* mark as 'throttled' */
-                lastdata->can_dlc |= RX_THR;
+        /* with active throttling timer we are just done here */
+        if (hrtimer_active(&op->thrtimer))
                 return;
-        }
 
-        if (!op->kt_lastmsg.tv64) {
-                /* send first RX_CHANGED to the user immediately */
-                bcm_rx_changed(op, rxdata);
-                op->kt_lastmsg = ktime_get();
-                return;
-        }
+        /* first receiption with enabled throttling mode */
+        if (!op->kt_lastmsg.tv64)
+                goto rx_changed_settime;
 
+        /* got a second frame inside a potential throttle period? */
         if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
             ktime_to_us(op->kt_ival2)) {
-                /* mark as 'throttled' and start timer */
-                lastdata->can_dlc |= RX_THR;
+                /* do not send the saved data - only start throttle timer */
                 hrtimer_start(&op->thrtimer,
                               ktime_add(op->kt_lastmsg, op->kt_ival2),
                               HRTIMER_MODE_ABS);
                 return;
         }
 
         /* the gap was that big, that throttling was not needed here */
-        bcm_rx_changed(op, rxdata);
+rx_changed_settime:
+        bcm_rx_changed(op, lastdata);
         op->kt_lastmsg = ktime_get();
 }
@@ -467,7 +472,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
  * received data stored in op->last_frames[]
  */
 static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
-                                struct can_frame *rxdata)
+                                const struct can_frame *rxdata)
 {
         /*
          * no one uses the MSBs of can_dlc for comparation,
@@ -511,14 +516,12 @@ static void bcm_rx_starttimer(struct bcm_op *op)
                 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
 }
 
-/*
- * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
- */
-static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+static void bcm_rx_timeout_tsklet(unsigned long data)
 {
-        struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+        struct bcm_op *op = (struct bcm_op *)data;
         struct bcm_msg_head msg_head;
 
+        /* create notification to user */
         msg_head.opcode = RX_TIMEOUT;
         msg_head.flags = op->flags;
         msg_head.count = op->count;
@@ -528,6 +531,17 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
         msg_head.nframes = 0;
 
         bcm_send_to_user(op, &msg_head, NULL, 0);
+}
+
+/*
+ * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
+ */
+static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+{
+        struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+        /* schedule before NET_RX_SOFTIRQ */
+        tasklet_hi_schedule(&op->tsklet);
 
         /* no restart of the timer is done here! */
@@ -540,38 +554,52 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
         return HRTIMER_NORESTART;
 }
 
+/*
+ * bcm_rx_do_flush - helper for bcm_rx_thr_flush
+ */
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+{
+        if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
+                if (update)
+                        bcm_rx_changed(op, &op->last_frames[index]);
+                return 1;
+        }
+        return 0;
+}
+
 /*
  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
+ *
+ * update == 0 : just check if throttled data is available (any irq context)
+ * update == 1 : check and send throttled data to userspace (soft_irq context)
  */
-static int bcm_rx_thr_flush(struct bcm_op *op)
+static int bcm_rx_thr_flush(struct bcm_op *op, int update)
 {
         int updated = 0;
 
         if (op->nframes > 1) {
                 int i;
 
                 /* for MUX filter we start at index 1 */
-                for (i = 1; i < op->nframes; i++) {
-                        if ((op->last_frames) &&
-                            (op->last_frames[i].can_dlc & RX_THR)) {
-                                op->last_frames[i].can_dlc &= ~RX_THR;
-                                bcm_rx_changed(op, &op->last_frames[i]);
-                                updated++;
-                        }
-                }
+                for (i = 1; i < op->nframes; i++)
+                        updated += bcm_rx_do_flush(op, update, i);
 
         } else {
                 /* for RX_FILTER_ID and simple filter */
-                if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) {
-                        op->last_frames[0].can_dlc &= ~RX_THR;
-                        bcm_rx_changed(op, &op->last_frames[0]);
-                        updated++;
-                }
+                updated += bcm_rx_do_flush(op, update, 0);
         }
 
         return updated;
 }
 
+static void bcm_rx_thr_tsklet(unsigned long data)
+{
+        struct bcm_op *op = (struct bcm_op *)data;
+
+        /* push the changed data to the userspace */
+        bcm_rx_thr_flush(op, 1);
+}
+
 /*
  * bcm_rx_thr_handler - the time for blocked content updates is over now:
  *                      Check for throttled data and send it to the userspace
@@ -580,7 +608,9 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
 {
         struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
 
-        if (bcm_rx_thr_flush(op)) {
+        tasklet_schedule(&op->thrtsklet);
+
+        if (bcm_rx_thr_flush(op, 0)) {
                 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
                 return HRTIMER_RESTART;
         } else {
@@ -596,48 +626,38 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
 static void bcm_rx_handler(struct sk_buff *skb, void *data)
 {
         struct bcm_op *op = (struct bcm_op *)data;
-        struct can_frame rxframe;
+        const struct can_frame *rxframe = (struct can_frame *)skb->data;
         int i;
 
         /* disable timeout */
         hrtimer_cancel(&op->timer);
 
-        if (skb->len == sizeof(rxframe)) {
-                memcpy(&rxframe, skb->data, sizeof(rxframe));
-                /* save rx timestamp */
-                op->rx_stamp = skb->tstamp;
-                /* save originator for recvfrom() */
-                op->rx_ifindex = skb->dev->ifindex;
-                /* update statistics */
-                op->frames_abs++;
-                kfree_skb(skb);
+        if (op->can_id != rxframe->can_id)
+                goto rx_freeskb;
 
-        } else {
-                kfree_skb(skb);
-                return;
-        }
-
-        if (op->can_id != rxframe.can_id)
-                return;
+        /* save rx timestamp */
+        op->rx_stamp = skb->tstamp;
+        /* save originator for recvfrom() */
+        op->rx_ifindex = skb->dev->ifindex;
+        /* update statistics */
+        op->frames_abs++;
 
         if (op->flags & RX_RTR_FRAME) {
                 /* send reply for RTR-request (placed in op->frames[0]) */
                 bcm_can_tx(op);
-                return;
+                goto rx_freeskb;
         }
 
         if (op->flags & RX_FILTER_ID) {
                 /* the easiest case */
-                bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe);
-                bcm_rx_starttimer(op);
-                return;
+                bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
+                goto rx_freeskb_starttimer;
         }
 
         if (op->nframes == 1) {
                 /* simple compare with index 0 */
-                bcm_rx_cmp_to_index(op, 0, &rxframe);
-                bcm_rx_starttimer(op);
-                return;
+                bcm_rx_cmp_to_index(op, 0, rxframe);
+                goto rx_freeskb_starttimer;
         }
 
         if (op->nframes > 1) {
@@ -649,15 +669,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
                  */
 
                 for (i = 1; i < op->nframes; i++) {
-                        if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) ==
+                        if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
                             (GET_U64(&op->frames[0]) &
                              GET_U64(&op->frames[i]))) {
-                                bcm_rx_cmp_to_index(op, i, &rxframe);
+                                bcm_rx_cmp_to_index(op, i, rxframe);
                                 break;
                         }
                 }
-                bcm_rx_starttimer(op);
         }
+
+rx_freeskb_starttimer:
+        bcm_rx_starttimer(op);
+rx_freeskb:
+        kfree_skb(skb);
 }
 
 /*
@@ -681,6 +705,12 @@ static void bcm_remove_op(struct bcm_op *op)
         hrtimer_cancel(&op->timer);
         hrtimer_cancel(&op->thrtimer);
 
+        if (op->tsklet.func)
+                tasklet_kill(&op->tsklet);
+
+        if (op->thrtsklet.func)
+                tasklet_kill(&op->thrtsklet);
+
         if ((op->frames) && (op->frames != &op->sframe))
                 kfree(op->frames);
@@ -891,6 +921,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                 op->timer.function = bcm_tx_timeout_handler;
 
+                /* initialize tasklet for tx countevent notification */
+                tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
+                             (unsigned long) op);
+
                 /* currently unused in tx_ops */
                 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1054,9 +1088,17 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                 op->timer.function = bcm_rx_timeout_handler;
 
+                /* initialize tasklet for rx timeout notification */
+                tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
+                             (unsigned long) op);
+
                 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                 op->thrtimer.function = bcm_rx_thr_handler;
 
+                /* initialize tasklet for rx throttle handling */
+                tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
+                             (unsigned long) op);
+
                 /* add this bcm_op to the list of the rx_ops */
                 list_add(&op->list, &bo->rx_ops);
@@ -1102,7 +1144,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                          */
                         op->kt_lastmsg = ktime_set(0, 0);
                         hrtimer_cancel(&op->thrtimer);
-                        bcm_rx_thr_flush(op);
+                        bcm_rx_thr_flush(op, 1);
                 }
 
                 if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
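
The pattern behind all of the hunks above, shown in isolation: the hrtimer callbacks (which may run in hardirq context) no longer build and send the user notification themselves; they only schedule a tasklet, and the tasklet does the heavier work later in softirq context. The following is a minimal, self-contained sketch of that deferral and is not part of the commit: the names my_op, my_expiry_tsklet, my_timeout_handler, my_op_init and my_op_remove are hypothetical, and the sketch assumes the classic tasklet API (tasklet_init() taking an unsigned long argument) and the hrtimer API of the kernel generation this patch targets.

/* Hypothetical example - illustrates only the hrtimer -> tasklet deferral. */
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

struct my_op {
        struct hrtimer timer;
        struct tasklet_struct tsklet;
};

/* softirq context: the right place for the heavier notification work */
static void my_expiry_tsklet(unsigned long data)
{
        struct my_op *op = (struct my_op *)data;

        /* build a message and push it to userspace here, as the
         * bcm_*_tsklet() functions above do via bcm_send_to_user() */
        (void)op;
}

/* hrtimer callback: keep it short, just defer to the tasklet */
static enum hrtimer_restart my_timeout_handler(struct hrtimer *hrtimer)
{
        struct my_op *op = container_of(hrtimer, struct my_op, timer);

        tasklet_schedule(&op->tsklet);
        return HRTIMER_NORESTART;
}

static void my_op_init(struct my_op *op)
{
        hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        op->timer.function = my_timeout_handler;

        tasklet_init(&op->tsklet, my_expiry_tsklet, (unsigned long)op);

        /* fire once, one second from now */
        hrtimer_start(&op->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
}

static void my_op_remove(struct my_op *op)
{
        hrtimer_cancel(&op->timer);

        /* only kill a tasklet that was actually set up */
        if (op->tsklet.func)
                tasklet_kill(&op->tsklet);
}

The op->tsklet.func check before tasklet_kill() mirrors bcm_remove_op() above: only tasklets that were actually initialized for the operation get torn down.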
0 commit comments