 #define TRACE_GROUP "6lAd"

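+/* Extra debug tracing for the indirect TX cache: with EXTRA_DEBUG_EXTRA
+ * defined, tr_debug_extra() expands to tr_debug(); otherwise the calls
+ * compile away to nothing. */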
+#define EXTRA_DEBUG_EXTRA
+#ifdef EXTRA_DEBUG_EXTRA
+#define tr_debug_extra(...) tr_debug(__VA_ARGS__)
+#else
+#define tr_debug_extra(...)
+#endif
+
 typedef struct {
     uint16_t tag;  /*!< Fragmentation datagram TAG ID */
     uint16_t size; /*!< Datagram Total Size (uncompressed) */
@@ -57,7 +64,8 @@ typedef struct {
     uint8_t unfrag_len; /*!< Length of headers that precede the FRAG header */
     bool fragmented_data:1;
     bool first_fragment:1;
-    bool indirectData:1;
+    bool indirect_data:1;
+    bool indirect_data_cached:1; /*!< Data cached for delayed transmission because a MAC data request is already active */
     buffer_t *buf;
     uint8_t *fragmenter_buf;
     ns_list_link_t link; /*!< List link entry */
@@ -116,6 +124,8 @@ static int8_t lowpan_message_fragmentation_init(buffer_t *buf, fragmenter_tx_ent
 static bool lowpan_message_fragmentation_message_write(const fragmenter_tx_entry_t *frag_entry, mcps_data_req_t *dataReq);
 static void lowpan_adaptation_indirect_queue_free_message(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);

+static fragmenter_tx_entry_t *lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);
+
 //Discover
 static fragmenter_interface_t *lowpan_adaptation_interface_discover(int8_t interfaceId)
 {
@@ -362,6 +372,7 @@ static fragmenter_tx_entry_t *lowpan_indirect_entry_allocate(uint16_t fragment_b
     indirec_entry->buf = NULL;
     indirec_entry->fragmented_data = false;
     indirec_entry->first_fragment = true;
+    indirec_entry->indirect_data_cached = false;

     return indirec_entry;
 }
@@ -480,7 +491,7 @@ static fragmenter_tx_entry_t *lowpan_adaptation_tx_process_init(fragmenter_inte

     lowpan_active_buffer_state_reset(tx_entry);

-    tx_entry->indirectData = indirect;
+    tx_entry->indirect_data = indirect;

     return tx_entry;
 }
@@ -589,6 +600,80 @@ static void lowpan_adaptation_data_request_primitiv_set(const buffer_t *buf, mcp
     }
 }

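+/* Walk the indirect TX queue for a cached entry whose destination has no
+ * active data request in the MAC, and push the first such entry to the MAC
+ * so that a cached message cannot be left waiting indefinitely. */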
+static bool lowpan_adaptation_indirect_cache_sanity_check(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr)
+{
+    fragmenter_tx_entry_t *active_tx_entry;
+    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
+        if (fragmenter_tx_entry->indirect_data_cached == false) {
+            // active entry, jump to the next one
+            continue;
+        }
+
+        // cached entry found, check if it has a pending data request
+        active_tx_entry = lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, fragmenter_tx_entry);
+
+        if (active_tx_entry == NULL) {
+            // entry is cached but not sent to the mac => trigger it now
+            tr_debug_extra("sanity check, push seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address));
+            fragmenter_tx_entry->indirect_data_cached = false;
+            lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry);
+            return true;
+        }
+    }
+
+    return false;
+}
+
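+/* Called when an indirect transmission completes: deliver the first cached
+ * entry queued for the same destination to the MAC, or fall back to a
+ * sanity check over the whole cache queue. */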
+static bool lowpan_adaptation_indirect_cache_trigger(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
+{
+    tr_debug_extra("lowpan_adaptation_indirect_cache_trigger()");
+
+    if (ns_list_count(&interface_ptr->indirect_tx_queue) == 0) {
+        return false;
+    }
+
+    /* Trigger first cached entry */
+    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
+        if (fragmenter_tx_entry->indirect_data_cached) {
+            if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
+                tr_debug_extra("pushing seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address));
+                fragmenter_tx_entry->indirect_data_cached = false;
+                lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry);
+                return true;
+            }
+        }
+    }
+
+    /* Sanity check: if nothing can be triggered for our own address, check the whole cache queue */
+    return lowpan_adaptation_indirect_cache_sanity_check(cur, interface_ptr);
+}
+
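+/* Return the queue entry for tx_ptr's destination that the MAC is currently
+ * handling (i.e. not cached), or NULL if no data request is active for that
+ * address. */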
+static fragmenter_tx_entry_t *lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
+{
+    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
+        if (fragmenter_tx_entry->indirect_data_cached == false) {
+            if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
+                tr_debug_extra("active seq: %d", fragmenter_tx_entry->buf->seq);
+                return fragmenter_tx_entry;
+            }
+        }
+    }
+    return NULL;
+}
+
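+/* Return the first cached queue entry destined for the same address as
+ * tx_ptr, or NULL if nothing for that address is waiting in the cache. */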
+static fragmenter_tx_entry_t *lowpan_adaptation_indirect_first_cached_request_get(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
+{
+    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
+        if (fragmenter_tx_entry->indirect_data_cached == true) {
+            if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
+                tr_debug_extra("first cached seq: %d", fragmenter_tx_entry->buf->seq);
+                return fragmenter_tx_entry;
+            }
+        }
+    }
+    return NULL;
+}
+
 static void lowpan_adaptation_make_room_for_small_packet(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, mle_neigh_table_entry_t *neighbour_to_count)
 {
     if (interface_ptr->max_indirect_small_packets_per_child == 0) {
@@ -618,6 +703,7 @@ static void lowpan_adaptation_make_room_for_big_packet(struct protocol_interface
     ns_list_foreach_reverse_safe(fragmenter_tx_entry_t, tx_entry, &interface_ptr->indirect_tx_queue) {
         if (buffer_data_length(tx_entry->buf) > interface_ptr->indirect_big_packet_threshold) {
             if (++count >= interface_ptr->max_indirect_big_packets_total) {
+                tr_debug_extra("free seq: %d", tx_entry->buf->seq);
                 lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, tx_entry);
             }
         }
@@ -714,21 +800,45 @@ int8_t lowpan_adaptation_interface_tx(protocol_interface_info_entry_t *cur, buff

     if (indirect) {
         //Add to indirect queue
+        fragmenter_tx_entry_t *tx_ptr_cached;
         mle_neigh_table_entry_t *mle_entry = mle_class_get_by_link_address(cur->id, buf->dst_sa.address + 2, buf->dst_sa.addr_type);
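+        // Use the neighbour's MLE link timeout as the indirect queue TTL when
+        // the neighbour is known, otherwise the interface default timeout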
+        if (mle_entry) {
+            buf->link_specific.ieee802_15_4.indirectTTL = (uint32_t) mle_entry->timeout_rx * MLE_TIMER_TICKS_MS;
+        } else {
+            buf->link_specific.ieee802_15_4.indirectTTL = cur->mac_parameters->mac_in_direct_entry_timeout;
+        }
+
+        tr_debug_extra("indirect seq: %d, addr=%s", tx_ptr->buf->seq, trace_ipv6(buf->dst_sa.address));

+        // Make room for the new message if needed
         if (buffer_data_length(buf) <= interface_ptr->indirect_big_packet_threshold) {
             lowpan_adaptation_make_room_for_small_packet(cur, interface_ptr, mle_entry);
         } else {
             lowpan_adaptation_make_room_for_big_packet(cur, interface_ptr);
         }

-        ns_list_add_to_end(&interface_ptr->indirect_tx_queue, tx_ptr);
-        if (mle_entry) {
-            buf->link_specific.ieee802_15_4.indirectTTL = (uint32_t) mle_entry->timeout_rx * MLE_TIMER_TICKS_MS;
-        } else {
-            buf->link_specific.ieee802_15_4.indirectTTL = cur->mac_parameters->mac_in_direct_entry_timeout;
+        if (lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, tx_ptr)) {
+            // mac is handling a previous data request, cache the new one
+            tr_debug_extra("caching seq: %d", tx_ptr->buf->seq);
+            tx_ptr->indirect_data_cached = true;
         }

+        ns_list_add_to_end(&interface_ptr->indirect_tx_queue, tx_ptr);
+
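+        // Preserve per-destination ordering: an older cached message for this
+        // destination must be delivered before the new one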
+        // Check if the current message can be delivered to the MAC, or whether a cached message should be delivered first
+        tx_ptr_cached = lowpan_adaptation_indirect_first_cached_request_get(interface_ptr, tx_ptr);
+        if (tx_ptr->indirect_data_cached == false && tx_ptr_cached) {
+            tr_debug_extra("sending cached seq: %d", tx_ptr_cached->buf->seq);
+            // move the current message to the cache
+            tx_ptr->indirect_data_cached = true;
+            // swap entries
+            tx_ptr = tx_ptr_cached;
+            tx_ptr->indirect_data_cached = false;
+            buf = tx_ptr_cached->buf;
+        } else if (tx_ptr->indirect_data_cached == true) {
+            // a mac data request is ongoing and the new request was cached
+            return 0;
+        }
     }

     lowpan_data_request_to_mac(cur, buf, tx_ptr);
@@ -741,7 +851,6 @@ int8_t lowpan_adaptation_interface_tx(protocol_interface_info_entry_t *cur, buff

 }

-
 static bool lowpan_adaptation_tx_process_ready(fragmenter_tx_entry_t *tx_ptr)
 {
     if (!tx_ptr->fragmented_data) {
@@ -880,11 +989,21 @@ int8_t lowpan_adaptation_interface_tx_confirm(protocol_interface_info_entry_t *c

     //Check if there are more packets
     if (lowpan_adaptation_tx_process_ready(tx_ptr)) {
+        bool triggered_from_indirect_cache = false;
         if (tx_ptr->fragmented_data && active_direct_confirm) {
             //Clean
             interface_ptr->fragmenter_active = false;
         }
+
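+        // On an indirect TX confirm, try to trigger a cached entry for the
+        // same destination before this entry is cleaned up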
+        if (tx_ptr->buf->link_specific.ieee802_15_4.indirectTxProcess) {
+            triggered_from_indirect_cache = lowpan_adaptation_indirect_cache_trigger(cur, interface_ptr, tx_ptr);
+        }
+
         lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, map_mlme_status_to_socket_event(confirm->status));
+
+        if (triggered_from_indirect_cache) {
+            return 0;
+        }
     } else {
         lowpan_data_request_to_mac(cur, buf, tx_ptr);
     }