Commit 70931a7
Author: Arto Kinnunen

Fix indirect queue packet ordering
Packets could be sent to a destination out of order when the indirect queue buffer size was increased. This was caused by handing every queued packet to the MAC immediately. To preserve ordering, a packet is now sent to the MAC only if no previous request to the same address is ongoing; once that request finishes, the next one is issued.
Parent: 10e51a4
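
To make the fix concrete, the sketch below distills the queueing discipline the commit message describes: at most one MAC data request in flight per destination, with later packets cached until the confirm arrives. This is an illustrative standalone program, not the nanostack code; the names (entry_t, queue_push, queue_on_confirm), the fixed-size table, and the printf tracing are all invented for the example.

/* Sketch: keep at most one MAC data request active per destination.
 * Packets arriving while a request to the same address is in flight are
 * cached; a confirm releases the oldest cached packet for that address,
 * so per-destination ordering is preserved. Hypothetical names throughout. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QUEUE_MAX 8
#define ADDR_LEN  16

typedef struct {
    uint8_t dst[ADDR_LEN];  /* destination address */
    int     seq;            /* arrival order, for tracing */
    bool    cached;         /* true = waiting, false = handed to the MAC */
    bool    used;           /* slot in use */
} entry_t;

static entry_t queue[QUEUE_MAX];

static bool addr_equal(const uint8_t *a, const uint8_t *b)
{
    return memcmp(a, b, ADDR_LEN) == 0;
}

/* Is a request to dst already handed to the MAC? */
static bool request_active(const uint8_t *dst)
{
    for (int i = 0; i < QUEUE_MAX; i++) {
        if (queue[i].used && !queue[i].cached && addr_equal(queue[i].dst, dst)) {
            return true;
        }
    }
    return false;
}

/* Enqueue a packet; send it to the MAC only if dst has no active request. */
static void queue_push(const uint8_t *dst, int seq)
{
    for (int i = 0; i < QUEUE_MAX; i++) {
        if (!queue[i].used) {
            memcpy(queue[i].dst, dst, ADDR_LEN);
            queue[i].seq = seq;
            queue[i].cached = request_active(dst); /* cache if the MAC is busy with dst */
            queue[i].used = true;
            printf("seq %d: %s\n", seq, queue[i].cached ? "cached" : "sent to MAC");
            return;
        }
    }
    printf("seq %d: queue full, dropped\n", seq);
}

/* MAC confirmed slot i: free it, then release the oldest cached packet
 * queued for the same destination. */
static void queue_on_confirm(int i)
{
    uint8_t dst[ADDR_LEN];
    int oldest = -1;

    memcpy(dst, queue[i].dst, ADDR_LEN);
    queue[i].used = false;

    for (int j = 0; j < QUEUE_MAX; j++) {
        if (queue[j].used && queue[j].cached && addr_equal(queue[j].dst, dst)) {
            if (oldest < 0 || queue[j].seq < queue[oldest].seq) {
                oldest = j;
            }
        }
    }
    if (oldest >= 0) {
        queue[oldest].cached = false;   /* now in flight */
        printf("seq %d: sent to MAC after confirm\n", queue[oldest].seq);
    }
}

int main(void)
{
    uint8_t a[ADDR_LEN] = {1}, b[ADDR_LEN] = {2};

    queue_push(a, 1);    /* sent: no active request to a */
    queue_push(a, 2);    /* cached: request 1 still in flight */
    queue_push(b, 3);    /* sent: b is a different destination */
    queue_on_confirm(0); /* confirm seq 1 -> seq 2 goes out in order */
    return 0;
}

Running it prints seq 1 and seq 3 going straight to the MAC, seq 2 cached, and seq 2 released only after seq 1 is confirmed, which is the ordering guarantee the commit restores.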

source/6LoWPAN/adaptation_interface.c

Lines changed: 127 additions & 8 deletions
@@ -45,6 +45,13 @@
 
 #define TRACE_GROUP "6lAd"
 
+#define EXTRA_DEBUG_EXTRA
+#ifdef EXTRA_DEBUG_EXTRA
+#define tr_debug_extra(...) tr_debug(__VA_ARGS__)
+#else
+#define tr_debug_extra(...)
+#endif
+
 typedef struct {
     uint16_t tag;   /*!< Fragmentation datagram TAG ID */
     uint16_t size;  /*!< Datagram Total Size (uncompressed) */
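
A side note on the EXTRA_DEBUG_EXTRA block above: it is the usual variadic-macro trace switch. The standalone sketch below shows the same pattern, with printf standing in for nanostack's tr_debug (an assumption made for illustration only); removing the define makes every tr_debug_extra() call compile away.

#include <stdio.h>

/* Same pattern as the diff: with EXTRA_DEBUG_EXTRA defined, calls forward
 * to the trace function (printf here, tr_debug in nanostack); comment the
 * define out and every tr_debug_extra() call expands to nothing. */
#define EXTRA_DEBUG_EXTRA
#ifdef EXTRA_DEBUG_EXTRA
#define tr_debug_extra(...) printf(__VA_ARGS__)
#else
#define tr_debug_extra(...)
#endif

int main(void)
{
    tr_debug_extra("caching seq: %d\n", 7); /* disappears when the define is removed */
    return 0;
}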
@@ -57,7 +64,8 @@ typedef struct {
     uint8_t unfrag_len;     /*!< Length of headers that precede the FRAG header */
     bool fragmented_data:1;
     bool first_fragment:1;
-    bool indirectData:1;
+    bool indirect_data:1;
+    bool indirect_data_cached:1; /* Data cached for delayed transmission as a mac request is already active */
     buffer_t *buf;
     uint8_t *fragmenter_buf;
     ns_list_link_t link;    /*!< List link entry */
@@ -116,6 +124,8 @@ static int8_t lowpan_message_fragmentation_init(buffer_t *buf, fragmenter_tx_ent
 static bool lowpan_message_fragmentation_message_write(const fragmenter_tx_entry_t *frag_entry, mcps_data_req_t *dataReq);
 static void lowpan_adaptation_indirect_queue_free_message(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);
 
+static fragmenter_tx_entry_t* lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);
+
 //Discover
 static fragmenter_interface_t *lowpan_adaptation_interface_discover(int8_t interfaceId)
 {
@@ -362,6 +372,7 @@ static fragmenter_tx_entry_t *lowpan_indirect_entry_allocate(uint16_t fragment_b
     indirec_entry->buf = NULL;
     indirec_entry->fragmented_data = false;
     indirec_entry->first_fragment = true;
+    indirec_entry->indirect_data_cached = false;
 
     return indirec_entry;
 }
@@ -480,7 +491,7 @@ static fragmenter_tx_entry_t * lowpan_adaptation_tx_process_init(fragmenter_inte
 
     lowpan_active_buffer_state_reset(tx_entry);
 
-    tx_entry->indirectData = indirect;
+    tx_entry->indirect_data = indirect;
 
     return tx_entry;
 }
@@ -589,6 +600,80 @@ static void lowpan_adaptation_data_request_primitiv_set(const buffer_t *buf, mcp
     }
 }
 
+static bool lowpan_adaptation_indirect_cache_sanity_check(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr)
+{
+    fragmenter_tx_entry_t *active_tx_entry;
+    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
+        if (fragmenter_tx_entry->indirect_data_cached == false) {
+            // active entry, jump to the next one
+            continue;
+        }
+
+        // cached entry found, check if it has a pending data request
+        active_tx_entry = lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, fragmenter_tx_entry);
+
+        if (active_tx_entry == NULL) {
+            // entry is cached and not yet sent to the mac => trigger it
+            tr_debug_extra("sanity check, push seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address));
+            fragmenter_tx_entry->indirect_data_cached = false;
+            lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry);
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static bool lowpan_adaptation_indirect_cache_trigger(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
+{
+    tr_debug_extra("lowpan_adaptation_indirect_cache_trigger()");
+
+    if (ns_list_count(&interface_ptr->indirect_tx_queue) == 0) {
+        return false;
+    }
+
+    /* Trigger the first cached entry for the same destination */
+    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
+        if (fragmenter_tx_entry->indirect_data_cached) {
+            if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
+                tr_debug_extra("pushing seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address));
+                fragmenter_tx_entry->indirect_data_cached = false;
+                lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry);
+                return true;
+            }
+        }
+    }
+
+    /* Sanity check: if nothing can be triggered for this address, check the whole cache queue */
+    return lowpan_adaptation_indirect_cache_sanity_check(cur, interface_ptr);
+}
+
+static fragmenter_tx_entry_t* lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
+{
+    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
+        if (fragmenter_tx_entry->indirect_data_cached == false) {
+            if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
+                tr_debug_extra("active seq: %d", fragmenter_tx_entry->buf->seq);
+                return fragmenter_tx_entry;
+            }
+        }
+    }
+    return NULL;
+}
+
+static fragmenter_tx_entry_t* lowpan_adaptation_indirect_first_cached_request_get(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
+{
+    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
+        if (fragmenter_tx_entry->indirect_data_cached == true) {
+            if (addr_ipv6_equal(tx_ptr->buf->dst_sa.address, fragmenter_tx_entry->buf->dst_sa.address)) {
+                tr_debug_extra("first cached seq: %d", fragmenter_tx_entry->buf->seq);
+                return fragmenter_tx_entry;
+            }
+        }
+    }
+    return NULL;
+}
+
 static void lowpan_adaptation_make_room_for_small_packet(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, mle_neigh_table_entry_t *neighbour_to_count)
 {
     if (interface_ptr->max_indirect_small_packets_per_child == 0) {
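
Taken together, the four helpers above divide the work: lowpan_adaptation_indirect_mac_data_request_active answers "is the MAC already serving this address?", lowpan_adaptation_indirect_first_cached_request_get finds the first queued-but-unsent packet for an address, lowpan_adaptation_indirect_cache_trigger releases that packet when a confirm arrives, and lowpan_adaptation_indirect_cache_sanity_check is the fallback that releases any cached entry whose address no longer has an active request.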
@@ -618,6 +703,7 @@ static void lowpan_adaptation_make_room_for_big_packet(struct protocol_interface
     ns_list_foreach_reverse_safe(fragmenter_tx_entry_t, tx_entry, &interface_ptr->indirect_tx_queue) {
         if (buffer_data_length(tx_entry->buf) > interface_ptr->indirect_big_packet_threshold) {
             if (++count >= interface_ptr->max_indirect_big_packets_total) {
+                tr_debug_extra("free seq: %d", tx_entry->buf->seq);
                 lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, tx_entry);
             }
         }
@@ -714,21 +800,45 @@ int8_t lowpan_adaptation_interface_tx(protocol_interface_info_entry_t *cur, buff
 
     if (indirect) {
         //Add to indirect queue
+        fragmenter_tx_entry_t *tx_ptr_cached;
         mle_neigh_table_entry_t *mle_entry = mle_class_get_by_link_address(cur->id, buf->dst_sa.address + 2, buf->dst_sa.addr_type);
+        if (mle_entry) {
+            buf->link_specific.ieee802_15_4.indirectTTL = (uint32_t) mle_entry->timeout_rx * MLE_TIMER_TICKS_MS;
+        } else {
+            buf->link_specific.ieee802_15_4.indirectTTL = cur->mac_parameters->mac_in_direct_entry_timeout;
+        }
+
+        tr_debug_extra("indirect seq: %d, addr=%s", tx_ptr->buf->seq, trace_ipv6(buf->dst_sa.address));
 
+        // Make room for a new message if needed
         if (buffer_data_length(buf) <= interface_ptr->indirect_big_packet_threshold) {
             lowpan_adaptation_make_room_for_small_packet(cur, interface_ptr, mle_entry);
         } else {
             lowpan_adaptation_make_room_for_big_packet(cur, interface_ptr);
         }
 
-        ns_list_add_to_end(&interface_ptr->indirect_tx_queue, tx_ptr);
-        if (mle_entry) {
-            buf->link_specific.ieee802_15_4.indirectTTL = (uint32_t) mle_entry->timeout_rx * MLE_TIMER_TICKS_MS;
-        } else {
-            buf->link_specific.ieee802_15_4.indirectTTL = cur->mac_parameters->mac_in_direct_entry_timeout;
+        if (lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, tx_ptr)) {
+            // mac is handling a previous data request, cache the new one
+            tr_debug_extra("caching seq: %d", tx_ptr->buf->seq);
+            tx_ptr->indirect_data_cached = true;
         }
 
+        ns_list_add_to_end(&interface_ptr->indirect_tx_queue, tx_ptr);
+
+        // Check if the current message can be delivered to the MAC or if a cached message should go first
+        tx_ptr_cached = lowpan_adaptation_indirect_first_cached_request_get(interface_ptr, tx_ptr);
+        if (tx_ptr->indirect_data_cached == false && tx_ptr_cached) {
+            tr_debug_extra("sending cached seq: %d", tx_ptr_cached->buf->seq);
+            // cache the current message instead
+            tx_ptr->indirect_data_cached = true;
+            // swap the entries
+            tx_ptr = tx_ptr_cached;
+            tx_ptr->indirect_data_cached = false;
+            buf = tx_ptr_cached->buf;
+        } else if (tx_ptr->indirect_data_cached == true) {
+            // a mac data request is ongoing and the new request was cached
+            return 0;
+        }
     }
 
     lowpan_data_request_to_mac(cur, buf, tx_ptr);
@@ -741,7 +851,6 @@ int8_t lowpan_adaptation_interface_tx(protocol_interface_info_entry_t *cur, buff
 
 }
 
-
 static bool lowpan_adaptation_tx_process_ready(fragmenter_tx_entry_t *tx_ptr)
 {
     if (!tx_ptr->fragmented_data) {
@@ -880,11 +989,21 @@ int8_t lowpan_adaptation_interface_tx_confirm(protocol_interface_info_entry_t *c
 
     //Check if there are more packets
     if (lowpan_adaptation_tx_process_ready(tx_ptr)) {
+        bool triggered_from_indirect_cache = false;
         if (tx_ptr->fragmented_data && active_direct_confirm) {
             //Clean
             interface_ptr->fragmenter_active = false;
         }
+
+        if (tx_ptr->buf->link_specific.ieee802_15_4.indirectTxProcess) {
+            triggered_from_indirect_cache = lowpan_adaptation_indirect_cache_trigger(cur, interface_ptr, tx_ptr);
+        }
+
         lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, map_mlme_status_to_socket_event(confirm->status));
+
+        if (triggered_from_indirect_cache) {
+            return 0;
+        }
     } else {
         lowpan_data_request_to_mac(cur, buf, tx_ptr);
     }
