Skip to content

Commit ae23051

Browse files
committed
Merge branch 'tipc-link-improvements'
Jon Maloy says: ==================== tipc: some link level code improvements Extensive testing has revealed some weaknesses and non-optimal solutions in the link level code. This commit series addresses those issues. ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents 175f8d6 + c819930 commit ae23051

File tree

7 files changed

+172
-120
lines changed

7 files changed

+172
-120
lines changed

net/tipc/bearer.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -362,6 +362,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
362362
b_ptr->media->disable_media(b_ptr);
363363

364364
tipc_node_delete_links(net, b_ptr->identity);
365+
RCU_INIT_POINTER(b_ptr->media_ptr, NULL);
365366
if (b_ptr->link_req)
366367
tipc_disc_delete(b_ptr->link_req);
367368

@@ -399,16 +400,13 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
399400

400401
/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
401402
*
402-
* Mark L2 bearer as inactive so that incoming buffers are thrown away,
403-
* then get worker thread to complete bearer cleanup. (Can't do cleanup
404-
* here because cleanup code needs to sleep and caller holds spinlocks.)
403+
* Mark L2 bearer as inactive so that incoming buffers are thrown away
405404
*/
406405
void tipc_disable_l2_media(struct tipc_bearer *b)
407406
{
408407
struct net_device *dev;
409408

410409
dev = (struct net_device *)rtnl_dereference(b->media_ptr);
411-
RCU_INIT_POINTER(b->media_ptr, NULL);
412410
RCU_INIT_POINTER(dev->tipc_ptr, NULL);
413411
synchronize_net();
414412
dev_put(dev);
@@ -554,7 +552,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
554552
case NETDEV_CHANGE:
555553
if (netif_carrier_ok(dev))
556554
break;
557-
case NETDEV_DOWN:
555+
case NETDEV_GOING_DOWN:
558556
case NETDEV_CHANGEMTU:
559557
tipc_reset_bearer(net, b_ptr);
560558
break;

net/tipc/link.c

Lines changed: 102 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -120,11 +120,21 @@ bool tipc_link_is_up(struct tipc_link *l)
120120
return link_is_up(l);
121121
}
122122

123+
bool tipc_link_peer_is_down(struct tipc_link *l)
124+
{
125+
return l->state == LINK_PEER_RESET;
126+
}
127+
123128
bool tipc_link_is_reset(struct tipc_link *l)
124129
{
125130
return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
126131
}
127132

133+
bool tipc_link_is_establishing(struct tipc_link *l)
134+
{
135+
return l->state == LINK_ESTABLISHING;
136+
}
137+
128138
bool tipc_link_is_synching(struct tipc_link *l)
129139
{
130140
return l->state == LINK_SYNCHING;
@@ -321,14 +331,15 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
321331
switch (evt) {
322332
case LINK_ESTABLISH_EVT:
323333
l->state = LINK_ESTABLISHED;
324-
rc |= TIPC_LINK_UP_EVT;
325334
break;
326335
case LINK_FAILOVER_BEGIN_EVT:
327336
l->state = LINK_FAILINGOVER;
328337
break;
329-
case LINK_PEER_RESET_EVT:
330338
case LINK_RESET_EVT:
339+
l->state = LINK_RESET;
340+
break;
331341
case LINK_FAILURE_EVT:
342+
case LINK_PEER_RESET_EVT:
332343
case LINK_SYNCH_BEGIN_EVT:
333344
case LINK_FAILOVER_END_EVT:
334345
break;
@@ -578,8 +589,6 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
578589

579590
void tipc_link_reset(struct tipc_link *l)
580591
{
581-
tipc_link_fsm_evt(l, LINK_RESET_EVT);
582-
583592
/* Link is down, accept any session */
584593
l->peer_session = WILDCARD_SESSION;
585594

@@ -953,7 +962,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
953962
case TIPC_HIGH_IMPORTANCE:
954963
case TIPC_CRITICAL_IMPORTANCE:
955964
case CONN_MANAGER:
956-
__skb_queue_tail(inputq, skb);
965+
skb_queue_tail(inputq, skb);
957966
return true;
958967
case NAME_DISTRIBUTOR:
959968
node->bclink.recv_permitted = true;
@@ -982,6 +991,7 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
982991
struct tipc_msg *hdr = buf_msg(skb);
983992
struct sk_buff **reasm_skb = &l->reasm_buf;
984993
struct sk_buff *iskb;
994+
struct sk_buff_head tmpq;
985995
int usr = msg_user(hdr);
986996
int rc = 0;
987997
int pos = 0;
@@ -1006,10 +1016,12 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
10061016
}
10071017

10081018
if (usr == MSG_BUNDLER) {
1019+
skb_queue_head_init(&tmpq);
10091020
l->stats.recv_bundles++;
10101021
l->stats.recv_bundled += msg_msgcnt(hdr);
10111022
while (tipc_msg_extract(skb, &iskb, &pos))
1012-
tipc_data_input(l, iskb, inputq);
1023+
tipc_data_input(l, iskb, &tmpq);
1024+
tipc_skb_queue_splice_tail(&tmpq, inputq);
10131025
return 0;
10141026
} else if (usr == MSG_FRAGMENTER) {
10151027
l->stats.recv_fragments++;
@@ -1044,89 +1056,105 @@ static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
10441056
return released;
10451057
}
10461058

1059+
/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
1060+
*/
1061+
void tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1062+
{
1063+
l->rcv_unacked = 0;
1064+
l->stats.sent_acks++;
1065+
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1066+
}
1067+
1068+
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1069+
*/
1070+
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1071+
{
1072+
int mtyp = RESET_MSG;
1073+
1074+
if (l->state == LINK_ESTABLISHING)
1075+
mtyp = ACTIVATE_MSG;
1076+
1077+
tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
1078+
}
1079+
1080+
/* tipc_link_build_nack_msg: prepare link nack message for transmission
1081+
*/
1082+
static void tipc_link_build_nack_msg(struct tipc_link *l,
1083+
struct sk_buff_head *xmitq)
1084+
{
1085+
u32 def_cnt = ++l->stats.deferred_recv;
1086+
1087+
if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1088+
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1089+
}
1090+
10471091
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1048-
* @link: the link that should handle the message
1092+
* @l: the link that should handle the message
10491093
* @skb: TIPC packet
10501094
* @xmitq: queue to place packets to be sent after this call
10511095
*/
10521096
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
10531097
struct sk_buff_head *xmitq)
10541098
{
1055-
struct sk_buff_head *arrvq = &l->deferdq;
1056-
struct sk_buff_head tmpq;
1099+
struct sk_buff_head *defq = &l->deferdq;
10571100
struct tipc_msg *hdr;
1058-
u16 seqno, rcv_nxt;
1101+
u16 seqno, rcv_nxt, win_lim;
10591102
int rc = 0;
10601103

1061-
__skb_queue_head_init(&tmpq);
1062-
1063-
if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
1064-
if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
1065-
tipc_link_build_proto_msg(l, STATE_MSG, 0,
1066-
0, 0, 0, xmitq);
1067-
return rc;
1068-
}
1069-
1070-
while ((skb = skb_peek(arrvq))) {
1104+
do {
10711105
hdr = buf_msg(skb);
1106+
seqno = msg_seqno(hdr);
1107+
rcv_nxt = l->rcv_nxt;
1108+
win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
10721109

10731110
/* Verify and update link state */
1074-
if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
1075-
__skb_dequeue(arrvq);
1076-
rc = tipc_link_proto_rcv(l, skb, xmitq);
1077-
continue;
1078-
}
1111+
if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1112+
return tipc_link_proto_rcv(l, skb, xmitq);
10791113

10801114
if (unlikely(!link_is_up(l))) {
1081-
rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
1082-
if (!link_is_up(l)) {
1083-
kfree_skb(__skb_dequeue(arrvq));
1084-
goto exit;
1085-
}
1115+
if (l->state == LINK_ESTABLISHING)
1116+
rc = TIPC_LINK_UP_EVT;
1117+
goto drop;
10861118
}
10871119

1120+
/* Don't send probe at next timeout expiration */
10881121
l->silent_intv_cnt = 0;
10891122

1123+
/* Drop if outside receive window */
1124+
if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1125+
l->stats.duplicates++;
1126+
goto drop;
1127+
}
1128+
10901129
/* Forward queues and wake up waiting users */
10911130
if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
10921131
tipc_link_advance_backlog(l, xmitq);
10931132
if (unlikely(!skb_queue_empty(&l->wakeupq)))
10941133
link_prepare_wakeup(l);
10951134
}
10961135

1097-
/* Defer reception if there is a gap in the sequence */
1098-
seqno = msg_seqno(hdr);
1099-
rcv_nxt = l->rcv_nxt;
1100-
if (unlikely(less(rcv_nxt, seqno))) {
1101-
l->stats.deferred_recv++;
1102-
goto exit;
1103-
}
1104-
1105-
__skb_dequeue(arrvq);
1106-
1107-
/* Drop if packet already received */
1108-
if (unlikely(more(rcv_nxt, seqno))) {
1109-
l->stats.duplicates++;
1110-
kfree_skb(skb);
1111-
goto exit;
1136+
/* Defer delivery if sequence gap */
1137+
if (unlikely(seqno != rcv_nxt)) {
1138+
__tipc_skb_queue_sorted(defq, seqno, skb);
1139+
tipc_link_build_nack_msg(l, xmitq);
1140+
break;
11121141
}
11131142

1114-
/* Packet can be delivered */
1143+
/* Deliver packet */
11151144
l->rcv_nxt++;
11161145
l->stats.recv_info++;
1117-
if (unlikely(!tipc_data_input(l, skb, &tmpq)))
1118-
rc = tipc_link_input(l, skb, &tmpq);
1146+
if (!tipc_data_input(l, skb, l->inputq))
1147+
rc = tipc_link_input(l, skb, l->inputq);
1148+
if (unlikely(rc))
1149+
break;
1150+
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1151+
tipc_link_build_ack_msg(l, xmitq);
1152+
1153+
} while ((skb = __skb_dequeue(defq)));
11191154

1120-
/* Ack at regular intervals */
1121-
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
1122-
l->rcv_unacked = 0;
1123-
l->stats.sent_acks++;
1124-
tipc_link_build_proto_msg(l, STATE_MSG,
1125-
0, 0, 0, 0, xmitq);
1126-
}
1127-
}
1128-
exit:
1129-
tipc_skb_queue_splice_tail(&tmpq, l->inputq);
1155+
return rc;
1156+
drop:
1157+
kfree_skb(skb);
11301158
return rc;
11311159
}
11321160

@@ -1250,7 +1278,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
12501278
}
12511279

12521280
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1253-
* with contents of the link's tranmsit and backlog queues.
1281+
* with contents of the link's transmit and backlog queues.
12541282
*/
12551283
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
12561284
int mtyp, struct sk_buff_head *xmitq)
@@ -1331,6 +1359,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
13311359
u16 peers_tol = msg_link_tolerance(hdr);
13321360
u16 peers_prio = msg_linkprio(hdr);
13331361
u16 rcv_nxt = l->rcv_nxt;
1362+
int mtyp = msg_type(hdr);
13341363
char *if_name;
13351364
int rc = 0;
13361365

@@ -1340,7 +1369,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
13401369
if (link_own_addr(l) > msg_prevnode(hdr))
13411370
l->net_plane = msg_net_plane(hdr);
13421371

1343-
switch (msg_type(hdr)) {
1372+
switch (mtyp) {
13441373
case RESET_MSG:
13451374

13461375
/* Ignore duplicate RESET with old session number */
@@ -1367,12 +1396,14 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
13671396
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
13681397
l->priority = peers_prio;
13691398

1370-
if (msg_type(hdr) == RESET_MSG) {
1371-
rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1372-
} else if (!link_is_up(l)) {
1373-
tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1374-
rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
1375-
}
1399+
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1400+
if ((mtyp == RESET_MSG) || !link_is_up(l))
1401+
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1402+
1403+
/* ACTIVATE_MSG takes up link if it was already locally reset */
1404+
if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1405+
rc = TIPC_LINK_UP_EVT;
1406+
13761407
l->peer_session = msg_session(hdr);
13771408
l->peer_bearer_id = msg_bearer_id(hdr);
13781409
if (l->mtu > msg_max_pkt(hdr))
@@ -1389,9 +1420,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
13891420
l->stats.recv_states++;
13901421
if (msg_probe(hdr))
13911422
l->stats.recv_probes++;
1392-
rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
1393-
if (!link_is_up(l))
1423+
1424+
if (!link_is_up(l)) {
1425+
if (l->state == LINK_ESTABLISHING)
1426+
rc = TIPC_LINK_UP_EVT;
13941427
break;
1428+
}
13951429

13961430
/* Send NACK if peer has sent pkts we haven't received yet */
13971431
if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))

net/tipc/link.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,7 @@ struct tipc_link {
185185
} backlog[5];
186186
u16 snd_nxt;
187187
u16 last_retransm;
188-
u32 window;
188+
u16 window;
189189
u32 stale_count;
190190

191191
/* Reception */
@@ -213,10 +213,13 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
213213
int mtyp, struct sk_buff_head *xmitq);
214214
void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
215215
struct sk_buff_head *xmitq);
216+
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
216217
int tipc_link_fsm_evt(struct tipc_link *l, int evt);
217218
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
218219
bool tipc_link_is_up(struct tipc_link *l);
220+
bool tipc_link_peer_is_down(struct tipc_link *l);
219221
bool tipc_link_is_reset(struct tipc_link *l);
222+
bool tipc_link_is_establishing(struct tipc_link *l);
220223
bool tipc_link_is_synching(struct tipc_link *l);
221224
bool tipc_link_is_failingover(struct tipc_link *l);
222225
bool tipc_link_is_blocked(struct tipc_link *l);

net/tipc/msg.c

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -590,3 +590,34 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
590590
kfree_skb(head);
591591
return NULL;
592592
}
593+
594+
/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
595+
* @list: list to be appended to
596+
* @seqno: sequence number of buffer to add
597+
* @skb: buffer to add
598+
*/
599+
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
600+
struct sk_buff *skb)
601+
{
602+
struct sk_buff *_skb, *tmp;
603+
604+
if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
605+
__skb_queue_head(list, skb);
606+
return;
607+
}
608+
609+
if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
610+
__skb_queue_tail(list, skb);
611+
return;
612+
}
613+
614+
skb_queue_walk_safe(list, _skb, tmp) {
615+
if (more(seqno, buf_seqno(_skb)))
616+
continue;
617+
if (seqno == buf_seqno(_skb))
618+
break;
619+
__skb_queue_before(list, _skb, skb);
620+
return;
621+
}
622+
kfree_skb(skb);
623+
}

0 commit comments

Comments
 (0)