
Commit 9073fb8

Jon Paul Maloy authored and davem330 committed
tipc: use temporary, non-protected skb queue for bundle reception
Currently, when we extract small messages from a message bundle, or when many messages have accumulated in the link arrival queue, those messages are added one by one to the lock-protected link input queue. This may increase contention with the reader of that queue, in the function tipc_sk_rcv().

This commit introduces a temporary, unprotected input queue in tipc_link_rcv() for such cases. Only when the arrival queue has been emptied, and the function is ready to return, does it splice the whole temporary queue into the real input queue.

Tested-by: Ying Xue <[email protected]>
Signed-off-by: Jon Maloy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 23d8335 commit 9073fb8
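The change amounts to a standard batching pattern: collect buffers on a lockless, on-stack queue while processing, then touch the reader-shared queue only once when handing the batch over. Below is a minimal sketch of that idea, not taken from the patch: the function name deliver_batch is made up for illustration, and generic skbuff primitives stand in for the TIPC helper tipc_skb_queue_splice_tail() used in the diff further down.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/*
 * Sketch only (not from the patch): move a batch of buffers into a
 * lock-protected input queue with a single lock acquisition.  The real
 * code ends with tipc_skb_queue_splice_tail(); generic skbuff
 * primitives are used here to keep the example self-contained.
 */
static void deliver_batch(struct sk_buff_head *arrvq,
			  struct sk_buff_head *inputq)
{
	struct sk_buff_head tmpq;	/* on-stack, private: no lock needed */
	struct sk_buff *skb;

	__skb_queue_head_init(&tmpq);

	/* Classify/unbundle arrivals without touching inputq->lock */
	while ((skb = __skb_dequeue(arrvq)) != NULL)
		__skb_queue_tail(&tmpq, skb);

	/* One lock round-trip for the whole batch */
	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail_init(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}

Using the unlocked __skb_queue_tail() on the temporary queue is safe because, as in the patch, tmpq lives on the receiving function's stack and is never visible to another CPU; only the final splice has to synchronize with the reader of l->inputq.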

File tree: 1 file changed, +19 −15 lines changed

net/tipc/link.c

Lines changed: 19 additions & 15 deletions
@@ -111,8 +111,6 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
 
 /*
  * Simple non-static link routines (i.e. referenced outside this file)
@@ -960,18 +958,18 @@ static int tipc_link_retransm(struct tipc_link *l, int retransm,
  * Consumes buffer if message is of right type
  * Node lock must be held
  */
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+			    struct sk_buff_head *inputq)
 {
 	struct tipc_node *node = link->owner;
-	struct tipc_msg *msg = buf_msg(skb);
 
-	switch (msg_user(msg)) {
+	switch (msg_user(buf_msg(skb))) {
 	case TIPC_LOW_IMPORTANCE:
 	case TIPC_MEDIUM_IMPORTANCE:
 	case TIPC_HIGH_IMPORTANCE:
 	case TIPC_CRITICAL_IMPORTANCE:
 	case CONN_MANAGER:
-		skb_queue_tail(link->inputq, skb);
+		__skb_queue_tail(inputq, skb);
 		return true;
 	case NAME_DISTRIBUTOR:
 		node->bclink.recv_permitted = true;
@@ -993,7 +991,8 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
  *
  * Consumes buffer
  */
-static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb)
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
+			   struct sk_buff_head *inputq)
 {
 	struct tipc_node *node = l->owner;
 	struct tipc_msg *hdr = buf_msg(skb);
@@ -1016,7 +1015,7 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb)
 		hdr = buf_msg(skb);
 		if (less(msg_seqno(hdr), l->drop_point))
 			goto drop;
-		if (tipc_data_input(l, skb))
+		if (tipc_data_input(l, skb, inputq))
 			return rc;
 		usr = msg_user(hdr);
 		reasm_skb = &l->failover_reasm_skb;
@@ -1026,13 +1025,13 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb)
 		l->stats.recv_bundles++;
 		l->stats.recv_bundled += msg_msgcnt(hdr);
 		while (tipc_msg_extract(skb, &iskb, &pos))
-			tipc_data_input(l, iskb);
+			tipc_data_input(l, iskb, inputq);
 		return 0;
 	} else if (usr == MSG_FRAGMENTER) {
 		l->stats.recv_fragments++;
 		if (tipc_buf_append(reasm_skb, &skb)) {
 			l->stats.recv_fragmented++;
-			tipc_data_input(l, skb);
+			tipc_data_input(l, skb, inputq);
 		} else if (!*reasm_skb) {
 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 		}
@@ -1070,10 +1069,13 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 		  struct sk_buff_head *xmitq)
 {
 	struct sk_buff_head *arrvq = &l->deferdq;
+	struct sk_buff_head tmpq;
 	struct tipc_msg *hdr;
 	u16 seqno, rcv_nxt;
 	int rc = 0;
 
+	__skb_queue_head_init(&tmpq);
+
 	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
 		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
 			tipc_link_build_proto_msg(l, STATE_MSG, 0,
@@ -1095,7 +1097,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 			rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
 		if (!link_is_up(l)) {
 			kfree_skb(__skb_dequeue(arrvq));
-			return rc;
+			goto exit;
 		}
 	}
 
@@ -1113,7 +1115,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 		rcv_nxt = l->rcv_nxt;
 		if (unlikely(less(rcv_nxt, seqno))) {
 			l->stats.deferred_recv++;
-			return rc;
+			goto exit;
 		}
 
 		__skb_dequeue(arrvq);
@@ -1122,14 +1124,14 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 		if (unlikely(more(rcv_nxt, seqno))) {
 			l->stats.duplicates++;
 			kfree_skb(skb);
-			return rc;
+			goto exit;
 		}
 
 		/* Packet can be delivered */
 		l->rcv_nxt++;
 		l->stats.recv_info++;
-		if (unlikely(!tipc_data_input(l, skb)))
-			rc = tipc_link_input(l, skb);
+		if (unlikely(!tipc_data_input(l, skb, &tmpq)))
+			rc = tipc_link_input(l, skb, &tmpq);
 
 		/* Ack at regular intervals */
 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
@@ -1139,6 +1141,8 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 						  0, 0, 0, 0, xmitq);
 		}
 	}
+exit:
+	tipc_skb_queue_splice_tail(&tmpq, l->inputq);
 	return rc;
 }
 