@@ -1,7 +1,7 @@
 /*
  * net/tipc/link.c: TIPC link code
  *
- * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
+ * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -127,6 +127,7 @@ struct tipc_link {

 	/* Management and link supervision data */
 	u32 peer_session;
+	u32 session;
 	u32 peer_bearer_id;
 	u32 bearer_id;
 	u32 tolerance;
@@ -136,11 +137,7 @@ struct tipc_link {
 	u16 peer_caps;
 	bool active;
 	u32 silent_intv_cnt;
-	struct {
-		unchar hdr[INT_H_SIZE];
-		unchar body[TIPC_MAX_IF_NAME];
-	} proto_msg;
-	struct tipc_msg *pmsg;
+	char if_name[TIPC_MAX_IF_NAME];
 	u32 priority;
 	char net_plane;

@@ -215,10 +212,11 @@ enum {
  * Interval between NACKs when packets arrive out of order
  */
 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
-/*
- * Out-of-range value for link session numbers
+
+/* Wildcard value for link session numbers. When it is known that
+ * peer endpoint is down, any session number must be accepted.
  */
-#define WILDCARD_SESSION 0x10000
+#define ANY_SESSION 0x10000

 /* Link FSM states:
  */
@@ -398,16 +396,6 @@ char *tipc_link_name(struct tipc_link *l)
 	return l->name;
 }

-static u32 link_own_addr(struct tipc_link *l)
-{
-	return msg_prevnode(l->pmsg);
-}
-
-void tipc_link_reinit(struct tipc_link *l, u32 addr)
-{
-	msg_set_prevnode(l->pmsg, addr);
-}
-
 /**
  * tipc_link_create - create a new link
  * @n: pointer to associated node
@@ -441,29 +429,22 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 		      struct tipc_link **link)
 {
 	struct tipc_link *l;
-	struct tipc_msg *hdr;

 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
 	if (!l)
 		return false;
 	*link = l;
-	l->pmsg = (struct tipc_msg *)&l->proto_msg;
-	hdr = l->pmsg;
-	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
-	msg_set_size(hdr, sizeof(l->proto_msg));
-	msg_set_session(hdr, session);
-	msg_set_bearer_id(hdr, l->bearer_id);
+	l->session = session;

 	/* Note: peer i/f name is completed by reset/activate message */
 	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
 		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
 		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
-	strcpy((char *)msg_data(hdr), if_name);
-
+	strcpy(l->if_name, if_name);
 	l->addr = peer;
 	l->peer_caps = peer_caps;
 	l->net = net;
-	l->peer_session = WILDCARD_SESSION;
+	l->peer_session = ANY_SESSION;
 	l->bearer_id = bearer_id;
 	l->tolerance = tolerance;
 	l->net_plane = net_plane;
@@ -790,7 +771,7 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 	struct tipc_msg *msg = buf_msg(skb_peek(list));
 	int imp = msg_importance(msg);
 	u32 oport = msg_origport(msg);
-	u32 addr = link_own_addr(link);
+	u32 addr = tipc_own_addr(link->net);
 	struct sk_buff *skb;

 	/* This really cannot happen... */
@@ -839,16 +820,9 @@ void link_prepare_wakeup(struct tipc_link *l)

 void tipc_link_reset(struct tipc_link *l)
 {
-	/* Link is down, accept any session */
-	l->peer_session = WILDCARD_SESSION;
-
-	/* If peer is up, it only accepts an incremented session number */
-	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
-
-	/* Prepare for renewed mtu size negotiation */
+	l->peer_session = ANY_SESSION;
+	l->session++;
 	l->mtu = l->advertised_mtu;
-
-	/* Clean up all queues and counters: */
 	__skb_queue_purge(&l->transmq);
 	__skb_queue_purge(&l->deferdq);
 	skb_queue_splice_init(&l->wakeupq, l->inputq);
@@ -1156,7 +1130,7 @@ int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)

 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
 	if (link_is_bc_rcvlink(l)) {
-		if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
+		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
 			return 0;
 		l->rcv_unacked = 0;
 		return TIPC_LINK_SND_BC_ACK;
@@ -1268,15 +1242,30 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 				      u16 rcvgap, int tolerance, int priority,
 				      struct sk_buff_head *xmitq)
 {
-	struct sk_buff *skb = NULL;
-	struct tipc_msg *hdr = l->pmsg;
+	struct sk_buff *skb;
+	struct tipc_msg *hdr;
+	struct sk_buff_head *dfq = &l->deferdq;
 	bool node_up = link_is_up(l->bc_rcvlink);

 	/* Don't send protocol message during reset or link failover */
 	if (tipc_link_is_blocked(l))
 		return;

-	msg_set_type(hdr, mtyp);
+	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
+		return;
+
+	if (!skb_queue_empty(dfq))
+		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
+
+	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
+			      TIPC_MAX_IF_NAME, l->addr,
+			      tipc_own_addr(l->net), 0, 0, 0);
+	if (!skb)
+		return;
+
+	hdr = buf_msg(skb);
+	msg_set_session(hdr, l->session);
+	msg_set_bearer_id(hdr, l->bearer_id);
 	msg_set_net_plane(hdr, l->net_plane);
 	msg_set_next_sent(hdr, l->snd_nxt);
 	msg_set_ack(hdr, l->rcv_nxt - 1);
@@ -1286,36 +1275,23 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 	msg_set_linkprio(hdr, priority);
 	msg_set_redundant_link(hdr, node_up);
 	msg_set_seq_gap(hdr, 0);
-
-	/* Compatibility: created msg must not be in sequence with pkt flow */
 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

 	if (mtyp == STATE_MSG) {
-		if (!tipc_link_is_up(l))
-			return;
-
-		/* Override rcvgap if there are packets in deferred queue */
-		if (!skb_queue_empty(&l->deferdq))
-			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
-		if (rcvgap) {
-			msg_set_seq_gap(hdr, rcvgap);
-			l->stats.sent_nacks++;
-		}
+		msg_set_seq_gap(hdr, rcvgap);
+		msg_set_size(hdr, INT_H_SIZE);
 		msg_set_probe(hdr, probe);
-		if (probe)
-			l->stats.sent_probes++;
 		l->stats.sent_states++;
 		l->rcv_unacked = 0;
 	} else {
 		/* RESET_MSG or ACTIVATE_MSG */
 		msg_set_max_pkt(hdr, l->advertised_mtu);
-		msg_set_ack(hdr, l->rcv_nxt - 1);
-		msg_set_next_sent(hdr, 1);
+		strcpy(msg_data(hdr), l->if_name);
 	}
-	skb = tipc_buf_acquire(msg_size(hdr));
-	if (!skb)
-		return;
-	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+	if (probe)
+		l->stats.sent_probes++;
+	if (rcvgap)
+		l->stats.sent_nacks++;
 	skb->priority = TC_PRIO_CONTROL;
 	__skb_queue_tail(xmitq, skb);
 }
@@ -1340,7 +1316,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,

 	/* At least one packet required for safe algorithm => add dummy */
 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
-			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
+			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
 			      0, 0, TIPC_ERR_NO_PORT);
 	if (!skb) {
 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
@@ -1351,7 +1327,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 	__skb_queue_purge(&tmpxq);

 	/* Initialize reusable tunnel packet header */
-	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
+	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
 		      mtyp, INT_H_SIZE, l->addr);
 	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
 	msg_set_msgcnt(&tnlhdr, pktcnt);
@@ -1410,15 +1386,15 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 	if (tipc_link_is_blocked(l) || !xmitq)
 		goto exit;

-	if (link_own_addr(l) > msg_prevnode(hdr))
+	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
 		l->net_plane = msg_net_plane(hdr);

 	switch (mtyp) {
 	case RESET_MSG:

 		/* Ignore duplicate RESET with old session number */
 		if ((less_eq(msg_session(hdr), l->peer_session)) &&
-		    (l->peer_session != WILDCARD_SESSION))
+		    (l->peer_session != ANY_SESSION))
 			break;
 		/* fall thru' */

@@ -1515,7 +1491,7 @@ static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
 	u16 gap_to = peers_snd_nxt - 1;

 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
-			      0, l->addr, link_own_addr(l), 0, 0, 0);
+			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
 	if (!skb)
 		return false;
 	hdr = buf_msg(skb);
@@ -1670,7 +1646,7 @@ int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
 	if (mtyp != STATE_MSG)
 		return 0;

-	if (dnode == link_own_addr(l)) {
+	if (dnode == tipc_own_addr(l->net)) {
 		tipc_link_bc_ack_rcv(l, acked, xmitq);
 		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
 		l->stats.recv_nacks++;