@@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
 	return af;
 }
 
+static void sctp_auto_asconf_init(struct sctp_sock *sp)
+{
+	struct net *net = sock_net(&sp->inet.sk);
+
+	if (net->sctp.default_auto_asconf) {
+		spin_lock(&net->sctp.addr_wq_lock);
+		list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
+		spin_unlock(&net->sctp.addr_wq_lock);
+		sp->do_auto_asconf = 1;
+	}
+}
+
 /* Bind a local address either to an endpoint or to an association.  */
 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 {
@@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 		return -EADDRINUSE;
 
 	/* Refresh ephemeral port.  */
-	if (!bp->port)
+	if (!bp->port) {
 		bp->port = inet_sk(sk)->inet_num;
+		sctp_auto_asconf_init(sp);
+	}
 
 	/* Add the address to the bind address list.
 	 * Use GFP_ATOMIC since BHs will be disabled.
@@ -1520,9 +1534,11 @@ static void sctp_close(struct sock *sk, long timeout)
 
 	/* Supposedly, no process has access to the socket, but
 	 * the net layers still may.
+	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+	 * held and that should be grabbed before socket lock.
 	 */
-	local_bh_disable();
-	bh_lock_sock(sk);
+	spin_lock_bh(&net->sctp.addr_wq_lock);
+	bh_lock_sock_nested(sk);
 
 	/* Hold the sock, since sk_common_release() will put sock_put()
 	 * and we have just a little more cleanup.
@@ -1531,7 +1547,7 @@ static void sctp_close(struct sock *sk, long timeout)
 	sk_common_release(sk);
 
 	bh_unlock_sock(sk);
-	local_bh_enable();
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
 
 	sock_put(sk);
1537
1553
@@ -4991,16 +5007,6 @@ static int sctp_init_sock(struct sock *sk)
 	sk_sockets_allocated_inc(sk);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
 
-	if (net->sctp.default_auto_asconf) {
-		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
-		list_add_tail(&sp->auto_asconf_list,
-		    &net->sctp.auto_asconf_splist);
-		sp->do_auto_asconf = 1;
-		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
-	} else {
-		sp->do_auto_asconf = 0;
-	}
-
 	local_bh_enable();
 
 	return 0;
@@ -5025,9 +5031,7 @@ static void sctp_destroy_sock(struct sock *sk)
 
 	if (sp->do_auto_asconf) {
 		sp->do_auto_asconf = 0;
-		spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
 		list_del(&sp->auto_asconf_list);
-		spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
 	}
 	sctp_endpoint_free(sp->ep);
 	local_bh_disable();
@@ -9398,6 +9402,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 		return err;
 	}
 
+	sctp_auto_asconf_init(newsp);
+
 	/* Move any messages in the old socket's receive queue that are for the
 	 * peeled off association to the new socket's receive queue.
 	 */
0 commit comments