@@ -1263,11 +1263,14 @@ static void mptcp_close(struct sock *sk, long timeout)
 
 	lock_sock(sk);
 
-	mptcp_token_destroy(msk->token);
 	inet_sk_state_store(sk, TCP_CLOSE);
 
-	__mptcp_flush_join_list(msk);
-
+	/* be sure to always acquire the join list lock, to sync vs
+	 * mptcp_finish_join().
+	 */
+	spin_lock_bh(&msk->join_list_lock);
+	list_splice_tail_init(&msk->join_list, &msk->conn_list);
+	spin_unlock_bh(&msk->join_list_lock);
 	list_splice_init(&msk->conn_list, &conn_list);
 
 	data_fin_tx_seq = msk->write_seq;
@@ -1457,6 +1460,7 @@ static void mptcp_destroy(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
+	mptcp_token_destroy(msk->token);
 	if (msk->cached_ext)
 		__skb_ext_put(msk->cached_ext);
 
@@ -1623,22 +1627,30 @@ bool mptcp_finish_join(struct sock *sk)
 	if (!msk->pm.server_side)
 		return true;
 
-	/* passive connection, attach to msk socket */
+	if (!mptcp_pm_allow_new_subflow(msk))
+		return false;
+
+	/* active connections are already on conn_list, and we can't acquire
+	 * msk lock here.
+	 * use the join list lock as synchronization point and double-check
+	 * msk status to avoid racing with mptcp_close()
+	 */
+	spin_lock_bh(&msk->join_list_lock);
+	ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
+	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
+		list_add_tail(&subflow->node, &msk->join_list);
+	spin_unlock_bh(&msk->join_list_lock);
+	if (!ret)
+		return false;
+
+	/* attach to msk socket only after we are sure he will deal with us
+	 * at close time
+	 */
 	parent_sock = READ_ONCE(parent->sk_socket);
 	if (parent_sock && !sk->sk_socket)
 		mptcp_sock_graft(sk, parent_sock);
-
-	ret = mptcp_pm_allow_new_subflow(msk);
-	if (ret) {
-		subflow->map_seq = msk->ack_seq;
-
-		/* active connections are already on conn_list */
-		spin_lock_bh(&msk->join_list_lock);
-		if (!WARN_ON_ONCE(!list_empty(&subflow->node)))
-			list_add_tail(&subflow->node, &msk->join_list);
-		spin_unlock_bh(&msk->join_list_lock);
-	}
-	return ret;
+	subflow->map_seq = msk->ack_seq;
+	return true;
 }
 
 bool mptcp_sk_is_subflow(const struct sock *sk)
@@ -1712,6 +1724,14 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 	int err;
 
 	lock_sock(sock->sk);
+	if (sock->state != SS_UNCONNECTED && msk->subflow) {
+		/* pending connection or invalid state, let existing subflow
+		 * cope with that
+		 */
+		ssock = msk->subflow;
+		goto do_connect;
+	}
+
 	ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
 	if (IS_ERR(ssock)) {
 		err = PTR_ERR(ssock);
@@ -1726,9 +1746,17 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 	mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
 #endif
 
+do_connect:
 	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
-	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
-	mptcp_copy_inaddrs(sock->sk, ssock->sk);
+	sock->state = ssock->state;
+
+	/* on successful connect, the msk state will be moved to established by
+	 * subflow_finish_connect()
+	 */
+	if (!err || err == EINPROGRESS)
+		mptcp_copy_inaddrs(sock->sk, ssock->sk);
+	else
+		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
 
 unlock:
 	release_sock(sock->sk);
0 commit comments