
Commit 57fc0f1

Paolo Abeni authored and kuba-moo committed
mptcp: ensure listener is unhashed before updating the sk status
The MPTCP protocol accesses the listener subflow in a lockless manner in a couple of places (poll, diag). That works only if the msk itself leaves the listener status only after the subflow itself has been closed/disconnected. Otherwise we risk a deadlock in diag, as reported by Christoph.

Address the issue by ensuring that the first subflow (the listener one) is always disconnected before updating the msk socket status.

Reported-by: Christoph Paasch <[email protected]>
Closes: multipath-tcp/mptcp_net-next#407
Fixes: b29fcfb ("mptcp: full disconnect implementation")
Cc: [email protected]
Signed-off-by: Paolo Abeni <[email protected]>
Reviewed-by: Matthieu Baerts <[email protected]>
Signed-off-by: Matthieu Baerts <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent b7535cf commit 57fc0f1
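The invariant the patch enforces in the close/disconnect paths can be sketched as follows. This is an illustrative fragment, not the literal kernel code; it mirrors the __mptcp_close()/mptcp_disconnect() changes in the diffs below, using the mptcp_check_listen_stop() helper they introduce:

	/* Close/disconnect path (sketch): tear the listener subflow down
	 * before the msk leaves the listener state, so lockless readers
	 * (poll, diag) that still observe TCP_LISTEN on the msk can keep
	 * touching the first subflow safely.
	 */
	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
		mptcp_check_listen_stop(sk);		/* unhash/stop msk->first first */
		inet_sk_state_store(sk, TCP_CLOSE);	/* only then update the msk status */
	}

The helper takes the first subflow's lock with lock_sock_nested(ssk, SINGLE_DEPTH_NESTING), presumably because the caller already holds the msk-level socket lock.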

2 files changed: +20 -12 lines changed

net/mptcp/pm_netlink.c

Lines changed: 1 addition & 0 deletions

@@ -1047,6 +1047,7 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
 	if (err)
 		return err;
 
+	inet_sk_state_store(newsk, TCP_LISTEN);
 	err = kernel_listen(ssock, backlog);
 	if (err)
 		return err;

net/mptcp/protocol.c

Lines changed: 19 additions & 12 deletions

@@ -2368,13 +2368,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		kfree_rcu(subflow, rcu);
 	} else {
 		/* otherwise tcp will dispose of the ssk and subflow ctx */
-		if (ssk->sk_state == TCP_LISTEN) {
-			tcp_set_state(ssk, TCP_CLOSE);
-			mptcp_subflow_queue_clean(sk, ssk);
-			inet_csk_listen_stop(ssk);
-			mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
-		}
-
 		__tcp_close(ssk, 0);
 
 		/* close acquired an extra ref */
@@ -2902,10 +2895,24 @@ static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
 	return EPOLLIN | EPOLLRDNORM;
 }
 
-static void mptcp_listen_inuse_dec(struct sock *sk)
+static void mptcp_check_listen_stop(struct sock *sk)
 {
-	if (inet_sk_state_load(sk) == TCP_LISTEN)
-		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+	struct sock *ssk;
+
+	if (inet_sk_state_load(sk) != TCP_LISTEN)
+		return;
+
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+	ssk = mptcp_sk(sk)->first;
+	if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
+		return;
+
+	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+	mptcp_subflow_queue_clean(sk, ssk);
+	inet_csk_listen_stop(ssk);
+	mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
+	tcp_set_state(ssk, TCP_CLOSE);
+	release_sock(ssk);
 }
 
 bool __mptcp_close(struct sock *sk, long timeout)
@@ -2918,7 +2925,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 
 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
-		mptcp_listen_inuse_dec(sk);
+		mptcp_check_listen_stop(sk);
 		inet_sk_state_store(sk, TCP_CLOSE);
 		goto cleanup;
 	}
@@ -3035,7 +3042,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 	if (msk->fastopening)
 		return -EBUSY;
 
-	mptcp_listen_inuse_dec(sk);
+	mptcp_check_listen_stop(sk);
 	inet_sk_state_store(sk, TCP_CLOSE);
 
 	mptcp_stop_timer(sk);
