Commit ebb516a

edumazet authored and davem330 committed

tcp/dccp: fix race at listener dismantle phase
Under stress, a close() on a listener can trigger the
WARN_ON(sk->sk_ack_backlog) in inet_csk_listen_stop().

We need to test whether the listener is still active before queueing
a child in inet_csk_reqsk_queue_add().

Create a common inet_child_forget() helper, and use it from
inet_csk_reqsk_queue_add() and inet_csk_listen_stop().

Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
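
For context, the close()-under-load pattern the message describes can be pictured from user space: handshakes keep completing against a listening socket while the listener is being close()d, so the kernel may try to queue a freshly created child to a listener that is already being dismantled. Below is a minimal, hypothetical stress sketch of that pattern; the port, loop counts, and sleep are invented for illustration, and this is not the actual reproducer from the report.

/*
 * Hypothetical stress sketch: flood a loopback listener with connect()s
 * while repeatedly closing it, so handshakes complete concurrently with
 * listener dismantle. Build with: cc -O2 race.c -lpthread
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <pthread.h>
#include <sys/socket.h>
#include <unistd.h>

static struct sockaddr_in addr;	/* zero-initialized (static storage) */

static void *connect_flood(void *arg)
{
	(void)arg;
	for (;;) {
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return NULL;
		/* Failures are expected whenever the listener is down. */
		connect(fd, (struct sockaddr *)&addr, sizeof(addr));
		close(fd);
	}
}

int main(void)
{
	pthread_t thr;
	int i, one = 1;

	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(12345);	/* arbitrary test port */

	pthread_create(&thr, NULL, connect_flood, NULL);

	for (i = 0; i < 10000; i++) {
		int lfd = socket(AF_INET, SOCK_STREAM, 0);

		if (lfd < 0)
			break;
		setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
		bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
		listen(lfd, 16);
		usleep(1000);	/* let handshakes land in the accept queue */
		close(lfd);	/* dismantle the listener under load */
	}
	return 0;
}

Before this patch, a child whose handshake completed in that window could be added to the accept queue after inet_csk_listen_stop() had drained it, leaving sk_ack_backlog nonzero.
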
1 parent: f03f2e1

3 files changed (+51, -48 lines)

include/net/inet_connection_sock.h (2 additions, 7 deletions)

@@ -268,13 +268,8 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 					     struct sock *newsk,
 					     const struct request_sock *req);
 
-static inline void inet_csk_reqsk_queue_add(struct sock *sk,
-					    struct request_sock *req,
-					    struct sock *child)
-{
-	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
-}
-
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+			      struct sock *child);
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 				   unsigned long timeout);
 
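Moving the helper out of line is part of the fix: inet_csk_reqsk_queue_add() now has to take the accept-queue spinlock and test the listener state, and on failure fall back to teardown that touches TCP internals (tcp_rsk(), tcp_sk(), fastopen state), none of which belongs in a static inline in this generic header.
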
include/net/request_sock.h (0 additions, 19 deletions)

@@ -186,25 +186,6 @@ static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 	return queue->rskq_accept_head == NULL;
 }
 
-static inline void reqsk_queue_add(struct request_sock_queue *queue,
-				   struct request_sock *req,
-				   struct sock *parent,
-				   struct sock *child)
-{
-	spin_lock(&queue->rskq_lock);
-	req->sk = child;
-	sk_acceptq_added(parent);
-
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_head = req;
-	else
-		queue->rskq_accept_tail->dl_next = req;
-
-	queue->rskq_accept_tail = req;
-	req->dl_next = NULL;
-	spin_unlock(&queue->rskq_lock);
-}
-
 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
 						      struct sock *parent)
 {
net/ipv4/inet_connection_sock.c (49 additions, 22 deletions)

@@ -764,6 +764,53 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 
+static void inet_child_forget(struct sock *sk, struct request_sock *req,
+			      struct sock *child)
+{
+	sk->sk_prot->disconnect(child, O_NONBLOCK);
+
+	sock_orphan(child);
+
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
+	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
+		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+		BUG_ON(sk != req->rsk_listener);
+
+		/* Paranoid, to prevent race condition if
+		 * an inbound pkt destined for child is
+		 * blocked by sock lock in tcp_v4_rcv().
+		 * Also to satisfy an assertion in
+		 * tcp_v4_destroy_sock().
+		 */
+		tcp_sk(child)->fastopen_rsk = NULL;
+	}
+	inet_csk_destroy_sock(child);
+	reqsk_put(req);
+}
+
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+			      struct sock *child)
+{
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+	spin_lock(&queue->rskq_lock);
+	if (unlikely(sk->sk_state != TCP_LISTEN)) {
+		inet_child_forget(sk, req, child);
+	} else {
+		req->sk = child;
+		req->dl_next = NULL;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_head = req;
+		else
+			queue->rskq_accept_tail->dl_next = req;
+		queue->rskq_accept_tail = req;
+		sk_acceptq_added(sk);
+	}
+	spin_unlock(&queue->rskq_lock);
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+
 /*
  * This routine closes sockets which have been at least partially
  * opened, but not yet accepted.
@@ -790,31 +837,11 @@ void inet_csk_listen_stop(struct sock *sk)
 		WARN_ON(sock_owned_by_user(child));
 		sock_hold(child);
 
-		sk->sk_prot->disconnect(child, O_NONBLOCK);
-
-		sock_orphan(child);
-
-		percpu_counter_inc(sk->sk_prot->orphan_count);
-
-		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
-			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
-			BUG_ON(sk != req->rsk_listener);
-
-			/* Paranoid, to prevent race condition if
-			 * an inbound pkt destined for child is
-			 * blocked by sock lock in tcp_v4_rcv().
-			 * Also to satisfy an assertion in
-			 * tcp_v4_destroy_sock().
-			 */
-			tcp_sk(child)->fastopen_rsk = NULL;
-		}
-		inet_csk_destroy_sock(child);
-
+		inet_child_forget(sk, req, child);
 		bh_unlock_sock(child);
 		local_bh_enable();
 		sock_put(child);
 
-		reqsk_put(req);
 		cond_resched();
 	}
 	if (queue->fastopenq.rskq_rst_head) {
@@ -829,7 +856,7 @@ void inet_csk_listen_stop(struct sock *sk)
 			req = next;
 		}
 	}
-	WARN_ON(sk->sk_ack_backlog);
+	WARN_ON_ONCE(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
 
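The crux of the fix is in inet_csk_reqsk_queue_add(): the TCP_LISTEN test and the enqueue now happen under the same queue->rskq_lock that reqsk_queue_remove() takes on the accept/drain path. A child whose handshake completes while the listener is being dismantled is therefore either queued in time to be drained or torn down immediately by inet_child_forget(), rather than being left on a queue nobody will ever accept() from, which is what tripped the sk_ack_backlog warning.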