@@ -2253,6 +2253,10 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
 	return true;
 }
 
+/* flags for __mptcp_close_ssk() */
+#define MPTCP_CF_PUSH		BIT(1)
+#define MPTCP_CF_FASTCLOSE	BIT(2)
+
 /* subflow sockets can be either outgoing (connect) or incoming
  * (accept).
  *
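Note: MPTCP_CF_PUSH and MPTCP_CF_FASTCLOSE are independent bit masks (BIT(n) expands to 1UL << n via linux/bits.h), so callers can OR them together into the new "unsigned int flags" argument. A hypothetical combined call, not a call site in this commit:

	/* hypothetical: push pending data and send MP_FASTCLOSE on teardown */
	__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH | MPTCP_CF_FASTCLOSE);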
@@ -2262,22 +2266,37 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
  * parent socket.
  */
 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
-			      struct mptcp_subflow_context *subflow)
+			      struct mptcp_subflow_context *subflow,
+			      unsigned int flags)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	bool need_push;
+	bool need_push, dispose_it;
 
-	list_del(&subflow->node);
+	dispose_it = !msk->subflow || ssk != msk->subflow->sk;
+	if (dispose_it)
+		list_del(&subflow->node);
 
 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
 
+	if (flags & MPTCP_CF_FASTCLOSE)
+		subflow->send_fastclose = 1;
+
+	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+	if (!dispose_it) {
+		tcp_disconnect(ssk, 0);
+		msk->subflow->state = SS_UNCONNECTED;
+		mptcp_subflow_ctx_reset(subflow);
+		release_sock(ssk);
+
+		goto out;
+	}
+
 	/* if we are invoked by the msk cleanup code, the subflow is
 	 * already orphaned
 	 */
 	if (ssk->sk_socket)
 		sock_orphan(ssk);
 
-	need_push = __mptcp_retransmit_pending_data(sk);
 	subflow->disposable = 1;
 
 	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
@@ -2297,14 +2316,12 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 
 	sock_put(ssk);
 
-	if (ssk == msk->last_snd)
-		msk->last_snd = NULL;
-
 	if (ssk == msk->first)
 		msk->first = NULL;
 
-	if (msk->subflow && ssk == msk->subflow->sk)
-		mptcp_dispose_initial_subflow(msk);
+out:
+	if (ssk == msk->last_snd)
+		msk->last_snd = NULL;
 
 	if (need_push)
 		__mptcp_push_pending(sk, 0);
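The behavioral core of the two hunks above is the dispose_it test: the subflow backing msk->subflow (the initial one) is no longer freed on close; it is disconnected via tcp_disconnect() and its context reset, so the MPTCP socket can be connected again later. A minimal sketch of that decision, assuming the kernel structures above (the helper name is ours, not the kernel's):

	/* sketch only: mirrors the dispose_it logic in __mptcp_close_ssk() */
	static bool mptcp_must_dispose_subflow(const struct mptcp_sock *msk,
					       const struct sock *ssk)
	{
		/* every subflow except the one backing msk->subflow is freed;
		 * the initial subflow is recycled instead
		 */
		return !msk->subflow || ssk != msk->subflow->sk;
	}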
@@ -2315,7 +2332,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 {
 	if (sk->sk_state == TCP_ESTABLISHED)
 		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
-	__mptcp_close_ssk(sk, ssk, subflow);
+	__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
 }
 
 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
@@ -2533,9 +2550,20 @@ static int __mptcp_init_sock(struct sock *sk)
 	return 0;
 }
 
-static int mptcp_init_sock(struct sock *sk)
+static void mptcp_ca_reset(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	tcp_assign_congestion_control(sk);
+	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+
+	/* no need to keep a reference to the ops, the name will suffice */
+	tcp_cleanup_congestion_control(sk);
+	icsk->icsk_ca_ops = NULL;
+}
+
+static int mptcp_init_sock(struct sock *sk)
+{
 	struct net *net = sock_net(sk);
 	int ret;
 
@@ -2556,12 +2584,7 @@ static int mptcp_init_sock(struct sock *sk)
 	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
 	 * propagate the correct value
 	 */
-	tcp_assign_congestion_control(sk);
-	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
-
-	/* no need to keep a reference to the ops, the name will suffice */
-	tcp_cleanup_congestion_control(sk);
-	icsk->icsk_ca_ops = NULL;
+	mptcp_ca_reset(sk);
 
 	sk_sockets_allocated_inc(sk);
 	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
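Factoring the congestion-control setup into mptcp_ca_reset() lets mptcp_disconnect() (later in this patch) restore the default CA name on socket reuse, not just mptcp_init_sock() at creation. A small user-space sketch of the observable effect, assuming the kernel exposes TCP_CONGESTION on MPTCP sockets (the helper name is hypothetical):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>
	#include <stdio.h>

	/* hypothetical helper: print the CA algorithm the socket reports;
	 * after a disconnect this should be the sysctl default again,
	 * since mptcp_disconnect() now calls mptcp_ca_reset()
	 */
	static void print_ca_name(int fd)
	{
		char ca[16] = "";
		socklen_t len = sizeof(ca);

		if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, &len) == 0)
			printf("ca: %s\n", ca);
	}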
@@ -2720,9 +2743,13 @@ static void __mptcp_destroy_sock(struct sock *sk)
 	sk_stop_timer(sk, &sk->sk_timer);
 	msk->pm.status = 0;
 
+	/* clears msk->subflow, allowing the following loop to close
+	 * even the initial subflow
+	 */
+	mptcp_dispose_initial_subflow(msk);
 	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-		__mptcp_close_ssk(sk, ssk, subflow);
+		__mptcp_close_ssk(sk, ssk, subflow, 0);
 	}
 
 	sk->sk_prot->destroy(sk);
@@ -2733,7 +2760,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
 	xfrm_sk_free_policy(sk);
 
 	sk_refcnt_debug_release(sk);
-	mptcp_dispose_initial_subflow(msk);
 	sock_put(sk);
 }
 
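Moving mptcp_dispose_initial_subflow() from the tail of __mptcp_destroy_sock() to just before the subflow loop is what lets the loop tear down everything: once msk->subflow is NULL, the dispose_it test added earlier evaluates true for every subflow, including the initial one. Roughly (illustrative trace, not kernel code):

	/* inside __mptcp_destroy_sock() after this change:
	 *
	 *   mptcp_dispose_initial_subflow(msk);      // msk->subflow = NULL
	 *   ...
	 *   __mptcp_close_ssk(sk, ssk, subflow, 0);  // dispose_it is always true
	 */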
@@ -2769,6 +2795,9 @@ static void mptcp_close(struct sock *sk, long timeout)
 
 	sock_hold(sk);
 	pr_debug("msk=%p state=%d", sk, sk->sk_state);
+	if (mptcp_sk(sk)->token)
+		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+
 	if (sk->sk_state == TCP_CLOSE) {
 		__mptcp_destroy_sock(sk);
 		do_cancel_work = true;
@@ -2779,9 +2808,6 @@ static void mptcp_close(struct sock *sk, long timeout)
 	if (do_cancel_work)
 		mptcp_cancel_work(sk);
 
-	if (mptcp_sk(sk)->token)
-		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
-
 	sock_put(sk);
 }
 
@@ -2815,13 +2841,36 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 
 	mptcp_do_flush_join_list(msk);
 
+	inet_sk_state_store(sk, TCP_CLOSE);
+
 	mptcp_for_each_subflow(msk, subflow) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
-		lock_sock(ssk);
-		tcp_disconnect(ssk, flags);
-		release_sock(ssk);
+		__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE);
 	}
+
+	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
+	sk_stop_timer(sk, &sk->sk_timer);
+
+	if (mptcp_sk(sk)->token)
+		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+
+	mptcp_destroy_common(msk);
+	msk->last_snd = NULL;
+	msk->flags = 0;
+	msk->recovery = false;
+	msk->can_ack = false;
+	msk->fully_established = false;
+	msk->rcv_data_fin = false;
+	msk->snd_data_fin_enable = false;
+	msk->rcv_fastclose = false;
+	msk->use_64bit_ack = false;
+	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
+	mptcp_pm_data_reset(msk);
+	mptcp_ca_reset(sk);
+
+	sk->sk_shutdown = 0;
+	sk_error_report(sk);
 	return 0;
 }
 
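The reworked mptcp_disconnect() now performs a full protocol-level reset: subflows are closed with MP_FASTCLOSE, both timers stop, and the msk state, path-manager and congestion-control data return to post-init values. The user-visible pattern this enables is disconnect-and-reconnect on a single socket; a minimal user-space sketch, assuming a kernel built with MPTCP support (port and error handling are placeholders):

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef IPPROTO_MPTCP
	#define IPPROTO_MPTCP 262
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
		struct sockaddr_in addr = {
			.sin_family = AF_INET,
			.sin_port = htons(8080),	/* hypothetical server port */
		};
		struct sockaddr unspec = { .sa_family = AF_UNSPEC };

		addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		connect(fd, (struct sockaddr *)&addr, sizeof(addr));

		/* connect(AF_UNSPEC) reaches mptcp_disconnect() in the kernel */
		connect(fd, &unspec, sizeof(unspec));

		/* the same socket can now be connected again */
		connect(fd, (struct sockaddr *)&addr, sizeof(addr));
		return close(fd);
	}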
@@ -2961,9 +3010,11 @@ void mptcp_destroy_common(struct mptcp_sock *msk)
 	__mptcp_clear_xmit(sk);
 
 	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
+	mptcp_data_lock(sk);
 	skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
 	__skb_queue_purge(&sk->sk_receive_queue);
 	skb_rbtree_purge(&msk->out_of_order_queue);
+	mptcp_data_unlock(sk);
 
 	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
 	 * inet_sock_destruct() will dispose it
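The new lock pair closes a race between this splice/purge and concurrent softirq-side updates of the msk receive queues, which matters now that destroy_common can run from mptcp_disconnect() on a live socket. For reference, mptcp_data_lock()/mptcp_data_unlock() take the msk socket spinlock with bottom halves disabled; the definitions below reflect net/mptcp/protocol.h in this era, quoted as an assumption rather than from this commit:

	#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
	#define mptcp_data_unlock(sk) spin_unlock_bh(&(sk)->sk_lock.slock)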