@@ -784,10 +784,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
 	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
 }
 
-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
 {
-	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
-		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
+	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
 }
 
 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
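Note: this hunk demotes warn_bad_map() to dbg_bad_map() because a malformed mapping can be produced by any remote peer, so it must not fire a kernel warning splat; pr_debug() only emits output when dynamic debug is enabled. A minimal sketch of the rule being applied (the helper name and "limit" are illustrative, not from this patch):

#include <linux/printk.h>
#include <linux/types.h>

/* Peer-controlled input is logged at debug level and rejected,
 * never WARN()ed on.
 */
static bool validate_peer_field(u32 field, u32 limit)
{
	if (field > limit) {
		pr_debug("invalid peer field %u (limit %u)\n", field, limit);
		return false;	/* caller drops the data or resets the subflow */
	}
	return true;
}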
@@ -812,13 +812,13 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
 		/* Mapping covers data later in the subflow stream,
 		 * currently unsupported.
 		 */
-		warn_bad_map(subflow, ssn);
+		dbg_bad_map(subflow, ssn);
 		return false;
 	}
 	if (unlikely(!before(ssn, subflow->map_subflow_seq +
 			     subflow->map_data_len))) {
 		/* Mapping does covers past subflow data, invalid */
-		warn_bad_map(subflow, ssn + skb->len);
+		dbg_bad_map(subflow, ssn);
 		return false;
 	}
 	return true;
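For context: validate_mapping() accepts a subflow sequence number only if it falls inside the half-open window [map_subflow_seq, map_subflow_seq + map_data_len). Both checks use before() from include/net/tcp.h so they stay correct across 32-bit wraparound; a sketch of the comparison it performs:

#include <linux/types.h>

/* Wrap-safe 32-bit sequence compare, as done by before() in
 * include/net/tcp.h: true if seq1 precedes seq2 modulo 2^32.
 */
static inline bool seq32_before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}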
@@ -1000,7 +1000,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 	struct sk_buff *skb;
 
 	if (!skb_peek(&ssk->sk_receive_queue))
-		subflow->data_avail = 0;
+		WRITE_ONCE(subflow->data_avail, 0);
 	if (subflow->data_avail)
 		return true;
 
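From here on, every store to subflow->data_avail is converted to WRITE_ONCE(): the field is sampled locklessly from the MPTCP parent socket, so stores must pair with READ_ONCE() loads to rule out compiler tearing and reordering. A self-contained sketch of the pairing; "flag_ctx" and the helpers are hypothetical stand-ins for mptcp_subflow_context and its data_avail accesses:

#include <linux/compiler.h>
#include <linux/types.h>

struct flag_ctx {
	u8 data_avail;
};

static void flag_set(struct flag_ctx *ctx, u8 val)
{
	WRITE_ONCE(ctx->data_avail, val);	/* single, untorn store */
}

static u8 flag_get(const struct flag_ctx *ctx)
{
	return READ_ONCE(ctx->data_avail);	/* untorn load, not cached in a register */
}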
@@ -1039,18 +1039,13 @@ static bool subflow_check_data_avail(struct sock *ssk)
 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
 		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
 			 ack_seq);
-		if (ack_seq == old_ack) {
-			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
-			break;
-		} else if (after64(ack_seq, old_ack)) {
-			subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
-			break;
+		if (unlikely(before64(ack_seq, old_ack))) {
+			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+			continue;
 		}
 
-		/* only accept in-sequence mapping. Old values are spurious
-		 * retransmission
-		 */
-		mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+		break;
 	}
 	return true;
 
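After this hunk the subflow signals only MPTCP_SUBFLOW_DATA_AVAIL: data whose DSN precedes the msk-level ack_seq is a spurious retransmission and gets discarded, while anything at or after the current ack_seq is flagged with the single available state, so the separate MPTCP_SUBFLOW_OOO_DATA value is no longer set here. The 64-bit compare is wrap-safe; a sketch of what before64() (from net/mptcp/protocol.h) computes:

#include <linux/types.h>

/* Wrap-safe 64-bit DSN compare: true if seq1 precedes seq2 modulo 2^64. */
static inline bool dsn_before64(u64 seq1, u64 seq2)
{
	return (s64)(seq1 - seq2) < 0;
}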
@@ -1065,12 +1060,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
 		 * subflow_error_report() will introduce the appropriate barriers
 		 */
 		ssk->sk_err = EBADMSG;
-		ssk->sk_error_report(ssk);
 		tcp_set_state(ssk, TCP_CLOSE);
 		subflow->reset_transient = 0;
 		subflow->reset_reason = MPTCP_RST_EMPTCP;
 		tcp_send_active_reset(ssk, GFP_ATOMIC);
-		subflow->data_avail = 0;
+		WRITE_ONCE(subflow->data_avail, 0);
 		return false;
 	}
 
@@ -1080,7 +1074,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 	subflow->map_seq = READ_ONCE(msk->ack_seq);
 	subflow->map_data_len = skb->len;
 	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
-	subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
 	return true;
 }
 
@@ -1092,7 +1086,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
 	if (subflow->map_valid &&
 	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
 		subflow->map_valid = 0;
-		subflow->data_avail = 0;
+		WRITE_ONCE(subflow->data_avail, 0);
 
 		pr_debug("Done with mapping: seq=%u data_len=%u",
 			 subflow->map_subflow_seq,
@@ -1120,41 +1114,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
 	*full_space = tcp_full_space(sk);
 }
 
-static void subflow_data_ready(struct sock *sk)
-{
-	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-	u16 state = 1 << inet_sk_state_load(sk);
-	struct sock *parent = subflow->conn;
-	struct mptcp_sock *msk;
-
-	msk = mptcp_sk(parent);
-	if (state & TCPF_LISTEN) {
-		/* MPJ subflow are removed from accept queue before reaching here,
-		 * avoid stray wakeups
-		 */
-		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
-			return;
-
-		set_bit(MPTCP_DATA_READY, &msk->flags);
-		parent->sk_data_ready(parent);
-		return;
-	}
-
-	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
-		     !subflow->mp_join && !(state & TCPF_CLOSE));
-
-	if (mptcp_subflow_data_available(sk))
-		mptcp_data_ready(parent, sk);
-}
-
-static void subflow_write_space(struct sock *ssk)
-{
-	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
-
-	mptcp_propagate_sndbuf(sk, ssk);
-	mptcp_write_space(sk);
-}
-
 void __mptcp_error_report(struct sock *sk)
 {
 	struct mptcp_subflow_context *subflow;
@@ -1195,6 +1154,43 @@ static void subflow_error_report(struct sock *ssk)
 	mptcp_data_unlock(sk);
 }
 
+static void subflow_data_ready(struct sock *sk)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+	u16 state = 1 << inet_sk_state_load(sk);
+	struct sock *parent = subflow->conn;
+	struct mptcp_sock *msk;
+
+	msk = mptcp_sk(parent);
+	if (state & TCPF_LISTEN) {
+		/* MPJ subflow are removed from accept queue before reaching here,
+		 * avoid stray wakeups
+		 */
+		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
+			return;
+
+		set_bit(MPTCP_DATA_READY, &msk->flags);
+		parent->sk_data_ready(parent);
+		return;
+	}
+
+	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+		     !subflow->mp_join && !(state & TCPF_CLOSE));
+
+	if (mptcp_subflow_data_available(sk))
+		mptcp_data_ready(parent, sk);
+	else if (unlikely(sk->sk_err))
+		subflow_error_report(sk);
+}
+
+static void subflow_write_space(struct sock *ssk)
+{
+	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+	mptcp_propagate_sndbuf(sk, ssk);
+	mptcp_write_space(sk);
+}
+
 static struct inet_connection_sock_af_ops *
 subflow_default_af_ops(struct sock *sk)
 {
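These two hunks move subflow_data_ready() and subflow_write_space() below subflow_error_report() otherwise unchanged except for the new else branch: relocating them lets subflow_data_ready() call subflow_error_report() without a forward declaration, and the branch ensures a pending socket error still wakes MPTCP-level readers when no in-sequence data is available. A condensed sketch of the resulting policy, with hypothetical helpers standing in for the mptcp ones:

#include <linux/compiler.h>
#include <net/sock.h>

/* Hypothetical stand-ins for mptcp_subflow_data_available(),
 * mptcp_data_ready() and subflow_error_report().
 */
static bool data_available(struct sock *sk);
static void wake_readers(struct sock *sk);
static void report_error(struct sock *sk);

static void data_ready_sketch(struct sock *sk)
{
	if (data_available(sk))			/* in-sequence data: wake readers */
		wake_readers(sk);
	else if (unlikely(sk->sk_err))		/* no data but an error is pending: */
		report_error(sk);		/* propagate so blocked readers return */
}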
@@ -1505,6 +1501,8 @@ static void subflow_state_change(struct sock *sk)
 	 */
 	if (mptcp_subflow_data_available(sk))
 		mptcp_data_ready(parent, sk);
+	else if (unlikely(sk->sk_err))
+		subflow_error_report(sk);
 
 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
 