@@ -1033,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
+	size_t headroom, linear;
 	struct sk_buff *skb;
 	struct iucv_message txmsg = {0};
 	struct cmsghdr *cmsg;
@@ -1110,20 +1111,31 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	 * this is fine for SOCK_SEQPACKET (unless we want to support
 	 * segmented records using the MSG_EOR flag), but
 	 * for SOCK_STREAM we might want to improve it in future */
-	if (iucv->transport == AF_IUCV_TRANS_HIPER)
-		skb = sock_alloc_send_skb(sk,
-			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
-			noblock, &err);
-	else
-		skb = sock_alloc_send_skb(sk, len, noblock, &err);
+	headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
+		   ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
+	if (headroom + len < PAGE_SIZE) {
+		linear = len;
+	} else {
+		/* In nonlinear "classic" iucv skb,
+		 * reserve space for iucv_array
+		 */
+		if (iucv->transport != AF_IUCV_TRANS_HIPER)
+			headroom += sizeof(struct iucv_array) *
+				    (MAX_SKB_FRAGS + 1);
+		linear = PAGE_SIZE - headroom;
+	}
+	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
+				   noblock, &err, 0);
 	if (!skb)
 		goto out;
-	if (iucv->transport == AF_IUCV_TRANS_HIPER)
-		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
-	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
-		err = -EFAULT;
+	if (headroom)
+		skb_reserve(skb, headroom);
+	skb_put(skb, linear);
+	skb->len = len;
+	skb->data_len = len - linear;
+	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
+	if (err)
 		goto fail;
-	}
 
 	/* wait if outstanding messages for iucv path has reached */
 	timeo = sock_sndtimeo(sk, noblock);
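
Note: the headroom/linear split above decides how much of the message stays in
the skb's linear area (capped at one page) and how much spills into page
fragments. A minimal userspace sketch of the same arithmetic; the PAGE_SIZE,
header and descriptor sizes below are illustrative assumptions, not values
taken from this patch:

#include <stdio.h>

/* illustrative stand-ins; the real values come from the kernel headers */
#define PAGE_SIZE	4096UL
#define TRANS_HDR	  40UL	/* assumed sizeof(struct af_iucv_trans_hdr) */
#define ETH_HLEN	  14UL
#define IUCV_ARR_SZ	   8UL	/* assumed sizeof(struct iucv_array) */
#define MAX_SKB_FRAGS	  17UL

static void split(unsigned long len, int hiper)
{
	unsigned long headroom = hiper ? TRANS_HDR + ETH_HLEN : 0;
	unsigned long linear;

	if (headroom + len < PAGE_SIZE) {
		linear = len;			/* small: fully linear skb */
	} else {
		if (!hiper)			/* classic: room for iucv_array */
			headroom += IUCV_ARR_SZ * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;	/* linear part capped at one page */
	}
	printf("len=%5lu hiper=%d -> headroom=%3lu linear=%4lu paged=%lu\n",
	       len, hiper, headroom, linear, len - linear);
}

int main(void)
{
	split(512, 0);		/* small classic message */
	split(65536, 0);	/* large classic message: paged tail */
	split(65536, 1);	/* large HiperSockets message */
	return 0;
}

Since skb_put(skb, linear) accounts only for the linear bytes, the patch then
sets skb->len and skb->data_len by hand before copying all len bytes with
skb_copy_datagram_from_iter().
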
@@ -1148,49 +1160,67 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			atomic_dec(&iucv->msg_sent);
 			goto fail;
 		}
-		goto release;
-	}
-	skb_queue_tail(&iucv->send_skb_q, skb);
-
-	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
-	      && skb->len <= 7) {
-		err = iucv_send_iprm(iucv->path, &txmsg, skb);
+	} else { /* Classic VM IUCV transport */
+		skb_queue_tail(&iucv->send_skb_q, skb);
+
+		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
+		    skb->len <= 7) {
+			err = iucv_send_iprm(iucv->path, &txmsg, skb);
+
+			/* on success: there is no message_complete callback */
+			/* for an IPRMDATA msg; remove skb from send queue */
+			if (err == 0) {
+				skb_unlink(skb, &iucv->send_skb_q);
+				kfree_skb(skb);
+			}
 
-		/* on success: there is no message_complete callback
-		 * for an IPRMDATA msg; remove skb from send queue */
-		if (err == 0) {
-			skb_unlink(skb, &iucv->send_skb_q);
-			kfree_skb(skb);
+			/* this error should never happen since the */
+			/* IUCV_IPRMDATA path flag is set... sever path */
+			if (err == 0x15) {
+				pr_iucv->path_sever(iucv->path, NULL);
+				skb_unlink(skb, &iucv->send_skb_q);
+				err = -EPIPE;
+				goto fail;
+			}
+		} else if (skb_is_nonlinear(skb)) {
+			struct iucv_array *iba = (struct iucv_array *)skb->head;
+			int i;
+
+			/* skip iucv_array lying in the headroom */
+			iba[0].address = (u32)(addr_t)skb->data;
+			iba[0].length = (u32)skb_headlen(skb);
+			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+				iba[i + 1].address =
+					(u32)(addr_t)skb_frag_address(frag);
+				iba[i + 1].length = (u32)skb_frag_size(frag);
+			}
+			err = pr_iucv->message_send(iucv->path, &txmsg,
+						    IUCV_IPBUFLST, 0,
+						    (void *)iba, skb->len);
+		} else { /* non-IPRM Linear skb */
+			err = pr_iucv->message_send(iucv->path, &txmsg,
+					0, 0, (void *)skb->data, skb->len);
 		}
-
-		/* this error should never happen since the
-		 * IUCV_IPRMDATA path flag is set... sever path */
-		if (err == 0x15) {
-			pr_iucv->path_sever(iucv->path, NULL);
+		if (err) {
+			if (err == 3) {
+				user_id[8] = 0;
+				memcpy(user_id, iucv->dst_user_id, 8);
+				appl_id[8] = 0;
+				memcpy(appl_id, iucv->dst_name, 8);
+				pr_err(
+					"Application %s on z/VM guest %s exceeds message limit\n",
+					appl_id, user_id);
+				err = -EAGAIN;
+			} else {
+				err = -EPIPE;
+			}
 			skb_unlink(skb, &iucv->send_skb_q);
-			err = -EPIPE;
 			goto fail;
 		}
-	} else
-		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
-					(void *) skb->data, skb->len);
-	if (err) {
-		if (err == 3) {
-			user_id[8] = 0;
-			memcpy(user_id, iucv->dst_user_id, 8);
-			appl_id[8] = 0;
-			memcpy(appl_id, iucv->dst_name, 8);
-			pr_err("Application %s on z/VM guest %s"
-				" exceeds message limit\n",
-				appl_id, user_id);
-			err = -EAGAIN;
-		} else
-			err = -EPIPE;
-		skb_unlink(skb, &iucv->send_skb_q);
-		goto fail;
 	}
 
-release:
 	release_sock(sk);
 	return len;
 
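
Note: the skb_is_nonlinear() branch above turns the paged skb into an IUCV
buffer list: entry 0 describes the linear part, entries 1..n the page
fragments, and the array itself lives in the headroom reserved at allocation
time. A rough userspace sketch of that construction; the mock types are
hypothetical, and the real iucv_array packs 31-bit buffer addresses into u32:

#include <stdio.h>
#include <stddef.h>

/* mock descriptor; the real struct iucv_array uses u32 address/length */
struct mock_iucv_array {
	void *address;
	size_t length;
};

/* mock stand-ins for the skb linear area and its page fragments */
struct mock_frag {
	void *addr;
	size_t len;
};

int main(void)
{
	static char linear[128], frag_a[4096], frag_b[512];
	struct mock_frag frags[] = {
		{ frag_a, sizeof(frag_a) },
		{ frag_b, sizeof(frag_b) },
	};
	int nr_frags = 2, i;
	/* entry 0 = linear part, then one entry per fragment */
	struct mock_iucv_array iba[1 + 2];
	size_t total = 0;

	iba[0].address = linear;
	iba[0].length = sizeof(linear);
	for (i = 0; i < nr_frags; i++) {
		iba[i + 1].address = frags[i].addr;
		iba[i + 1].length = frags[i].len;
	}
	for (i = 0; i < nr_frags + 1; i++)
		total += iba[i].length;
	printf("buffer list: %d entries, %zu bytes\n", nr_frags + 1, total);
	return 0;
}

The send then passes the whole list with IUCV_IPBUFLST instead of a single
flat buffer, so no copy into a contiguous bounce buffer is needed.
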
@@ -1201,42 +1231,32 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	return err;
 }
 
-/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
- *
- * Locking: must be called with message_q.lock held
- */
-static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
+static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
 {
-	int dataleft, size, copied = 0;
-	struct sk_buff *nskb;
-
-	dataleft = len;
-	while (dataleft) {
-		if (dataleft >= sk->sk_rcvbuf / 4)
-			size = sk->sk_rcvbuf / 4;
-		else
-			size = dataleft;
-
-		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
-		if (!nskb)
-			return -ENOMEM;
-
-		/* copy target class to control buffer of new skb */
-		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
-
-		/* copy data fragment */
-		memcpy(nskb->data, skb->data + copied, size);
-		copied += size;
-		dataleft -= size;
-
-		skb_reset_transport_header(nskb);
-		skb_reset_network_header(nskb);
-		nskb->len = size;
+	size_t headroom, linear;
+	struct sk_buff *skb;
+	int err;
 
-		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
+	if (len < PAGE_SIZE) {
+		headroom = 0;
+		linear = len;
+	} else {
+		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
+		linear = PAGE_SIZE - headroom;
+	}
+	skb = alloc_skb_with_frags(headroom + linear, len - linear,
+				   0, &err, GFP_ATOMIC | GFP_DMA);
+	WARN_ONCE(!skb,
+		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
+		  len, err);
+	if (skb) {
+		if (headroom)
+			skb_reserve(skb, headroom);
+		skb_put(skb, linear);
+		skb->len = len;
+		skb->data_len = len - linear;
 	}
-
-	return 0;
+	return skb;
 }
 
 /* iucv_process_message() - Receive a single outstanding IUCV message
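
Note: reserving sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1) of headroom
leaves room for one descriptor per possible page fragment plus one for the
linear part. A quick sanity check of the resulting capacity, assuming 4 KiB
pages, 17 fragments and 8-byte descriptors (illustrative values):

#include <assert.h>
#include <stdio.h>

#define PAGE_SZ		4096UL
#define DESC_SZ		   8UL	/* assumed sizeof(struct iucv_array) */
#define MAX_FRAGS	  17UL	/* assumed MAX_SKB_FRAGS */

int main(void)
{
	unsigned long headroom = DESC_SZ * (MAX_FRAGS + 1);
	unsigned long linear = PAGE_SZ - headroom;
	/* entry 0 covers the linear bytes, entries 1..17 one page each */
	unsigned long max_msg = linear + MAX_FRAGS * PAGE_SZ;

	assert(headroom + linear == PAGE_SZ);
	printf("headroom=%lu linear=%lu max message=%lu bytes\n",
	       headroom, linear, max_msg);
	return 0;
}
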
@@ -1263,31 +1283,32 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 			skb->len = 0;
 		}
 	} else {
-		rc = pr_iucv->message_receive(path, msg,
+		if (skb_is_nonlinear(skb)) {
+			struct iucv_array *iba = (struct iucv_array *)skb->head;
+			int i;
+
+			iba[0].address = (u32)(addr_t)skb->data;
+			iba[0].length = (u32)skb_headlen(skb);
+			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+				iba[i + 1].address =
+					(u32)(addr_t)skb_frag_address(frag);
+				iba[i + 1].length = (u32)skb_frag_size(frag);
+			}
+			rc = pr_iucv->message_receive(path, msg,
+						      IUCV_IPBUFLST,
+						      (void *)iba, len, NULL);
+		} else {
+			rc = pr_iucv->message_receive(path, msg,
 					      msg->flags & IUCV_IPRMDATA,
 					      skb->data, len, NULL);
+		}
 		if (rc) {
 			kfree_skb(skb);
 			return;
 		}
-		/* we need to fragment iucv messages for SOCK_STREAM only;
-		 * for SOCK_SEQPACKET, it is only relevant if we support
-		 * record segmentation using MSG_EOR (see also recvmsg()) */
-		if (sk->sk_type == SOCK_STREAM &&
-		    skb->truesize >= sk->sk_rcvbuf / 4) {
-			rc = iucv_fragment_skb(sk, skb, len);
-			kfree_skb(skb);
-			skb = NULL;
-			if (rc) {
-				pr_iucv->path_sever(path, NULL);
-				return;
-			}
-			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
-		} else {
-			skb_reset_transport_header(skb);
-			skb_reset_network_header(skb);
-			skb->len = len;
-		}
+		WARN_ON_ONCE(skb->len != len);
 	}
 
 	IUCV_SKB_CB(skb)->offset = 0;
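
Note: with IUCV_IPBUFLST the inbound message is scattered directly across the
listed segments of the paged skb, so no post-receive fragmentation is needed
(hence the removal of iucv_fragment_skb() and the WARN_ON_ONCE() length
check). A mock of what a buffer-list receive amounts to; the types and the
scatter_receive() helper are hypothetical, not the IUCV API:

#include <stdio.h>
#include <string.h>

struct mock_iucv_array {	/* real iucv_array: u32 address, u32 length */
	void *address;
	size_t length;
};

/* hypothetical model of an IPBUFLST-style receive: fill each listed
 * segment in order until the whole message has been placed */
static size_t scatter_receive(const struct mock_iucv_array *iba, int n,
			      const char *msg, size_t len)
{
	size_t done = 0;
	int i;

	for (i = 0; i < n && done < len; i++) {
		size_t chunk = iba[i].length < len - done
			     ? iba[i].length : len - done;

		memcpy(iba[i].address, msg + done, chunk);
		done += chunk;
	}
	return done;
}

int main(void)
{
	char seg0[4], seg1[16];
	struct mock_iucv_array iba[] = {
		{ seg0, sizeof(seg0) },
		{ seg1, sizeof(seg1) },
	};
	size_t n = scatter_receive(iba, 2, "hello, world", 12);

	printf("scattered %zu bytes: \"%.4s\" + \"%.8s\"\n", n, seg0, seg1);
	return 0;
}
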
@@ -1306,7 +1327,7 @@ static void iucv_process_message_q(struct sock *sk)
 	struct sock_msg_q *p, *n;
 
 	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
-		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
+		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
 		if (!skb)
 			break;
 		iucv_process_message(sk, skb, p->path, &p->msg);
@@ -1801,7 +1822,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 	if (len > sk->sk_rcvbuf)
 		goto save_message;
 
-	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
+	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
 	if (!skb)
 		goto save_message;
 