@@ -1231,6 +1231,34 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1231
1231
return err ;
1232
1232
}
1233
1233
1234
/* alloc_iucv_recv_skb() - allocate an skb to receive an IUCV message of
 * @len bytes.
 *
 * Messages smaller than a page get a fully linear skb with no headroom.
 * Larger messages get a paged skb: the headroom is sized to hold a
 * struct iucv_array descriptor table with one entry for the linear part
 * plus one per page fragment (MAX_SKB_FRAGS + 1 entries); the receive
 * path later fills that table from skb->head and passes it to
 * message_receive() as an IPBUFLST buffer list.
 *
 * Returns the skb, or NULL on allocation failure (WARN_ONCE reports the
 * failing length and error code once).
 */
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		/* small message: single linear buffer, no descriptor table */
		headroom = 0;
		linear = len;
	} else {
		/* reserve headroom for the iucv_array descriptor table:
		 * one slot for the linear part + one per fragment */
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	/* NOTE(review): GFP_DMA — presumably IUCV hardware/firmware needs
	 * buffers within the platform's DMA-addressable range; confirm. */
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		/* skb_put() only accounted for the linear part; fix up the
		 * total length and the paged (fragment) length by hand. */
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}
1261
+
1234
1262
/* iucv_process_message() - Receive a single outstanding IUCV message
1235
1263
*
1236
1264
* Locking: must be called with message_q.lock held
@@ -1255,16 +1283,32 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1255
1283
skb -> len = 0 ;
1256
1284
}
1257
1285
} else {
1258
- rc = pr_iucv -> message_receive (path , msg ,
1286
+ if (skb_is_nonlinear (skb )) {
1287
+ struct iucv_array * iba = (struct iucv_array * )skb -> head ;
1288
+ int i ;
1289
+
1290
+ iba [0 ].address = (u32 )(addr_t )skb -> data ;
1291
+ iba [0 ].length = (u32 )skb_headlen (skb );
1292
+ for (i = 0 ; i < skb_shinfo (skb )-> nr_frags ; i ++ ) {
1293
+ skb_frag_t * frag = & skb_shinfo (skb )-> frags [i ];
1294
+
1295
+ iba [i + 1 ].address =
1296
+ (u32 )(addr_t )skb_frag_address (frag );
1297
+ iba [i + 1 ].length = (u32 )skb_frag_size (frag );
1298
+ }
1299
+ rc = pr_iucv -> message_receive (path , msg ,
1300
+ IUCV_IPBUFLST ,
1301
+ (void * )iba , len , NULL );
1302
+ } else {
1303
+ rc = pr_iucv -> message_receive (path , msg ,
1259
1304
msg -> flags & IUCV_IPRMDATA ,
1260
1305
skb -> data , len , NULL );
1306
+ }
1261
1307
if (rc ) {
1262
1308
kfree_skb (skb );
1263
1309
return ;
1264
1310
}
1265
- skb_reset_transport_header (skb );
1266
- skb_reset_network_header (skb );
1267
- skb -> len = len ;
1311
+ WARN_ON_ONCE (skb -> len != len );
1268
1312
}
1269
1313
1270
1314
IUCV_SKB_CB (skb )-> offset = 0 ;
@@ -1283,7 +1327,7 @@ static void iucv_process_message_q(struct sock *sk)
1283
1327
struct sock_msg_q * p , * n ;
1284
1328
1285
1329
list_for_each_entry_safe (p , n , & iucv -> message_q .list , list ) {
1286
- skb = alloc_skb (iucv_msg_length (& p -> msg ), GFP_ATOMIC | GFP_DMA );
1330
+ skb = alloc_iucv_recv_skb (iucv_msg_length (& p -> msg ));
1287
1331
if (!skb )
1288
1332
break ;
1289
1333
iucv_process_message (sk , skb , p -> path , & p -> msg );
@@ -1778,7 +1822,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1778
1822
if (len > sk -> sk_rcvbuf )
1779
1823
goto save_message ;
1780
1824
1781
- skb = alloc_skb (iucv_msg_length (msg ), GFP_ATOMIC | GFP_DMA );
1825
+ skb = alloc_iucv_recv_skb (iucv_msg_length (msg ));
1782
1826
if (!skb )
1783
1827
goto save_message ;
1784
1828
0 commit comments